/*
 * linux/kernel/time/tick-sched.c
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007  Timesys Corp., Thomas Gleixner
 *
 * No idle tick implementation for low and high resolution timers
 *
 * Started by: Thomas Gleixner and Ingo Molnar
 *
 * Distribute under GPLv2.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/module.h>

#include <asm/irq_regs.h>

#include "tick-internal.h"

/*
 * Per cpu nohz control structure
 */
static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);

/*
 * The time when the last jiffy update happened. Protected by xtime_lock.
 */
static ktime_t last_jiffies_update;

struct tick_sched *tick_get_tick_sched(int cpu)
{
        return &per_cpu(tick_cpu_sched, cpu);
}

/*
 * Must be called with interrupts disabled !
 */
static void tick_do_update_jiffies64(ktime_t now)
{
        unsigned long ticks = 0;
        ktime_t delta;

        /*
         * Do a quick check without holding xtime_lock:
         */
        delta = ktime_sub(now, last_jiffies_update);
        if (delta.tv64 < tick_period.tv64)
                return;

        /* Reevaluate with xtime_lock held */
        write_seqlock(&xtime_lock);

        delta = ktime_sub(now, last_jiffies_update);
        if (delta.tv64 >= tick_period.tv64) {

                delta = ktime_sub(delta, tick_period);
                last_jiffies_update = ktime_add(last_jiffies_update,
                                                tick_period);

                /* Slow path for long timeouts */
                if (unlikely(delta.tv64 >= tick_period.tv64)) {
                        s64 incr = ktime_to_ns(tick_period);

                        ticks = ktime_divns(delta, incr);

                        last_jiffies_update = ktime_add_ns(last_jiffies_update,
                                                           incr * ticks);
                }
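                /*
                 * The fast path above already consumed one tick period;
                 * the pre-increment below accounts for it on top of any
                 * ticks batched by the slow path.
                 */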
                do_timer(++ticks);

                /* Keep the tick_next_period variable up to date */
                tick_next_period = ktime_add(last_jiffies_update, tick_period);
        }
        write_sequnlock(&xtime_lock);
}

/*
 * Initialize and return the jiffies update.
 */
static ktime_t tick_init_jiffy_update(void)
{
        ktime_t period;

        write_seqlock(&xtime_lock);
        /* Did we start the jiffies update yet ? */
        if (last_jiffies_update.tv64 == 0)
                last_jiffies_update = tick_next_period;
        period = last_jiffies_update;
        write_sequnlock(&xtime_lock);
        return period;
}

/*
 * NOHZ - aka dynamic tick functionality
 */
#ifdef CONFIG_NO_HZ
/*
 * NO HZ enabled ?
 */
static int tick_nohz_enabled __read_mostly = 1;

/*
 * Enable / Disable tickless mode
 */
static int __init setup_tick_nohz(char *str)
{
        if (!strcmp(str, "off"))
                tick_nohz_enabled = 0;
        else if (!strcmp(str, "on"))
                tick_nohz_enabled = 1;
        else
                return 0;
        return 1;
}

__setup("nohz=", setup_tick_nohz);
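/* i.e., dynamic ticks can be disabled at boot by passing "nohz=off" */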

/**
 * tick_nohz_update_jiffies - update jiffies when idle was interrupted
 *
 * Called from interrupt entry when the CPU was idle
 *
 * In case the sched_tick was stopped on this CPU, we have to check if jiffies
 * must be updated. Otherwise an interrupt handler could use a stale jiffy
 * value. We do this unconditionally on any cpu, as we don't know whether the
 * cpu which has the update task assigned is in a long sleep.
 */
static void tick_nohz_update_jiffies(void)
{
        int cpu = smp_processor_id();
        struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
        unsigned long flags;
        ktime_t now;

        if (!ts->tick_stopped)
                return;

        cpumask_clear_cpu(cpu, nohz_cpu_mask);
        now = ktime_get();
        ts->idle_waketime = now;

        local_irq_save(flags);
        tick_do_update_jiffies64(now);
        local_irq_restore(flags);

        touch_softlockup_watchdog();
}

static void tick_nohz_stop_idle(int cpu)
{
        struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);

        if (ts->idle_active) {
                ktime_t now, delta;
                now = ktime_get();
                delta = ktime_sub(now, ts->idle_entrytime);
                ts->idle_lastupdate = now;
                ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
                ts->idle_active = 0;

                sched_clock_idle_wakeup_event(0);
        }
}

static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
{
        ktime_t now, delta;

        now = ktime_get();
        if (ts->idle_active) {
                delta = ktime_sub(now, ts->idle_entrytime);
                ts->idle_lastupdate = now;
                ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
        }
        ts->idle_entrytime = now;
        ts->idle_active = 1;
        sched_clock_idle_sleep_event();
        return now;
}

u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
{
        struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);

        if (!tick_nohz_enabled)
                return -1;

        if (ts->idle_active)
                *last_update_time = ktime_to_us(ts->idle_lastupdate);
        else
                *last_update_time = ktime_to_us(ktime_get());

        return ktime_to_us(ts->idle_sleeptime);
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time_us);
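
/*
 * Illustrative sketch (an editor's example, not part of this file):
 * users of the export above, e.g. cpufreq governors, typically sample
 * it twice and work with the deltas over a measurement interval:
 *
 *	u64 wall0, wall1, idle0, idle1;
 *
 *	idle0 = get_cpu_idle_time_us(cpu, &wall0);
 *	...wait for the sampling interval...
 *	idle1 = get_cpu_idle_time_us(cpu, &wall1);
 *	...busy fraction is about 1 - (idle1 - idle0) / (wall1 - wall0)...
 */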

/**
 * tick_nohz_stop_sched_tick - stop the idle tick from the idle task
 *
 * When the next event is more than a tick into the future, stop the idle tick.
 * Called either from the idle loop or from irq_exit() when an idle period was
 * just interrupted by an interrupt which did not cause a reschedule.
 */
void tick_nohz_stop_sched_tick(int inidle)
{
        unsigned long seq, last_jiffies, next_jiffies, delta_jiffies, flags;
        struct tick_sched *ts;
        ktime_t last_update, expires, now;
        struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
        int cpu;

        local_irq_save(flags);

        cpu = smp_processor_id();
        ts = &per_cpu(tick_cpu_sched, cpu);
        now = tick_nohz_start_idle(ts);

        /*
         * If this cpu is offline and it is the one which updates
         * jiffies, then give up the assignment and let it be taken by
         * the cpu which runs the tick timer next. If we don't drop
         * this here the jiffies might be stale and do_timer() is never
         * invoked.
         */
        if (unlikely(!cpu_online(cpu))) {
                if (cpu == tick_do_timer_cpu)
                        tick_do_timer_cpu = TICK_DO_TIMER_NONE;
        }

        if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
                goto end;

        if (!inidle && !ts->inidle)
                goto end;

        ts->inidle = 1;

        if (need_resched())
                goto end;

        if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
                static int ratelimit;

                if (ratelimit < 10) {
                        printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
                               local_softirq_pending());
                        ratelimit++;
                }
                goto end;
        }

        ts->idle_calls++;
        /* Read jiffies and the time when jiffies were updated last */
        do {
                seq = read_seqbegin(&xtime_lock);
                last_update = last_jiffies_update;
                last_jiffies = jiffies;
        } while (read_seqretry(&xtime_lock, seq));
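        /*
         * The read-side seqlock loop above retries until it sees a
         * snapshot of jiffies and last_jiffies_update that no writer
         * modified mid-read.
         */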

        /* Get the next timer wheel timer */
        next_jiffies = get_next_timer_interrupt(last_jiffies);
        delta_jiffies = next_jiffies - last_jiffies;

        if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu))
                delta_jiffies = 1;
        /*
         * Do not stop the tick, if we are only one off
         * or if the cpu is required for rcu
         */
        if (!ts->tick_stopped && delta_jiffies == 1)
                goto out;

        /* Schedule the tick, if we are at least one jiffy off */
        if ((long)delta_jiffies >= 1) {

                /*
                 * Calculate the expiry time for the next timer wheel
                 * timer:
                 */
                expires = ktime_add_ns(last_update, tick_period.tv64 *
                                       delta_jiffies);

                /*
                 * If this cpu is the one which updates jiffies, then
                 * give up the assignment and let it be taken by the
                 * cpu which runs the tick timer next, which might be
                 * this cpu as well. If we don't drop this here the
                 * jiffies might be stale and do_timer() is never
                 * invoked.
                 */
                if (cpu == tick_do_timer_cpu)
                        tick_do_timer_cpu = TICK_DO_TIMER_NONE;

                if (delta_jiffies > 1)
                        cpumask_set_cpu(cpu, nohz_cpu_mask);

                /* Skip reprogram of event if it's not changed */
                if (ts->tick_stopped && ktime_equal(expires, dev->next_event))
                        goto out;

                /*
                 * nohz_stop_sched_tick can be called several times before
                 * the nohz_restart_sched_tick is called. This happens when
                 * interrupts arrive which do not cause a reschedule. In the
                 * first call we save the current tick time, so we can restart
                 * the scheduler tick in nohz_restart_sched_tick.
                 */
                if (!ts->tick_stopped) {
                        if (select_nohz_load_balancer(1)) {
                                /*
                                 * sched tick not stopped!
                                 */
                                cpumask_clear_cpu(cpu, nohz_cpu_mask);
                                goto out;
                        }

                        ts->idle_tick = hrtimer_get_expires(&ts->sched_timer);
                        ts->tick_stopped = 1;
                        ts->idle_jiffies = last_jiffies;
                        rcu_enter_nohz();
                }

                ts->idle_sleeps++;

                /*
                 * delta_jiffies >= NEXT_TIMER_MAX_DELTA signals that
                 * there is no timer pending or at least extremely far
                 * into the future (12 days for HZ=1000). In this case
                 * we simply stop the tick timer:
                 */
                if (unlikely(delta_jiffies >= NEXT_TIMER_MAX_DELTA)) {
                        ts->idle_expires.tv64 = KTIME_MAX;
                        if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
                                hrtimer_cancel(&ts->sched_timer);
                        goto out;
                }

                /* Mark expiries */
                ts->idle_expires = expires;

                if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
                        hrtimer_start(&ts->sched_timer, expires,
                                      HRTIMER_MODE_ABS);
                        /* Check, if the timer was already in the past */
                        if (hrtimer_active(&ts->sched_timer))
                                goto out;
                } else if (!tick_program_event(expires, 0))
                        goto out;
                /*
                 * We are past the event already. So we crossed a
                 * jiffy boundary. Update jiffies and raise the
                 * softirq.
                 */
                tick_do_update_jiffies64(ktime_get());
                cpumask_clear_cpu(cpu, nohz_cpu_mask);
        }
        raise_softirq_irqoff(TIMER_SOFTIRQ);
out:
        ts->next_jiffies = next_jiffies;
        ts->last_jiffies = last_jiffies;
        ts->sleep_length = ktime_sub(dev->next_event, now);
end:
        local_irq_restore(flags);
}

/**
 * tick_nohz_get_sleep_length - return the length of the current sleep
 *
 * Called from power state control code with interrupts disabled
 */
ktime_t tick_nohz_get_sleep_length(void)
{
        struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);

        return ts->sleep_length;
}
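
/*
 * Illustrative sketch (an editor's example, not part of this file): a
 * cpuidle governor can use the value above to cap its predicted idle
 * residency before choosing a sleep state, along the lines of:
 *
 *	s64 limit_ns = ktime_to_ns(tick_nohz_get_sleep_length());
 *
 *	if (predicted_ns > limit_ns)
 *		predicted_ns = limit_ns;
 */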

static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
{
        hrtimer_cancel(&ts->sched_timer);
        hrtimer_set_expires(&ts->sched_timer, ts->idle_tick);

        while (1) {
                /* Forward the time to expire in the future */
                hrtimer_forward(&ts->sched_timer, now, tick_period);

                if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
                        hrtimer_start_expires(&ts->sched_timer,
                                              HRTIMER_MODE_ABS);
                        /* Check, if the timer was already in the past */
                        if (hrtimer_active(&ts->sched_timer))
                                break;
                } else {
                        if (!tick_program_event(
                                hrtimer_get_expires(&ts->sched_timer), 0))
                                break;
                }
                /* Update jiffies and reread time */
                tick_do_update_jiffies64(now);
                now = ktime_get();
        }
}

/**
 * tick_nohz_restart_sched_tick - restart the idle tick from the idle task
 *
 * Restart the idle tick when the CPU is woken up from idle
 */
void tick_nohz_restart_sched_tick(void)
{
        int cpu = smp_processor_id();
        struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
        unsigned long ticks;
#endif
        ktime_t now;

        local_irq_disable();
        tick_nohz_stop_idle(cpu);

        if (!ts->inidle || !ts->tick_stopped) {
                ts->inidle = 0;
                local_irq_enable();
                return;
        }

        ts->inidle = 0;

        rcu_exit_nohz();

        /* Update jiffies first */
        select_nohz_load_balancer(0);
        now = ktime_get();
        tick_do_update_jiffies64(now);
        cpumask_clear_cpu(cpu, nohz_cpu_mask);

#ifndef CONFIG_VIRT_CPU_ACCOUNTING
        /*
         * We stopped the tick in idle. update_process_times() would miss
         * the time we slept, as it does only a one-tick accounting.
         * Enforce that this is accounted to idle !
         */
        ticks = jiffies - ts->idle_jiffies;
        /*
         * We might be one off. Do not randomly account a huge number of ticks!
         */
        if (ticks && ticks < LONG_MAX)
                account_idle_ticks(ticks);
#endif

        touch_softlockup_watchdog();
        /*
         * Cancel the scheduled timer and restore the tick
         */
        ts->tick_stopped = 0;
        ts->idle_exittime = now;

        tick_nohz_restart(ts, now);

        local_irq_enable();
}

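/*
 * Forward the tick timer by one period and reprogram the clock event
 * device. A nonzero return value means the new expiry time is already
 * in the past; the caller must then update jiffies and retry.
 */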
static int tick_nohz_reprogram(struct tick_sched *ts, ktime_t now)
{
        hrtimer_forward(&ts->sched_timer, now, tick_period);
        return tick_program_event(hrtimer_get_expires(&ts->sched_timer), 0);
}

/*
 * The nohz low res interrupt handler
 */
static void tick_nohz_handler(struct clock_event_device *dev)
{
        struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
        struct pt_regs *regs = get_irq_regs();
        int cpu = smp_processor_id();
        ktime_t now = ktime_get();

        dev->next_event.tv64 = KTIME_MAX;

        /*
         * Check if the do_timer duty was dropped. We don't care about
         * concurrency: This happens only when the cpu in charge went
         * into a long sleep. If two cpus happen to assign themselves to
         * this duty, then the jiffies update is still serialized by
         * xtime_lock.
         */
        if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
                tick_do_timer_cpu = cpu;

        /* Check, if the jiffies need an update */
        if (tick_do_timer_cpu == cpu)
                tick_do_update_jiffies64(now);

        /*
         * When we are idle and the tick is stopped, we have to touch
         * the watchdog as we might not schedule for a really long
         * time. This happens on complete idle SMP systems while
         * waiting on the login prompt. We also increment the "start
         * of idle" jiffy stamp so the idle accounting adjustment we
         * do when we go busy again does not account too many ticks.
         */
        if (ts->tick_stopped) {
                touch_softlockup_watchdog();
                ts->idle_jiffies++;
        }

        update_process_times(user_mode(regs));
        profile_tick(CPU_PROFILING);

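        /*
         * Reprogram for the next tick. If the expiry time has already
         * passed, we crossed a jiffy boundary meanwhile: update jiffies
         * and retry with a fresh timestamp.
         */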
        while (tick_nohz_reprogram(ts, now)) {
                now = ktime_get();
                tick_do_update_jiffies64(now);
        }
}

/**
 * tick_nohz_switch_to_nohz - switch to nohz mode
 */
static void tick_nohz_switch_to_nohz(void)
{
        struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
        ktime_t next;

        if (!tick_nohz_enabled)
                return;

        local_irq_disable();
        if (tick_switch_to_oneshot(tick_nohz_handler)) {
                local_irq_enable();
                return;
        }

        ts->nohz_mode = NOHZ_MODE_LOWRES;

        /*
         * Recycle the hrtimer in ts, so we can share the
         * hrtimer_forward with the highres code.
         */
        hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
        /* Get the next period */
        next = tick_init_jiffy_update();

        for (;;) {
                hrtimer_set_expires(&ts->sched_timer, next);
                if (!tick_program_event(next, 0))
                        break;
                next = ktime_add(next, tick_period);
        }
        local_irq_enable();

        printk(KERN_INFO "Switched to NOHZ mode on CPU #%d\n",
               smp_processor_id());
}

/*
 * When NOHZ is enabled and the tick is stopped, we need to kick the
 * tick timer from irq_enter() so that the jiffies update is kept
 * alive during long running softirqs. That's ugly as hell, but
 * correctness is key even if we need to fix the offending softirq in
 * the first place.
 *
 * Note, this is different from tick_nohz_restart. We just kick the
 * timer and do not touch the other magic bits which need to be done
 * when idle is left.
 */
static void tick_nohz_kick_tick(int cpu)
{
#if 0
        /* Switch back to 2.6.27 behaviour */

        struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
        ktime_t delta, now;

        if (!ts->tick_stopped)
                return;

        /*
         * Do not touch the tick device, when the next expiry is either
         * already reached or less than or equal to the tick period away.
         */
        now = ktime_get();
        delta = ktime_sub(hrtimer_get_expires(&ts->sched_timer), now);
        if (delta.tv64 <= tick_period.tv64)
                return;

        tick_nohz_restart(ts, now);
#endif
}

#else

static inline void tick_nohz_switch_to_nohz(void) { }

#endif /* NO_HZ */

/*
 * Called from irq_enter to notify about the possible interruption of idle()
 */
void tick_check_idle(int cpu)
{
        tick_check_oneshot_broadcast(cpu);
#ifdef CONFIG_NO_HZ
        tick_nohz_stop_idle(cpu);
        tick_nohz_update_jiffies();
        tick_nohz_kick_tick(cpu);
#endif
}

/*
 * High resolution timer specific code
 */
#ifdef CONFIG_HIGH_RES_TIMERS
/*
 * We rearm the timer until we get disabled by the idle code.
 * Called with interrupts disabled and timer->base->cpu_base->lock held.
 */
static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
{
        struct tick_sched *ts =
                container_of(timer, struct tick_sched, sched_timer);
        struct pt_regs *regs = get_irq_regs();
        ktime_t now = ktime_get();
        int cpu = smp_processor_id();

#ifdef CONFIG_NO_HZ
        /*
         * Check if the do_timer duty was dropped. We don't care about
         * concurrency: This happens only when the cpu in charge went
         * into a long sleep. If two cpus happen to assign themselves to
         * this duty, then the jiffies update is still serialized by
         * xtime_lock.
         */
        if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
                tick_do_timer_cpu = cpu;
#endif

        /* Check, if the jiffies need an update */
        if (tick_do_timer_cpu == cpu)
                tick_do_update_jiffies64(now);

        /*
         * Do not call, when we are not in irq context and have
         * no valid regs pointer
         */
        if (regs) {
                /*
                 * When we are idle and the tick is stopped, we have to touch
                 * the watchdog as we might not schedule for a really long
                 * time. This happens on complete idle SMP systems while
                 * waiting on the login prompt. We also increment the "start of
                 * idle" jiffy stamp so the idle accounting adjustment we do
                 * when we go busy again does not account too many ticks.
                 */
                if (ts->tick_stopped) {
                        touch_softlockup_watchdog();
                        ts->idle_jiffies++;
                }
                update_process_times(user_mode(regs));
                profile_tick(CPU_PROFILING);
        }

        hrtimer_forward(timer, now, tick_period);

        return HRTIMER_RESTART;
}

/**
 * tick_setup_sched_timer - setup the tick emulation timer
 */
void tick_setup_sched_timer(void)
{
        struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
        ktime_t now = ktime_get();
        u64 offset;

        /*
         * Emulate tick processing via per-CPU hrtimers:
         */
        hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
        ts->sched_timer.function = tick_sched_timer;

        /* Get the next period (per cpu) */
        hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());
        offset = ktime_to_ns(tick_period) >> 1;
        do_div(offset, num_possible_cpus());
        offset *= smp_processor_id();
        hrtimer_add_expires_ns(&ts->sched_timer, offset);
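        /*
         * The per-cpu offset above staggers the tick expiry times across
         * the first half of the tick period, so the CPUs do not all fire
         * their tick hrtimers at the same instant.
         */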

        for (;;) {
                hrtimer_forward(&ts->sched_timer, now, tick_period);
                hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS);
                /* Check, if the timer was already in the past */
                if (hrtimer_active(&ts->sched_timer))
                        break;
                now = ktime_get();
        }

#ifdef CONFIG_NO_HZ
        if (tick_nohz_enabled)
                ts->nohz_mode = NOHZ_MODE_HIGHRES;
#endif
}
#endif /* HIGH_RES_TIMERS */

#if defined CONFIG_NO_HZ || defined CONFIG_HIGH_RES_TIMERS
void tick_cancel_sched_timer(int cpu)
{
        struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);

# ifdef CONFIG_HIGH_RES_TIMERS
        if (ts->sched_timer.base)
                hrtimer_cancel(&ts->sched_timer);
# endif

        ts->nohz_mode = NOHZ_MODE_INACTIVE;
}
#endif

/**
 * Async notification about clocksource changes
 */
void tick_clock_notify(void)
{
        int cpu;

        for_each_possible_cpu(cpu)
                set_bit(0, &per_cpu(tick_cpu_sched, cpu).check_clocks);
}

/*
 * Async notification about clock event changes
 */
void tick_oneshot_notify(void)
{
        struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);

        set_bit(0, &ts->check_clocks);
}

/**
 * Check, if a change happened, which makes oneshot possible.
 *
 * Called cyclically from the hrtimer softirq (driven by the timer
 * softirq). allow_nohz signals that we can switch into low-res nohz
 * mode, because high resolution timers are disabled (either at compile
 * time or at runtime).
 */
int tick_check_oneshot_change(int allow_nohz)
{
        struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);

        if (!test_and_clear_bit(0, &ts->check_clocks))
                return 0;

        if (ts->nohz_mode != NOHZ_MODE_INACTIVE)
                return 0;

        if (!timekeeping_valid_for_hres() || !tick_is_oneshot_available())
                return 0;

        if (!allow_nohz)
                return 1;

        tick_nohz_switch_to_nohz();
        return 0;
}