Commit | Line | Data |
---|---|---|
79bf2bb3 TG |
1 | /* |
2 | * linux/kernel/time/tick-sched.c | |
3 | * | |
4 | * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de> | |
5 | * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar | |
6 | * Copyright(C) 2006-2007 Timesys Corp., Thomas Gleixner | |
7 | * | |
8 | * No idle tick implementation for low and high resolution timers | |
9 | * | |
10 | * Started by: Thomas Gleixner and Ingo Molnar | |
11 | * | |
b10db7f0 | 12 | * Distribute under GPLv2. |
79bf2bb3 TG |
13 | */ |
14 | #include <linux/cpu.h> | |
15 | #include <linux/err.h> | |
16 | #include <linux/hrtimer.h> | |
17 | #include <linux/interrupt.h> | |
18 | #include <linux/kernel_stat.h> | |
19 | #include <linux/percpu.h> | |
20 | #include <linux/profile.h> | |
21 | #include <linux/sched.h> | |
8083e4ad | 22 | #include <linux/module.h> |
79bf2bb3 | 23 | |
9e203bcc DM |
24 | #include <asm/irq_regs.h> |
25 | ||
79bf2bb3 TG |
26 | #include "tick-internal.h" |
27 | ||
28 | /* | |
29 | * Per cpu nohz control structure | |
30 | */ | |
31 | static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched); | |
32 | ||
33 | /* | |
34 | * The time when the last jiffy update happened. Protected by xtime_lock. | |
35 | */ | |
36 | static ktime_t last_jiffies_update; | |
37 | ||
289f480a IM |
38 | struct tick_sched *tick_get_tick_sched(int cpu) |
39 | { | |
40 | return &per_cpu(tick_cpu_sched, cpu); | |
41 | } | |
42 | ||
79bf2bb3 TG |
43 | /* |
44 | * Must be called with interrupts disabled ! | |
45 | */ | |
46 | static void tick_do_update_jiffies64(ktime_t now) | |
47 | { | |
48 | unsigned long ticks = 0; | |
49 | ktime_t delta; | |
50 | ||
7a14ce1d IM |
51 | /* |
52 | * Do a quick check without holding xtime_lock: | |
53 | */ | |
54 | delta = ktime_sub(now, last_jiffies_update); | |
55 | if (delta.tv64 < tick_period.tv64) | |
56 | return; | |
57 | ||
79bf2bb3 TG |
58 | /* Reevaluate with xtime_lock held */ |
59 | write_seqlock(&xtime_lock); | |
60 | ||
61 | delta = ktime_sub(now, last_jiffies_update); | |
62 | if (delta.tv64 >= tick_period.tv64) { | |
63 | ||
64 | delta = ktime_sub(delta, tick_period); | |
65 | last_jiffies_update = ktime_add(last_jiffies_update, | |
66 | tick_period); | |
67 | ||
68 | /* Slow path for long timeouts */ | |
69 | if (unlikely(delta.tv64 >= tick_period.tv64)) { | |
70 | s64 incr = ktime_to_ns(tick_period); | |
71 | ||
72 | ticks = ktime_divns(delta, incr); | |
73 | ||
74 | last_jiffies_update = ktime_add_ns(last_jiffies_update, | |
75 | incr * ticks); | |
76 | } | |
77 | do_timer(++ticks); | |
49d670fb TG |
78 | |
79 | /* Keep the tick_next_period variable up to date */ | |
80 | tick_next_period = ktime_add(last_jiffies_update, tick_period); | |
79bf2bb3 TG |
81 | } |
82 | write_sequnlock(&xtime_lock); | |
83 | } | |
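Editorial note: a worked example of the slow path above, assuming HZ=1000 so that tick_period is 1 ms:

/*
 * Suppose the CPU was idle for 5 ms since last_jiffies_update. The first
 * ktime_sub()/ktime_add() pair consumes one period, leaving delta = 4 ms.
 * The slow path then computes ticks = ktime_divns(4 ms, 1 ms) = 4 and
 * advances last_jiffies_update by 4 more periods, so do_timer(++ticks)
 * accounts all 5 missed jiffies in a single call.
 */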
84 | ||
85 | /* | |
86 | * Initialize and retrieve the jiffies update. | |
87 | */ | |
88 | static ktime_t tick_init_jiffy_update(void) | |
89 | { | |
90 | ktime_t period; | |
91 | ||
92 | write_seqlock(&xtime_lock); | |
93 | /* Did we start the jiffies update yet ? */ | |
94 | if (last_jiffies_update.tv64 == 0) | |
95 | last_jiffies_update = tick_next_period; | |
96 | period = last_jiffies_update; | |
97 | write_sequnlock(&xtime_lock); | |
98 | return period; | |
99 | } | |
100 | ||
101 | /* | |
102 | * NOHZ - aka dynamic tick functionality | |
103 | */ | |
104 | #ifdef CONFIG_NO_HZ | |
105 | /* | |
106 | * NO HZ enabled ? | |
107 | */ | |
108 | static int tick_nohz_enabled __read_mostly = 1; | |
109 | ||
110 | /* | |
111 | * Enable / Disable tickless mode | |
112 | */ | |
113 | static int __init setup_tick_nohz(char *str) | |
114 | { | |
115 | if (!strcmp(str, "off")) | |
116 | tick_nohz_enabled = 0; | |
117 | else if (!strcmp(str, "on")) | |
118 | tick_nohz_enabled = 1; | |
119 | else | |
120 | return 0; | |
121 | return 1; | |
122 | } | |
123 | ||
124 | __setup("nohz=", setup_tick_nohz); | |
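Usage note (editorial): the __setup() hook above binds the handler to the "nohz=" kernel command line option, so dynamic ticks can be toggled at boot:

    nohz=off    keep the periodic tick (tick_nohz_enabled = 0)
    nohz=on     allow the tick to be stopped in idle (the default, since
                tick_nohz_enabled is initialized to 1)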
125 | ||
126 | /** | |
127 | * tick_nohz_update_jiffies - update jiffies when idle was interrupted | |
128 | * | |
129 | * Called from interrupt entry when the CPU was idle | |
130 | * | |
131 | * In case the sched_tick was stopped on this CPU, we have to check if jiffies | |
132 | * must be updated. Otherwise an interrupt handler could use a stale jiffy | |
133 | * value. We do this unconditionally on any cpu, as we don't know whether the | |
134 | * cpu which has the update task assigned is in a long sleep. | |
135 | */ | |
eed3b9cf | 136 | static void tick_nohz_update_jiffies(ktime_t now) |
79bf2bb3 TG |
137 | { |
138 | int cpu = smp_processor_id(); | |
139 | struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); | |
140 | unsigned long flags; | |
79bf2bb3 | 141 | |
5df7fa1c | 142 | ts->idle_waketime = now; |
79bf2bb3 TG |
143 | |
144 | local_irq_save(flags); | |
145 | tick_do_update_jiffies64(now); | |
146 | local_irq_restore(flags); | |
02ff3755 IM |
147 | |
148 | touch_softlockup_watchdog(); | |
79bf2bb3 TG |
149 | } |
150 | ||
595aac48 AV |
151 | /* |
152 | * Updates the per-cpu idle time statistics counters | |
153 | */ | |
8d63bf94 | 154 | static void |
8c215bd3 | 155 | update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_update_time) |
6378ddb5 | 156 | { |
eed3b9cf | 157 | ktime_t delta; |
6378ddb5 | 158 | |
595aac48 AV |
159 | if (ts->idle_active) { |
160 | delta = ktime_sub(now, ts->idle_entrytime); | |
8c215bd3 | 161 | if (nr_iowait_cpu(cpu) > 0) |
0224cf4c | 162 | ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta); |
6beea0cd MH |
163 | else |
164 | ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta); | |
8c7b09f4 | 165 | ts->idle_entrytime = now; |
595aac48 | 166 | } |
8d63bf94 | 167 | |
e0e37c20 | 168 | if (last_update_time) |
8d63bf94 AV |
169 | *last_update_time = ktime_to_us(now); |
170 | ||
595aac48 AV |
171 | } |
172 | ||
173 | static void tick_nohz_stop_idle(int cpu, ktime_t now) | |
174 | { | |
175 | struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); | |
176 | ||
8c215bd3 | 177 | update_ts_time_stats(cpu, ts, now, NULL); |
eed3b9cf | 178 | ts->idle_active = 0; |
56c7426b | 179 | |
eed3b9cf | 180 | sched_clock_idle_wakeup_event(0); |
6378ddb5 VP |
181 | } |
182 | ||
8c215bd3 | 183 | static ktime_t tick_nohz_start_idle(int cpu, struct tick_sched *ts) |
6378ddb5 | 184 | { |
430ee881 | 185 | ktime_t now = ktime_get(); |
595aac48 | 186 | |
6378ddb5 VP |
187 | ts->idle_entrytime = now; |
188 | ts->idle_active = 1; | |
56c7426b | 189 | sched_clock_idle_sleep_event(); |
6378ddb5 VP |
190 | return now; |
191 | } | |
192 | ||
b1f724c3 AV |
193 | /** |
194 | * get_cpu_idle_time_us - get the total idle time of a cpu | |
195 | * @cpu: CPU number to query | |
09a1d34f MH |
196 | * @last_update_time: variable to store update time in. Do not update |
197 | * counters if NULL. | |
b1f724c3 AV |
198 | * |
199 | * Return the cumulative idle time (since boot) for a given | |
6beea0cd | 200 | * CPU, in microseconds. |
b1f724c3 AV |
201 | * |
202 | * This time is measured via accounting rather than sampling, | |
203 | * and is as accurate as ktime_get() is. | |
204 | * | |
205 | * This function returns -1 if NOHZ is not enabled. | |
206 | */ | |
6378ddb5 VP |
207 | u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time) |
208 | { | |
209 | struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); | |
09a1d34f | 210 | ktime_t now, idle; |
6378ddb5 | 211 | |
8083e4ad | 212 | if (!tick_nohz_enabled) |
213 | return -1; | |
214 | ||
09a1d34f MH |
215 | now = ktime_get(); |
216 | if (last_update_time) { | |
217 | update_ts_time_stats(cpu, ts, now, last_update_time); | |
218 | idle = ts->idle_sleeptime; | |
219 | } else { | |
220 | if (ts->idle_active && !nr_iowait_cpu(cpu)) { | |
221 | ktime_t delta = ktime_sub(now, ts->idle_entrytime); | |
222 | ||
223 | idle = ktime_add(ts->idle_sleeptime, delta); | |
224 | } else { | |
225 | idle = ts->idle_sleeptime; | |
226 | } | |
227 | } | |
228 | ||
229 | return ktime_to_us(idle); | |
8083e4ad | 230 | |
6378ddb5 | 231 | } |
8083e4ad | 232 | EXPORT_SYMBOL_GPL(get_cpu_idle_time_us); |
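Caller sketch (editorial, not part of this file): code such as a cpufreq-style governor typically samples this interface twice and works with the deltas. The helper below is a hypothetical illustration and omits handling of the -1 return when NOHZ is disabled.

static u64 busy_time_us(int cpu, u64 *prev_idle_us, u64 *prev_wall_us)
{
	u64 wall_us, idle_us, busy_us;

	/*
	 * Passing a non-NULL pointer also folds the running idle period
	 * into the counters and reports the sample time in *wall_us.
	 */
	idle_us = get_cpu_idle_time_us(cpu, &wall_us);

	busy_us = (wall_us - *prev_wall_us) - (idle_us - *prev_idle_us);
	*prev_idle_us = idle_us;
	*prev_wall_us = wall_us;
	return busy_us;
}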
6378ddb5 | 233 | |
6beea0cd | 234 | /** |
0224cf4c AV |
235 | * get_cpu_iowait_time_us - get the total iowait time of a cpu |
236 | * @cpu: CPU number to query | |
09a1d34f MH |
237 | * @last_update_time: variable to store update time in. Do not update |
238 | * counters if NULL. | |
0224cf4c AV |
239 | * |
240 | * Return the cumulative iowait time (since boot) for a given | |
241 | * CPU, in microseconds. | |
242 | * | |
243 | * This time is measured via accounting rather than sampling, | |
244 | * and is as accurate as ktime_get() is. | |
245 | * | |
246 | * This function returns -1 if NOHZ is not enabled. | |
247 | */ | |
248 | u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time) | |
249 | { | |
250 | struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); | |
09a1d34f | 251 | ktime_t now, iowait; |
0224cf4c AV |
252 | |
253 | if (!tick_nohz_enabled) | |
254 | return -1; | |
255 | ||
09a1d34f MH |
256 | now = ktime_get(); |
257 | if (last_update_time) { | |
258 | update_ts_time_stats(cpu, ts, now, last_update_time); | |
259 | iowait = ts->iowait_sleeptime; | |
260 | } else { | |
261 | if (ts->idle_active && nr_iowait_cpu(cpu) > 0) { | |
262 | ktime_t delta = ktime_sub(now, ts->idle_entrytime); | |
0224cf4c | 263 | |
09a1d34f MH |
264 | iowait = ktime_add(ts->iowait_sleeptime, delta); |
265 | } else { | |
266 | iowait = ts->iowait_sleeptime; | |
267 | } | |
268 | } | |
0224cf4c | 269 | |
09a1d34f | 270 | return ktime_to_us(iowait); |
0224cf4c AV |
271 | } |
272 | EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us); | |
273 | ||
280f0677 | 274 | static void tick_nohz_stop_sched_tick(struct tick_sched *ts) |
79bf2bb3 | 275 | { |
280f0677 | 276 | unsigned long seq, last_jiffies, next_jiffies, delta_jiffies; |
aa9b1630 | 277 | unsigned long rcu_delta_jiffies; |
6378ddb5 | 278 | ktime_t last_update, expires, now; |
4f86d3a8 | 279 | struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev; |
98962465 | 280 | u64 time_delta; |
79bf2bb3 TG |
281 | int cpu; |
282 | ||
79bf2bb3 TG |
283 | cpu = smp_processor_id(); |
284 | ts = &per_cpu(tick_cpu_sched, cpu); | |
f2e21c96 | 285 | |
8c215bd3 | 286 | now = tick_nohz_start_idle(cpu, ts); |
79bf2bb3 | 287 | |
5e41d0d6 TG |
288 | /* |
289 | * If this cpu is offline and it is the one which updates | |
290 | * jiffies, then give up the assignment and let it be taken by | |
291 | * the cpu which runs the tick timer next. If we don't drop | |
292 | * this here the jiffies might be stale and do_timer() never | |
293 | * invoked. | |
294 | */ | |
295 | if (unlikely(!cpu_online(cpu))) { | |
296 | if (cpu == tick_do_timer_cpu) | |
6441402b | 297 | tick_do_timer_cpu = TICK_DO_TIMER_NONE; |
5e41d0d6 TG |
298 | } |
299 | ||
79bf2bb3 | 300 | if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) |
280f0677 | 301 | return; |
79bf2bb3 TG |
302 | |
303 | if (need_resched()) | |
280f0677 | 304 | return; |
79bf2bb3 | 305 | |
fa116ea3 | 306 | if (unlikely(local_softirq_pending() && cpu_online(cpu))) { |
35282316 TG |
307 | static int ratelimit; |
308 | ||
309 | if (ratelimit < 10) { | |
310 | printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n", | |
529eaccd | 311 | (unsigned int) local_softirq_pending()); |
35282316 TG |
312 | ratelimit++; |
313 | } | |
280f0677 | 314 | return; |
35282316 | 315 | } |
79bf2bb3 | 316 | |
79bf2bb3 | 317 | ts->idle_calls++; |
79bf2bb3 TG |
318 | /* Read jiffies and the time when jiffies were updated last */ |
319 | do { | |
320 | seq = read_seqbegin(&xtime_lock); | |
321 | last_update = last_jiffies_update; | |
322 | last_jiffies = jiffies; | |
27185016 | 323 | time_delta = timekeeping_max_deferment(); |
79bf2bb3 TG |
324 | } while (read_seqretry(&xtime_lock, seq)); |
325 | ||
aa9b1630 | 326 | if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) || printk_needs_cpu(cpu) || |
396e894d | 327 | arch_needs_cpu(cpu)) { |
3c5d92a0 | 328 | next_jiffies = last_jiffies + 1; |
6ba9b346 | 329 | delta_jiffies = 1; |
3c5d92a0 MS |
330 | } else { |
331 | /* Get the next timer wheel timer */ | |
332 | next_jiffies = get_next_timer_interrupt(last_jiffies); | |
333 | delta_jiffies = next_jiffies - last_jiffies; | |
aa9b1630 PM |
334 | if (rcu_delta_jiffies < delta_jiffies) { |
335 | next_jiffies = last_jiffies + rcu_delta_jiffies; | |
336 | delta_jiffies = rcu_delta_jiffies; | |
337 | } | |
3c5d92a0 | 338 | } |
79bf2bb3 TG |
339 | /* |
340 | * Do not stop the tick if we are only one jiffy off | |
341 | * or if the cpu is required for rcu | |
342 | */ | |
6ba9b346 | 343 | if (!ts->tick_stopped && delta_jiffies == 1) |
79bf2bb3 TG |
344 | goto out; |
345 | ||
346 | /* Schedule the tick, if we are at least one jiffy off */ |
347 | if ((long)delta_jiffies >= 1) { | |
348 | ||
00147449 WR |
349 | /* |
350 | * If this cpu is the one which updates jiffies, then | |
351 | * give up the assignment and let it be taken by the | |
352 | * cpu which runs the tick timer next, which might be | |
353 | * this cpu as well. If we don't drop this here the | |
354 | * jiffies might be stale and do_timer() never | |
27185016 TG |
355 | * invoked. Keep track of the fact that it was the one |
356 | * which had the do_timer() duty last. If this cpu is | |
357 | * the one which had the do_timer() duty last, we | |
358 | * limit the sleep time to the timekeeping | |
359 | * max_deferment value which we retrieved | |
360 | * above. Otherwise we can sleep as long as we want. | |
00147449 | 361 | */ |
27185016 | 362 | if (cpu == tick_do_timer_cpu) { |
00147449 | 363 | tick_do_timer_cpu = TICK_DO_TIMER_NONE; |
27185016 TG |
364 | ts->do_timer_last = 1; |
365 | } else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) { | |
366 | time_delta = KTIME_MAX; | |
367 | ts->do_timer_last = 0; | |
368 | } else if (!ts->do_timer_last) { | |
369 | time_delta = KTIME_MAX; | |
370 | } | |
371 | ||
00147449 | 372 | /* |
98962465 JH |
373 | * calculate the expiry time for the next timer wheel |
374 | * timer. delta_jiffies >= NEXT_TIMER_MAX_DELTA signals | |
375 | * that there is no timer pending or at least extremely | |
376 | * far into the future (12 days for HZ=1000). In this | |
377 | * case we set the expiry to the end of time. | |
378 | */ | |
379 | if (likely(delta_jiffies < NEXT_TIMER_MAX_DELTA)) { | |
380 | /* | |
381 | * Calculate the time delta for the next timer event. | |
382 | * If the time delta exceeds the maximum time delta | |
383 | * permitted by the current clocksource then adjust | |
384 | * the time delta accordingly to ensure the | |
385 | * clocksource does not wrap. | |
386 | */ | |
387 | time_delta = min_t(u64, time_delta, | |
388 | tick_period.tv64 * delta_jiffies); | |
98962465 | 389 | } |
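		/*
		 * Editorial example (not in the original source): with HZ=1000
		 * and the next timer wheel timer 100 jiffies away, the
		 * candidate delta is 100 ms. If timekeeping_max_deferment()
		 * reported only 50 ms, the min_t() above clamps time_delta to
		 * 50 ms so the clocksource cannot wrap while we sleep.
		 */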
00147449 | 390 | |
27185016 TG |
391 | if (time_delta < KTIME_MAX) |
392 | expires = ktime_add_ns(last_update, time_delta); | |
393 | else | |
394 | expires.tv64 = KTIME_MAX; | |
00147449 | 395 | |
00147449 WR |
396 | /* Skip reprogram of event if it's not changed */ |
397 | if (ts->tick_stopped && ktime_equal(expires, dev->next_event)) | |
398 | goto out; | |
399 | ||
79bf2bb3 TG |
400 | /* |
401 | * nohz_stop_sched_tick can be called several times before | |
402 | * the nohz_restart_sched_tick is called. This happens when | |
403 | * interrupts arrive which do not cause a reschedule. In the | |
404 | * first call we save the current tick time, so we can restart | |
405 | * the scheduler tick in nohz_restart_sched_tick. | |
406 | */ | |
407 | if (!ts->tick_stopped) { | |
83cd4fe2 | 408 | select_nohz_load_balancer(1); |
46cb4b7c | 409 | |
cc584b21 | 410 | ts->idle_tick = hrtimer_get_expires(&ts->sched_timer); |
79bf2bb3 TG |
411 | ts->tick_stopped = 1; |
412 | ts->idle_jiffies = last_jiffies; | |
413 | } | |
d3ed7824 | 414 | |
eaad084b TG |
415 | ts->idle_sleeps++; |
416 | ||
98962465 JH |
417 | /* Mark expires */ |
418 | ts->idle_expires = expires; | |
419 | ||
eaad084b | 420 | /* |
98962465 JH |
421 | * If the expiration time == KTIME_MAX, we | |
422 | * simply stop the tick timer. | |
eaad084b | 423 | */ |
98962465 | 424 | if (unlikely(expires.tv64 == KTIME_MAX)) { |
eaad084b TG |
425 | if (ts->nohz_mode == NOHZ_MODE_HIGHRES) |
426 | hrtimer_cancel(&ts->sched_timer); | |
427 | goto out; | |
428 | } | |
429 | ||
79bf2bb3 TG |
430 | if (ts->nohz_mode == NOHZ_MODE_HIGHRES) { |
431 | hrtimer_start(&ts->sched_timer, expires, | |
5c333864 | 432 | HRTIMER_MODE_ABS_PINNED); |
79bf2bb3 TG |
433 | /* Check, if the timer was already in the past */ |
434 | if (hrtimer_active(&ts->sched_timer)) | |
435 | goto out; | |
4c9dc641 | 436 | } else if (!tick_program_event(expires, 0)) |
79bf2bb3 TG |
437 | goto out; |
438 | /* | |
439 | * We are past the event already. So we crossed a | |
440 | * jiffie boundary. Update jiffies and raise the | |
441 | * softirq. | |
442 | */ | |
443 | tick_do_update_jiffies64(ktime_get()); | |
79bf2bb3 TG |
444 | } |
445 | raise_softirq_irqoff(TIMER_SOFTIRQ); | |
446 | out: | |
447 | ts->next_jiffies = next_jiffies; | |
448 | ts->last_jiffies = last_jiffies; | |
4f86d3a8 | 449 | ts->sleep_length = ktime_sub(dev->next_event, now); |
280f0677 FW |
450 | } |
451 | ||
452 | /** | |
453 | * tick_nohz_idle_enter - stop the idle tick from the idle task | |
454 | * | |
455 | * When the next event is more than a tick into the future, stop the idle tick. | |
456 | * Called when we start the idle loop. | |
2bbb6817 | 457 | * |
1268fbc7 | 458 | * The arch is responsible for calling: |
2bbb6817 FW |
459 | * |
460 | * - rcu_idle_enter() after its last use of RCU before the CPU is put | |
461 | * to sleep. | |
462 | * - rcu_idle_exit() before the first use of RCU after the CPU is woken up. | |
280f0677 | 463 | */ |
1268fbc7 | 464 | void tick_nohz_idle_enter(void) |
280f0677 FW |
465 | { |
466 | struct tick_sched *ts; | |
467 | ||
1268fbc7 FW |
468 | WARN_ON_ONCE(irqs_disabled()); |
469 | ||
0db49b72 LT |
470 | /* |
471 | * Update the idle state in the scheduler domain hierarchy | |
472 | * when tick_nohz_stop_sched_tick() is called from the idle loop. | |
473 | * State will be updated to busy during the first busy tick after | |
474 | * exiting idle. | |
475 | */ | |
476 | set_cpu_sd_state_idle(); | |
477 | ||
1268fbc7 FW |
478 | local_irq_disable(); |
479 | ||
280f0677 FW |
480 | ts = &__get_cpu_var(tick_cpu_sched); |
481 | /* | |
482 | * Set ts->inidle unconditionally. Even if the system did not | |
483 | * switch to nohz mode, the cpu frequency governors rely on the | |
484 | * update of the idle time accounting in tick_nohz_start_idle(). | |
485 | */ | |
486 | ts->inidle = 1; | |
487 | tick_nohz_stop_sched_tick(ts); | |
1268fbc7 FW |
488 | |
489 | local_irq_enable(); | |
280f0677 FW |
490 | } |
491 | ||
492 | /** | |
493 | * tick_nohz_irq_exit - update next tick event from interrupt exit | |
494 | * | |
495 | * When an interrupt fires while we are idle and it doesn't cause | |
496 | * a reschedule, it may still add, modify or delete a timer, enqueue | |
497 | * an RCU callback, etc... | |
498 | * So we need to re-calculate and reprogram the next tick event. | |
499 | */ | |
500 | void tick_nohz_irq_exit(void) | |
501 | { | |
502 | struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); | |
503 | ||
504 | if (!ts->inidle) | |
505 | return; | |
506 | ||
507 | tick_nohz_stop_sched_tick(ts); | |
79bf2bb3 TG |
508 | } |
509 | ||
4f86d3a8 LB |
510 | /** |
511 | * tick_nohz_get_sleep_length - return the length of the current sleep | |
512 | * | |
513 | * Called from power state control code with interrupts disabled | |
514 | */ | |
515 | ktime_t tick_nohz_get_sleep_length(void) | |
516 | { | |
517 | struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); | |
518 | ||
519 | return ts->sleep_length; | |
520 | } | |
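Caller sketch (editorial, hypothetical): the value returned above lets power management code bound how deep an idle state is worth entering, e.g.:

static bool deep_state_worthwhile(s64 exit_latency_us)
{
	/* Only pay for a deep idle state if the expected sleep outlasts it */
	return ktime_to_us(tick_nohz_get_sleep_length()) > exit_latency_us;
}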
521 | ||
c34bec5a TG |
522 | static void tick_nohz_restart(struct tick_sched *ts, ktime_t now) |
523 | { | |
524 | hrtimer_cancel(&ts->sched_timer); | |
268a3dcf | 525 | hrtimer_set_expires(&ts->sched_timer, ts->idle_tick); |
c34bec5a TG |
526 | |
527 | while (1) { | |
528 | /* Forward the time to expire in the future */ | |
529 | hrtimer_forward(&ts->sched_timer, now, tick_period); | |
530 | ||
531 | if (ts->nohz_mode == NOHZ_MODE_HIGHRES) { | |
268a3dcf | 532 | hrtimer_start_expires(&ts->sched_timer, |
5c333864 | 533 | HRTIMER_MODE_ABS_PINNED); |
c34bec5a TG |
534 | /* Check, if the timer was already in the past */ |
535 | if (hrtimer_active(&ts->sched_timer)) | |
536 | break; | |
537 | } else { | |
268a3dcf TG |
538 | if (!tick_program_event( |
539 | hrtimer_get_expires(&ts->sched_timer), 0)) | |
c34bec5a TG |
540 | break; |
541 | } | |
6f103929 | 542 | /* Reread time and update jiffies */ |
c34bec5a | 543 | now = ktime_get(); |
6f103929 | 544 | tick_do_update_jiffies64(now); |
c34bec5a TG |
545 | } |
546 | } | |
547 | ||
79bf2bb3 | 548 | /** |
280f0677 | 549 | * tick_nohz_idle_exit - restart the idle tick from the idle task |
79bf2bb3 TG |
550 | * |
551 | * Restart the idle tick when the CPU is woken up from idle | |
280f0677 FW |
552 | * This also exits the RCU extended quiescent state. The CPU |
553 | * can use RCU again after this function is called. | |
79bf2bb3 | 554 | */ |
280f0677 | 555 | void tick_nohz_idle_exit(void) |
79bf2bb3 TG |
556 | { |
557 | int cpu = smp_processor_id(); | |
558 | struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); | |
79741dd3 | 559 | #ifndef CONFIG_VIRT_CPU_ACCOUNTING |
79bf2bb3 | 560 | unsigned long ticks; |
79741dd3 | 561 | #endif |
6378ddb5 | 562 | ktime_t now; |
79bf2bb3 | 563 | |
6378ddb5 | 564 | local_irq_disable(); |
2bbb6817 | 565 | |
15f827be FW |
566 | WARN_ON_ONCE(!ts->inidle); |
567 | ||
568 | ts->inidle = 0; | |
569 | ||
570 | if (ts->idle_active || ts->tick_stopped) | |
eed3b9cf MS |
571 | now = ktime_get(); |
572 | ||
573 | if (ts->idle_active) | |
574 | tick_nohz_stop_idle(cpu, now); | |
6378ddb5 | 575 | |
15f827be | 576 | if (!ts->tick_stopped) { |
6378ddb5 | 577 | local_irq_enable(); |
79bf2bb3 | 578 | return; |
6378ddb5 | 579 | } |
79bf2bb3 TG |
580 | |
581 | /* Update jiffies first */ | |
46cb4b7c | 582 | select_nohz_load_balancer(0); |
79bf2bb3 | 583 | tick_do_update_jiffies64(now); |
5aaa0b7a | 584 | update_cpu_load_nohz(); |
79bf2bb3 | 585 | |
79741dd3 | 586 | #ifndef CONFIG_VIRT_CPU_ACCOUNTING |
79bf2bb3 TG |
587 | /* |
588 | * We stopped the tick in idle. update_process_times() would miss the | |
589 | * time we slept, as it does only one tick of accounting. | |
590 | * Enforce that this is accounted to idle! | |
591 | */ | |
592 | ticks = jiffies - ts->idle_jiffies; | |
593 | /* | |
594 | * We might be one off. Do not randomly account a huge number of ticks! | |
595 | */ | |
79741dd3 MS |
596 | if (ticks && ticks < LONG_MAX) |
597 | account_idle_ticks(ticks); | |
598 | #endif | |
79bf2bb3 | 599 | |
126e01bf | 600 | touch_softlockup_watchdog(); |
79bf2bb3 TG |
601 | /* |
602 | * Cancel the scheduled timer and restore the tick | |
603 | */ | |
604 | ts->tick_stopped = 0; | |
5df7fa1c | 605 | ts->idle_exittime = now; |
79bf2bb3 | 606 | |
c34bec5a | 607 | tick_nohz_restart(ts, now); |
79bf2bb3 | 608 | |
79bf2bb3 TG |
609 | local_irq_enable(); |
610 | } | |
611 | ||
612 | static int tick_nohz_reprogram(struct tick_sched *ts, ktime_t now) | |
613 | { | |
614 | hrtimer_forward(&ts->sched_timer, now, tick_period); | |
cc584b21 | 615 | return tick_program_event(hrtimer_get_expires(&ts->sched_timer), 0); |
79bf2bb3 TG |
616 | } |
617 | ||
618 | /* | |
619 | * The nohz low res interrupt handler | |
620 | */ | |
621 | static void tick_nohz_handler(struct clock_event_device *dev) | |
622 | { | |
623 | struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); | |
624 | struct pt_regs *regs = get_irq_regs(); | |
d3ed7824 | 625 | int cpu = smp_processor_id(); |
79bf2bb3 TG |
626 | ktime_t now = ktime_get(); |
627 | ||
628 | dev->next_event.tv64 = KTIME_MAX; | |
629 | ||
d3ed7824 TG |
630 | /* |
631 | * Check if the do_timer duty was dropped. We don't care about | |
632 | * concurrency: This happens only when the cpu in charge went | |
633 | * into a long sleep. If two cpus happen to assign themselves to | |
634 | * this duty, then the jiffies update is still serialized by | |
635 | * xtime_lock. | |
636 | */ | |
6441402b | 637 | if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)) |
d3ed7824 TG |
638 | tick_do_timer_cpu = cpu; |
639 | ||
79bf2bb3 | 640 | /* Check, if the jiffies need an update */ |
d3ed7824 TG |
641 | if (tick_do_timer_cpu == cpu) |
642 | tick_do_update_jiffies64(now); | |
79bf2bb3 TG |
643 | |
644 | /* | |
645 | * When we are idle and the tick is stopped, we have to touch | |
646 | * the watchdog as we might not schedule for a really long | |
647 | * time. This happens on complete idle SMP systems while | |
648 | * waiting on the login prompt. We also increment the "start | |
649 | * of idle" jiffy stamp so the idle accounting adjustment we | |
650 | * do when we go busy again does not account too many ticks. | |
651 | */ | |
652 | if (ts->tick_stopped) { | |
653 | touch_softlockup_watchdog(); | |
654 | ts->idle_jiffies++; | |
655 | } | |
656 | ||
657 | update_process_times(user_mode(regs)); | |
658 | profile_tick(CPU_PROFILING); | |
659 | ||
79bf2bb3 TG |
660 | while (tick_nohz_reprogram(ts, now)) { |
661 | now = ktime_get(); | |
662 | tick_do_update_jiffies64(now); | |
663 | } | |
664 | } | |
665 | ||
666 | /** | |
667 | * tick_nohz_switch_to_nohz - switch to nohz mode | |
668 | */ | |
669 | static void tick_nohz_switch_to_nohz(void) | |
670 | { | |
671 | struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); | |
672 | ktime_t next; | |
673 | ||
674 | if (!tick_nohz_enabled) | |
675 | return; | |
676 | ||
677 | local_irq_disable(); | |
678 | if (tick_switch_to_oneshot(tick_nohz_handler)) { | |
679 | local_irq_enable(); | |
680 | return; | |
681 | } | |
682 | ||
683 | ts->nohz_mode = NOHZ_MODE_LOWRES; | |
684 | ||
685 | /* | |
686 | * Recycle the hrtimer in ts, so we can share the | |
687 | * hrtimer_forward with the highres code. | |
688 | */ | |
689 | hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); | |
690 | /* Get the next period */ | |
691 | next = tick_init_jiffy_update(); | |
692 | ||
693 | for (;;) { | |
cc584b21 | 694 | hrtimer_set_expires(&ts->sched_timer, next); |
79bf2bb3 TG |
695 | if (!tick_program_event(next, 0)) |
696 | break; | |
697 | next = ktime_add(next, tick_period); | |
698 | } | |
699 | local_irq_enable(); | |
79bf2bb3 TG |
700 | } |
701 | ||
fb02fbc1 TG |
702 | /* |
703 | * When NOHZ is enabled and the tick is stopped, we need to kick the | |
704 | * tick timer from irq_enter() so that the jiffies update is kept | |
705 | * alive during long running softirqs. That's ugly as hell, but | |
706 | * correctness is key even if we need to fix the offending softirq in | |
707 | * the first place. | |
708 | * | |
709 | * Note, this is different to tick_nohz_restart. We just kick the | |
710 | * timer and do not touch the other magic bits which need to be done | |
711 | * when idle is left. | |
712 | */ | |
eed3b9cf | 713 | static void tick_nohz_kick_tick(int cpu, ktime_t now) |
fb02fbc1 | 714 | { |
ae99286b TG |
715 | #if 0 |
716 | /* Switch back to 2.6.27 behaviour */ | |
717 | ||
fb02fbc1 | 718 | struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); |
eed3b9cf | 719 | ktime_t delta; |
fb02fbc1 | 720 | |
c4bd822e TG |
721 | /* |
722 | * Do not touch the tick device, when the next expiry is either | |
723 | * already reached or less/equal than the tick period. | |
724 | */ | |
268a3dcf | 725 | delta = ktime_sub(hrtimer_get_expires(&ts->sched_timer), now); |
c4bd822e TG |
726 | if (delta.tv64 <= tick_period.tv64) |
727 | return; | |
728 | ||
729 | tick_nohz_restart(ts, now); | |
ae99286b | 730 | #endif |
fb02fbc1 TG |
731 | } |
732 | ||
eed3b9cf MS |
733 | static inline void tick_check_nohz(int cpu) |
734 | { | |
735 | struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); | |
736 | ktime_t now; | |
737 | ||
738 | if (!ts->idle_active && !ts->tick_stopped) | |
739 | return; | |
740 | now = ktime_get(); | |
741 | if (ts->idle_active) | |
742 | tick_nohz_stop_idle(cpu, now); | |
743 | if (ts->tick_stopped) { | |
744 | tick_nohz_update_jiffies(now); | |
745 | tick_nohz_kick_tick(cpu, now); | |
746 | } | |
747 | } | |
748 | ||
79bf2bb3 TG |
749 | #else |
750 | ||
751 | static inline void tick_nohz_switch_to_nohz(void) { } | |
eed3b9cf | 752 | static inline void tick_check_nohz(int cpu) { } |
79bf2bb3 TG |
753 | |
754 | #endif /* NO_HZ */ | |
755 | ||
719254fa TG |
756 | /* |
757 | * Called from irq_enter to notify about the possible interruption of idle() | |
758 | */ | |
759 | void tick_check_idle(int cpu) | |
760 | { | |
fb02fbc1 | 761 | tick_check_oneshot_broadcast(cpu); |
eed3b9cf | 762 | tick_check_nohz(cpu); |
719254fa TG |
763 | } |
764 | ||
79bf2bb3 TG |
765 | /* |
766 | * High resolution timer specific code | |
767 | */ | |
768 | #ifdef CONFIG_HIGH_RES_TIMERS | |
769 | /* | |
4c9dc641 | 770 | * We rearm the timer until we get disabled by the idle code. |
79bf2bb3 TG |
771 | * Called with interrupts disabled and timer->base->cpu_base->lock held. |
772 | */ | |
773 | static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer) | |
774 | { | |
775 | struct tick_sched *ts = | |
776 | container_of(timer, struct tick_sched, sched_timer); | |
79bf2bb3 TG |
777 | struct pt_regs *regs = get_irq_regs(); |
778 | ktime_t now = ktime_get(); | |
d3ed7824 TG |
779 | int cpu = smp_processor_id(); |
780 | ||
781 | #ifdef CONFIG_NO_HZ | |
782 | /* | |
783 | * Check if the do_timer duty was dropped. We don't care about | |
784 | * concurrency: This happens only when the cpu in charge went | |
785 | * into a long sleep. If two cpus happen to assign themselves to | |
786 | * this duty, then the jiffies update is still serialized by | |
787 | * xtime_lock. | |
788 | */ | |
6441402b | 789 | if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)) |
d3ed7824 TG |
790 | tick_do_timer_cpu = cpu; |
791 | #endif | |
79bf2bb3 TG |
792 | |
793 | /* Check, if the jiffies need an update */ | |
d3ed7824 TG |
794 | if (tick_do_timer_cpu == cpu) |
795 | tick_do_update_jiffies64(now); | |
79bf2bb3 TG |
796 | |
797 | /* | |
798 | * Do not call, when we are not in irq context and have | |
799 | * no valid regs pointer | |
800 | */ | |
801 | if (regs) { | |
802 | /* | |
803 | * When we are idle and the tick is stopped, we have to touch | |
804 | * the watchdog as we might not schedule for a really long | |
805 | * time. This happens on complete idle SMP systems while | |
806 | * waiting on the login prompt. We also increment the "start of | |
807 | * idle" jiffy stamp so the idle accounting adjustment we do | |
808 | * when we go busy again does not account too many ticks. | |
809 | */ | |
810 | if (ts->tick_stopped) { | |
811 | touch_softlockup_watchdog(); | |
812 | ts->idle_jiffies++; | |
813 | } | |
79bf2bb3 TG |
814 | update_process_times(user_mode(regs)); |
815 | profile_tick(CPU_PROFILING); | |
79bf2bb3 TG |
816 | } |
817 | ||
79bf2bb3 TG |
818 | hrtimer_forward(timer, now, tick_period); |
819 | ||
820 | return HRTIMER_RESTART; | |
821 | } | |
822 | ||
5307c955 MG |
823 | static int sched_skew_tick; |
824 | ||
62cf20b3 TG |
825 | static int __init skew_tick(char *str) |
826 | { | |
827 | get_option(&str, &sched_skew_tick); | |
828 | ||
829 | return 0; | |
830 | } | |
831 | early_param("skew_tick", skew_tick); | |
832 | ||
79bf2bb3 TG |
833 | /** |
834 | * tick_setup_sched_timer - setup the tick emulation timer | |
835 | */ | |
836 | void tick_setup_sched_timer(void) | |
837 | { | |
838 | struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); | |
839 | ktime_t now = ktime_get(); | |
840 | ||
841 | /* | |
842 | * Emulate tick processing via per-CPU hrtimers: | |
843 | */ | |
844 | hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); | |
845 | ts->sched_timer.function = tick_sched_timer; | |
79bf2bb3 | 846 | |
3704540b | 847 | /* Get the next period (per cpu) */ |
cc584b21 | 848 | hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update()); |
79bf2bb3 | 849 | |
5307c955 MG |
850 | /* Offset the tick to avert xtime_lock contention. */ |
851 | if (sched_skew_tick) { | |
852 | u64 offset = ktime_to_ns(tick_period) >> 1; | |
853 | do_div(offset, num_possible_cpus()); | |
854 | offset *= smp_processor_id(); | |
855 | hrtimer_add_expires_ns(&ts->sched_timer, offset); | |
856 | } | |
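	/*
	 * Editorial example (not in the original source): this path is armed
	 * by booting with "skew_tick=1". With HZ=250 (tick_period = 4 ms) and
	 * num_possible_cpus() == 4, offset starts at 2,000,000 ns, do_div()
	 * makes it 500,000 ns per CPU index, so CPU 3 fires its tick 1.5 ms
	 * after CPU 0, spreading out the xtime_lock acquisitions.
	 */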
857 | ||
79bf2bb3 TG |
858 | for (;;) { |
859 | hrtimer_forward(&ts->sched_timer, now, tick_period); | |
5c333864 AB |
860 | hrtimer_start_expires(&ts->sched_timer, |
861 | HRTIMER_MODE_ABS_PINNED); | |
79bf2bb3 TG |
862 | /* Check, if the timer was already in the past */ |
863 | if (hrtimer_active(&ts->sched_timer)) | |
864 | break; | |
865 | now = ktime_get(); | |
866 | } | |
867 | ||
868 | #ifdef CONFIG_NO_HZ | |
29c158e8 | 869 | if (tick_nohz_enabled) |
79bf2bb3 TG |
870 | ts->nohz_mode = NOHZ_MODE_HIGHRES; |
871 | #endif | |
872 | } | |
3c4fbe5e | 873 | #endif /* HIGH_RES_TIMERS */ |
79bf2bb3 | 874 | |
3c4fbe5e | 875 | #if defined CONFIG_NO_HZ || defined CONFIG_HIGH_RES_TIMERS |
79bf2bb3 TG |
876 | void tick_cancel_sched_timer(int cpu) |
877 | { | |
878 | struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); | |
879 | ||
3c4fbe5e | 880 | # ifdef CONFIG_HIGH_RES_TIMERS |
79bf2bb3 TG |
881 | if (ts->sched_timer.base) |
882 | hrtimer_cancel(&ts->sched_timer); | |
3c4fbe5e | 883 | # endif |
a7901766 | 884 | |
79bf2bb3 TG |
885 | ts->nohz_mode = NOHZ_MODE_INACTIVE; |
886 | } | |
3c4fbe5e | 887 | #endif |
79bf2bb3 TG |
888 | |
889 | /** | |
890 | * Async notification about clocksource changes | |
891 | */ | |
892 | void tick_clock_notify(void) | |
893 | { | |
894 | int cpu; | |
895 | ||
896 | for_each_possible_cpu(cpu) | |
897 | set_bit(0, &per_cpu(tick_cpu_sched, cpu).check_clocks); | |
898 | } | |
899 | ||
900 | /* | |
901 | * Async notification about clock event changes | |
902 | */ | |
903 | void tick_oneshot_notify(void) | |
904 | { | |
905 | struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); | |
906 | ||
907 | set_bit(0, &ts->check_clocks); | |
908 | } | |
909 | ||
910 | /** | |
911 | * Check if a change happened which makes oneshot possible. | |
912 | * | |
913 | * Called cyclically from the hrtimer softirq (driven by the timer | |
914 | * softirq). allow_nohz signals that we can switch into low-res nohz | |
915 | * mode, because high resolution timers are disabled (either at | |
916 | * compile time or at runtime). | |
917 | */ | |
918 | int tick_check_oneshot_change(int allow_nohz) | |
919 | { | |
920 | struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); | |
921 | ||
922 | if (!test_and_clear_bit(0, &ts->check_clocks)) | |
923 | return 0; | |
924 | ||
925 | if (ts->nohz_mode != NOHZ_MODE_INACTIVE) | |
926 | return 0; | |
927 | ||
cf4fc6cb | 928 | if (!timekeeping_valid_for_hres() || !tick_is_oneshot_available()) |
79bf2bb3 TG |
929 | return 0; |
930 | ||
931 | if (!allow_nohz) | |
932 | return 1; | |
933 | ||
934 | tick_nohz_switch_to_nohz(); | |
935 | return 0; | |
936 | } |