/*
 * linux/kernel/time/tick-sched.c
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007 Timesys Corp., Thomas Gleixner
 *
 * No idle tick implementation for low and high resolution timers
 *
 * Started by: Thomas Gleixner and Ingo Molnar
 *
 * Distribute under GPLv2.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/irq_work.h>
#include <linux/posix-timers.h>
#include <linux/perf_event.h>

#include <asm/irq_regs.h>

#include "tick-internal.h"

/*
 * Per cpu nohz control structure
 */
DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);

/*
 * The time when the last jiffy update happened. Protected by jiffies_lock.
 */
static ktime_t last_jiffies_update;

struct tick_sched *tick_get_tick_sched(int cpu)
{
	return &per_cpu(tick_cpu_sched, cpu);
}

/*
 * Must be called with interrupts disabled !
 */
static void tick_do_update_jiffies64(ktime_t now)
{
	unsigned long ticks = 0;
	ktime_t delta;

	/*
	 * Do a quick check without holding jiffies_lock:
	 */
	delta = ktime_sub(now, last_jiffies_update);
	if (delta.tv64 < tick_period.tv64)
		return;

	/* Reevaluate with jiffies_lock held */
	write_seqlock(&jiffies_lock);

	delta = ktime_sub(now, last_jiffies_update);
	if (delta.tv64 >= tick_period.tv64) {

		delta = ktime_sub(delta, tick_period);
		last_jiffies_update = ktime_add(last_jiffies_update,
						tick_period);

		/* Slow path for long timeouts */
		if (unlikely(delta.tv64 >= tick_period.tv64)) {
			s64 incr = ktime_to_ns(tick_period);

			ticks = ktime_divns(delta, incr);

			last_jiffies_update = ktime_add_ns(last_jiffies_update,
							   incr * ticks);
		}
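		/*
		 * Worked example (illustrative, not from the original
		 * source): if this CPU slept for 3.5 tick periods, the
		 * first subtraction above left delta = 2.5 periods, the
		 * slow path accounted ticks = 2 of them, and
		 * do_timer(++ticks) below advances jiffies by 3 in total.
		 */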
		do_timer(++ticks);

		/* Keep the tick_next_period variable up to date */
		tick_next_period = ktime_add(last_jiffies_update, tick_period);
	}
	write_sequnlock(&jiffies_lock);
}

/*
 * Initialize and return the time of the last jiffies update.
 */
static ktime_t tick_init_jiffy_update(void)
{
	ktime_t period;

	write_seqlock(&jiffies_lock);
	/* Did we start the jiffies update yet ? */
	if (last_jiffies_update.tv64 == 0)
		last_jiffies_update = tick_next_period;
	period = last_jiffies_update;
	write_sequnlock(&jiffies_lock);
	return period;
}


static void tick_sched_do_timer(ktime_t now)
{
	int cpu = smp_processor_id();

#ifdef CONFIG_NO_HZ_COMMON
	/*
	 * Check if the do_timer duty was dropped. We don't care about
	 * concurrency: This happens only when the cpu in charge went
	 * into a long sleep. If two cpus happen to assign themselves to
	 * this duty, then the jiffies update is still serialized by
	 * jiffies_lock.
	 */
	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)
	    && !tick_nohz_full_cpu(cpu))
		tick_do_timer_cpu = cpu;
#endif

	/* Check, if the jiffies need an update */
	if (tick_do_timer_cpu == cpu)
		tick_do_update_jiffies64(now);
}

static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
{
#ifdef CONFIG_NO_HZ_COMMON
	/*
	 * When we are idle and the tick is stopped, we have to touch
	 * the watchdog as we might not schedule for a really long
	 * time. This happens on complete idle SMP systems while
	 * waiting on the login prompt. We also increment the "start of
	 * idle" jiffy stamp so the idle accounting adjustment we do
	 * when we go busy again does not account too many ticks.
	 */
	if (ts->tick_stopped) {
		touch_softlockup_watchdog();
		if (is_idle_task(current))
			ts->idle_jiffies++;
	}
#endif
	update_process_times(user_mode(regs));
	profile_tick(CPU_PROFILING);
}

#ifdef CONFIG_NO_HZ_FULL
static cpumask_var_t nohz_full_mask;
bool have_nohz_full_mask;

static bool can_stop_full_tick(void)
{
	WARN_ON_ONCE(!irqs_disabled());

	if (!sched_can_stop_tick())
		return false;

	if (!posix_cpu_timers_can_stop_tick(current))
		return false;

	if (!perf_event_can_stop_tick())
		return false;

	/* sched_clock_tick() needs us? */
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	/*
	 * TODO: kick full dynticks CPUs when
	 * sched_clock_stable is set.
	 */
	if (!sched_clock_stable)
		return false;
#endif

	return true;
}

static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now);

/*
 * Re-evaluate the need for the tick on the current CPU
 * and restart it if necessary.
 */
void tick_nohz_full_check(void)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);

	if (tick_nohz_full_cpu(smp_processor_id())) {
		if (ts->tick_stopped && !is_idle_task(current)) {
			if (!can_stop_full_tick())
				tick_nohz_restart_sched_tick(ts, ktime_get());
		}
	}
}

static void nohz_full_kick_work_func(struct irq_work *work)
{
	tick_nohz_full_check();
}

static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
	.func = nohz_full_kick_work_func,
};

/*
 * Kick the current CPU if it's full dynticks in order to force it to
 * re-evaluate its dependency on the tick and restart it if necessary.
 */
void tick_nohz_full_kick(void)
{
	if (tick_nohz_full_cpu(smp_processor_id()))
		irq_work_queue(&__get_cpu_var(nohz_full_kick_work));
}

static void nohz_full_kick_ipi(void *info)
{
	tick_nohz_full_check();
}

/*
 * Kick all full dynticks CPUs in order to force these to re-evaluate
 * their dependency on the tick and restart it if necessary.
 */
void tick_nohz_full_kick_all(void)
{
	if (!have_nohz_full_mask)
		return;

	preempt_disable();
	smp_call_function_many(nohz_full_mask,
			       nohz_full_kick_ipi, NULL, false);
	preempt_enable();
}

int tick_nohz_full_cpu(int cpu)
{
	if (!have_nohz_full_mask)
		return 0;

	return cpumask_test_cpu(cpu, nohz_full_mask);
}

/* Parse the boot-time nohz CPU list from the kernel parameters. */
static int __init tick_nohz_full_setup(char *str)
{
	int cpu;

	alloc_bootmem_cpumask_var(&nohz_full_mask);
	if (cpulist_parse(str, nohz_full_mask) < 0) {
		pr_warning("NOHZ: Incorrect nohz_full cpumask\n");
		return 1;
	}

	cpu = smp_processor_id();
	if (cpumask_test_cpu(cpu, nohz_full_mask)) {
		pr_warning("NO_HZ: Clearing %d from nohz_full range for timekeeping\n", cpu);
		cpumask_clear_cpu(cpu, nohz_full_mask);
	}
	have_nohz_full_mask = true;

	return 1;
}
__setup("nohz_full=", tick_nohz_full_setup);
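
/*
 * Illustrative usage (not part of the original source): booting with
 * e.g. "nohz_full=1-7" requests full dynticks operation on CPUs 1-7.
 * The CPU running this setup code is cleared from the range above so
 * that at least one CPU keeps the timekeeping duty.
 */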

static int __cpuinit tick_nohz_cpu_down_callback(struct notifier_block *nfb,
						 unsigned long action,
						 void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DOWN_PREPARE:
		/*
		 * If we handle the timekeeping duty for full dynticks CPUs,
		 * we can't safely shutdown that CPU.
		 */
		if (have_nohz_full_mask && tick_do_timer_cpu == cpu)
			return -EINVAL;
		break;
	}
	return NOTIFY_OK;
}

/*
 * Worst case string length for chunks of CPU ranges occurs with 2-step
 * separations: 0,2,4,6,...
 * This is NR_CPUS + sizeof('\0')
 */
static char __initdata nohz_full_buf[NR_CPUS + 1];

static int tick_nohz_init_all(void)
{
	int err = -1;

#ifdef CONFIG_NO_HZ_FULL_ALL
	if (!alloc_cpumask_var(&nohz_full_mask, GFP_KERNEL)) {
		pr_err("NO_HZ: Can't allocate full dynticks cpumask\n");
		return err;
	}
	err = 0;
	cpumask_setall(nohz_full_mask);
	cpumask_clear_cpu(smp_processor_id(), nohz_full_mask);
	have_nohz_full_mask = true;
#endif
	return err;
}

void __init tick_nohz_init(void)
{
	int cpu;

	if (!have_nohz_full_mask) {
		if (tick_nohz_init_all() < 0)
			return;
	}

	cpu_notifier(tick_nohz_cpu_down_callback, 0);

	/* Make sure full dynticks CPUs are also RCU nocbs */
	for_each_cpu(cpu, nohz_full_mask) {
		if (!rcu_is_nocb_cpu(cpu)) {
			pr_warning("NO_HZ: CPU %d is not RCU nocb: "
				   "cleared from nohz_full range", cpu);
			cpumask_clear_cpu(cpu, nohz_full_mask);
		}
	}

	cpulist_scnprintf(nohz_full_buf, sizeof(nohz_full_buf), nohz_full_mask);
	pr_info("NO_HZ: Full dynticks CPUs: %s.\n", nohz_full_buf);
}
#else
#define have_nohz_full_mask (0)
#endif

/*
 * NOHZ - aka dynamic tick functionality
 */
#ifdef CONFIG_NO_HZ_COMMON
/*
 * NO HZ enabled ?
 */
int tick_nohz_enabled __read_mostly = 1;

/*
 * Enable / Disable tickless mode
 */
static int __init setup_tick_nohz(char *str)
{
	if (!strcmp(str, "off"))
		tick_nohz_enabled = 0;
	else if (!strcmp(str, "on"))
		tick_nohz_enabled = 1;
	else
		return 0;
	return 1;
}

__setup("nohz=", setup_tick_nohz);
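
/*
 * Illustrative usage (not part of the original source): booting with
 * "nohz=off" keeps the periodic tick even when CPUs are idle, while
 * "nohz=on" selects the dyntick-idle behaviour that tick_nohz_enabled
 * above defaults to.
 */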

/**
 * tick_nohz_update_jiffies - update jiffies when idle was interrupted
 *
 * Called from interrupt entry when the CPU was idle
 *
 * In case the sched_tick was stopped on this CPU, we have to check if jiffies
 * must be updated. Otherwise an interrupt handler could use a stale jiffy
 * value. We do this unconditionally on any cpu, as we don't know whether the
 * cpu which has the update task assigned is in a long sleep.
 */
static void tick_nohz_update_jiffies(ktime_t now)
{
	int cpu = smp_processor_id();
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	unsigned long flags;

	ts->idle_waketime = now;

	local_irq_save(flags);
	tick_do_update_jiffies64(now);
	local_irq_restore(flags);

	touch_softlockup_watchdog();
}

/*
 * Updates the per cpu time idle statistics counters
 */
static void
update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_update_time)
{
	ktime_t delta;

	if (ts->idle_active) {
		delta = ktime_sub(now, ts->idle_entrytime);
		if (nr_iowait_cpu(cpu) > 0)
			ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta);
		else
			ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
		ts->idle_entrytime = now;
	}

	if (last_update_time)
		*last_update_time = ktime_to_us(now);

}

static void tick_nohz_stop_idle(int cpu, ktime_t now)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);

	update_ts_time_stats(cpu, ts, now, NULL);
	ts->idle_active = 0;

	sched_clock_idle_wakeup_event(0);
}

static ktime_t tick_nohz_start_idle(int cpu, struct tick_sched *ts)
{
	ktime_t now = ktime_get();

	ts->idle_entrytime = now;
	ts->idle_active = 1;
	sched_clock_idle_sleep_event();
	return now;
}

/**
 * get_cpu_idle_time_us - get the total idle time of a cpu
 * @cpu: CPU number to query
 * @last_update_time: variable to store update time in. Do not update
 * counters if NULL.
 *
 * Return the cumulative idle time (since boot) for a given
 * CPU, in microseconds.
 *
 * This time is measured via accounting rather than sampling,
 * and is as accurate as ktime_get() is.
 *
 * This function returns -1 if NOHZ is not enabled.
 */
u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	ktime_t now, idle;

	if (!tick_nohz_enabled)
		return -1;

	now = ktime_get();
	if (last_update_time) {
		update_ts_time_stats(cpu, ts, now, last_update_time);
		idle = ts->idle_sleeptime;
	} else {
		if (ts->idle_active && !nr_iowait_cpu(cpu)) {
			ktime_t delta = ktime_sub(now, ts->idle_entrytime);

			idle = ktime_add(ts->idle_sleeptime, delta);
		} else {
			idle = ts->idle_sleeptime;
		}
	}

	return ktime_to_us(idle);

}
EXPORT_SYMBOL_GPL(get_cpu_idle_time_us);
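
/*
 * Illustrative use (an assumption, not part of the original source): a
 * monitoring path could call get_cpu_idle_time_us(cpu, &wall_us) at the
 * start and end of an interval and treat the difference of the return
 * values divided by the difference of wall_us as the CPU's idle
 * fraction over that interval.
 */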

/**
 * get_cpu_iowait_time_us - get the total iowait time of a cpu
 * @cpu: CPU number to query
 * @last_update_time: variable to store update time in. Do not update
 * counters if NULL.
 *
 * Return the cumulative iowait time (since boot) for a given
 * CPU, in microseconds.
 *
 * This time is measured via accounting rather than sampling,
 * and is as accurate as ktime_get() is.
 *
 * This function returns -1 if NOHZ is not enabled.
 */
u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	ktime_t now, iowait;

	if (!tick_nohz_enabled)
		return -1;

	now = ktime_get();
	if (last_update_time) {
		update_ts_time_stats(cpu, ts, now, last_update_time);
		iowait = ts->iowait_sleeptime;
	} else {
		if (ts->idle_active && nr_iowait_cpu(cpu) > 0) {
			ktime_t delta = ktime_sub(now, ts->idle_entrytime);

			iowait = ktime_add(ts->iowait_sleeptime, delta);
		} else {
			iowait = ts->iowait_sleeptime;
		}
	}

	return ktime_to_us(iowait);
}
EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);

static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
					 ktime_t now, int cpu)
{
	unsigned long seq, last_jiffies, next_jiffies, delta_jiffies;
	ktime_t last_update, expires, ret = { .tv64 = 0 };
	unsigned long rcu_delta_jiffies;
	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
	u64 time_delta;

	/* Read jiffies and the time when jiffies were updated last */
	do {
		seq = read_seqbegin(&jiffies_lock);
		last_update = last_jiffies_update;
		last_jiffies = jiffies;
		time_delta = timekeeping_max_deferment();
	} while (read_seqretry(&jiffies_lock, seq));

	if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) ||
	    arch_needs_cpu(cpu) || irq_work_needs_cpu()) {
		next_jiffies = last_jiffies + 1;
		delta_jiffies = 1;
	} else {
		/* Get the next timer wheel timer */
		next_jiffies = get_next_timer_interrupt(last_jiffies);
		delta_jiffies = next_jiffies - last_jiffies;
		if (rcu_delta_jiffies < delta_jiffies) {
			next_jiffies = last_jiffies + rcu_delta_jiffies;
			delta_jiffies = rcu_delta_jiffies;
		}
	}
	/*
	 * Do not stop the tick, if we are only one off
	 * or if the cpu is required for rcu
	 */
	if (!ts->tick_stopped && delta_jiffies == 1)
		goto out;

	/* Schedule the tick, if we are at least one jiffie off */
	if ((long)delta_jiffies >= 1) {

		/*
		 * If this cpu is the one which updates jiffies, then
		 * give up the assignment and let it be taken by the
		 * cpu which runs the tick timer next, which might be
		 * this cpu as well. If we don't drop this here the
		 * jiffies might be stale and do_timer() never
		 * invoked. Keep track of the fact that it was the one
		 * which had the do_timer() duty last. If this cpu is
		 * the one which had the do_timer() duty last, we
		 * limit the sleep time to the timekeeping
		 * max_deferment value which we retrieved
		 * above. Otherwise we can sleep as long as we want.
		 */
		if (cpu == tick_do_timer_cpu) {
			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
			ts->do_timer_last = 1;
		} else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) {
			time_delta = KTIME_MAX;
			ts->do_timer_last = 0;
		} else if (!ts->do_timer_last) {
			time_delta = KTIME_MAX;
		}

		/*
		 * calculate the expiry time for the next timer wheel
		 * timer. delta_jiffies >= NEXT_TIMER_MAX_DELTA signals
		 * that there is no timer pending or at least extremely
		 * far into the future (12 days for HZ=1000). In this
		 * case we set the expiry to the end of time.
		 */
		if (likely(delta_jiffies < NEXT_TIMER_MAX_DELTA)) {
			/*
			 * Calculate the time delta for the next timer event.
			 * If the time delta exceeds the maximum time delta
			 * permitted by the current clocksource then adjust
			 * the time delta accordingly to ensure the
			 * clocksource does not wrap.
			 */
			time_delta = min_t(u64, time_delta,
					   tick_period.tv64 * delta_jiffies);
		}

		if (time_delta < KTIME_MAX)
			expires = ktime_add_ns(last_update, time_delta);
		else
			expires.tv64 = KTIME_MAX;

		/* Skip reprogram of event if it's not changed */
		if (ts->tick_stopped && ktime_equal(expires, dev->next_event))
			goto out;

		ret = expires;

		/*
		 * nohz_stop_sched_tick can be called several times before
		 * the nohz_restart_sched_tick is called. This happens when
		 * interrupts arrive which do not cause a reschedule. In the
		 * first call we save the current tick time, so we can restart
		 * the scheduler tick in nohz_restart_sched_tick.
		 */
		if (!ts->tick_stopped) {
			nohz_balance_enter_idle(cpu);
			calc_load_enter_idle();

			ts->last_tick = hrtimer_get_expires(&ts->sched_timer);
			ts->tick_stopped = 1;
		}

		/*
		 * If the expiration time == KTIME_MAX, then
		 * in this case we simply stop the tick timer.
		 */
		if (unlikely(expires.tv64 == KTIME_MAX)) {
			if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
				hrtimer_cancel(&ts->sched_timer);
			goto out;
		}

		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
			hrtimer_start(&ts->sched_timer, expires,
				      HRTIMER_MODE_ABS_PINNED);
			/* Check, if the timer was already in the past */
			if (hrtimer_active(&ts->sched_timer))
				goto out;
		} else if (!tick_program_event(expires, 0))
			goto out;
		/*
		 * We are past the event already. So we crossed a
		 * jiffie boundary. Update jiffies and raise the
		 * softirq.
		 */
		tick_do_update_jiffies64(ktime_get());
	}
	raise_softirq_irqoff(TIMER_SOFTIRQ);
out:
	ts->next_jiffies = next_jiffies;
	ts->last_jiffies = last_jiffies;
	ts->sleep_length = ktime_sub(dev->next_event, now);

	return ret;
}

static void tick_nohz_full_stop_tick(struct tick_sched *ts)
{
#ifdef CONFIG_NO_HZ_FULL
	int cpu = smp_processor_id();

	if (!tick_nohz_full_cpu(cpu) || is_idle_task(current))
		return;

	if (!ts->tick_stopped && ts->nohz_mode == NOHZ_MODE_INACTIVE)
		return;

	if (!can_stop_full_tick())
		return;

	tick_nohz_stop_sched_tick(ts, ktime_get(), cpu);
#endif
}

static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
{
	/*
	 * If this cpu is offline and it is the one which updates
	 * jiffies, then give up the assignment and let it be taken by
	 * the cpu which runs the tick timer next. If we don't drop
	 * this here the jiffies might be stale and do_timer() never
	 * invoked.
	 */
	if (unlikely(!cpu_online(cpu))) {
		if (cpu == tick_do_timer_cpu)
			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
	}

	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
		return false;

	if (need_resched())
		return false;

	if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
		static int ratelimit;

		if (ratelimit < 10 &&
		    (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) {
			printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
			       (unsigned int) local_softirq_pending());
			ratelimit++;
		}
		return false;
	}

	if (have_nohz_full_mask) {
		/*
		 * Keep the tick alive to guarantee timekeeping progression
		 * if there are full dynticks CPUs around
		 */
		if (tick_do_timer_cpu == cpu)
			return false;
		/*
		 * Boot safety: make sure the timekeeping duty has been
		 * assigned before entering dyntick-idle mode,
		 */
		if (tick_do_timer_cpu == TICK_DO_TIMER_NONE)
			return false;
	}

	return true;
}

static void __tick_nohz_idle_enter(struct tick_sched *ts)
{
	ktime_t now, expires;
	int cpu = smp_processor_id();

	now = tick_nohz_start_idle(cpu, ts);

	if (can_stop_idle_tick(cpu, ts)) {
		int was_stopped = ts->tick_stopped;

		ts->idle_calls++;

		expires = tick_nohz_stop_sched_tick(ts, now, cpu);
		if (expires.tv64 > 0LL) {
			ts->idle_sleeps++;
			ts->idle_expires = expires;
		}

		if (!was_stopped && ts->tick_stopped)
			ts->idle_jiffies = ts->last_jiffies;
	}
}

/**
 * tick_nohz_idle_enter - stop the idle tick from the idle task
 *
 * When the next event is more than a tick into the future, stop the idle tick.
 * Called when we start the idle loop.
 *
 * The arch is responsible for calling:
 *
 * - rcu_idle_enter() after its last use of RCU before the CPU is put
 *  to sleep.
 * - rcu_idle_exit() before the first use of RCU after the CPU is woken up.
 */
void tick_nohz_idle_enter(void)
{
	struct tick_sched *ts;

	WARN_ON_ONCE(irqs_disabled());

	/*
	 * Update the idle state in the scheduler domain hierarchy
	 * when tick_nohz_stop_sched_tick() is called from the idle loop.
	 * State will be updated to busy during the first busy tick after
	 * exiting idle.
	 */
	set_cpu_sd_state_idle();

	local_irq_disable();

	ts = &__get_cpu_var(tick_cpu_sched);
	/*
	 * Set ts->inidle unconditionally. Even if the system did not
	 * switch to nohz mode, the cpu frequency governors rely on the
	 * update of the idle time accounting in tick_nohz_start_idle().
	 */
	ts->inidle = 1;
	__tick_nohz_idle_enter(ts);

	local_irq_enable();
}
EXPORT_SYMBOL_GPL(tick_nohz_idle_enter);

/**
 * tick_nohz_irq_exit - update next tick event from interrupt exit
 *
 * When an interrupt fires while we are idle and it doesn't cause
 * a reschedule, it may still add, modify or delete a timer, enqueue
 * an RCU callback, etc...
 * So we need to re-calculate and reprogram the next tick event.
 */
void tick_nohz_irq_exit(void)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);

	if (ts->inidle) {
		/* Cancel the timer because the CPU has already woken up from the C-states */
		menu_hrtimer_cancel();
		__tick_nohz_idle_enter(ts);
	} else {
		tick_nohz_full_stop_tick(ts);
	}
}

/**
 * tick_nohz_get_sleep_length - return the length of the current sleep
 *
 * Called from power state control code with interrupts disabled
 */
ktime_t tick_nohz_get_sleep_length(void)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);

	return ts->sleep_length;
}

static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
{
	hrtimer_cancel(&ts->sched_timer);
	hrtimer_set_expires(&ts->sched_timer, ts->last_tick);

	while (1) {
		/* Forward the time to expire in the future */
		hrtimer_forward(&ts->sched_timer, now, tick_period);

		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
			hrtimer_start_expires(&ts->sched_timer,
					      HRTIMER_MODE_ABS_PINNED);
			/* Check, if the timer was already in the past */
			if (hrtimer_active(&ts->sched_timer))
				break;
		} else {
			if (!tick_program_event(
				hrtimer_get_expires(&ts->sched_timer), 0))
				break;
		}
		/* Reread time and update jiffies */
		now = ktime_get();
		tick_do_update_jiffies64(now);
	}
}

static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
{
	/* Update jiffies first */
	tick_do_update_jiffies64(now);
	update_cpu_load_nohz();

	calc_load_exit_idle();
	touch_softlockup_watchdog();
	/*
	 * Cancel the scheduled timer and restore the tick
	 */
	ts->tick_stopped = 0;
	ts->idle_exittime = now;

	tick_nohz_restart(ts, now);
}

static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
{
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	unsigned long ticks;

	if (vtime_accounting_enabled())
		return;
	/*
	 * We stopped the tick in idle. update_process_times() would miss
	 * the time we slept, as it only does one tick of accounting.
	 * Enforce that this is accounted to idle!
	 */
	ticks = jiffies - ts->idle_jiffies;
	/*
	 * We might be one off. Do not randomly account a huge number of ticks!
	 */
	if (ticks && ticks < LONG_MAX)
		account_idle_ticks(ticks);
#endif
}

/**
 * tick_nohz_idle_exit - restart the idle tick from the idle task
 *
 * Restart the idle tick when the CPU is woken up from idle.
 * This also exits the RCU extended quiescent state. The CPU
 * can use RCU again after this function is called.
 */
void tick_nohz_idle_exit(void)
{
	int cpu = smp_processor_id();
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	ktime_t now;

	local_irq_disable();

	WARN_ON_ONCE(!ts->inidle);

	ts->inidle = 0;

	/* Cancel the timer because the CPU has already woken up from the C-states */
	menu_hrtimer_cancel();
	if (ts->idle_active || ts->tick_stopped)
		now = ktime_get();

	if (ts->idle_active)
		tick_nohz_stop_idle(cpu, now);

	if (ts->tick_stopped) {
		tick_nohz_restart_sched_tick(ts, now);
		tick_nohz_account_idle_ticks(ts);
	}

	local_irq_enable();
}
EXPORT_SYMBOL_GPL(tick_nohz_idle_exit);

static int tick_nohz_reprogram(struct tick_sched *ts, ktime_t now)
{
	hrtimer_forward(&ts->sched_timer, now, tick_period);
	return tick_program_event(hrtimer_get_expires(&ts->sched_timer), 0);
}

/*
 * The nohz low res interrupt handler
 */
static void tick_nohz_handler(struct clock_event_device *dev)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
	struct pt_regs *regs = get_irq_regs();
	ktime_t now = ktime_get();

	dev->next_event.tv64 = KTIME_MAX;

	tick_sched_do_timer(now);
	tick_sched_handle(ts, regs);

	while (tick_nohz_reprogram(ts, now)) {
		now = ktime_get();
		tick_do_update_jiffies64(now);
	}
}

/**
 * tick_nohz_switch_to_nohz - switch to nohz mode
 */
static void tick_nohz_switch_to_nohz(void)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
	ktime_t next;

	if (!tick_nohz_enabled)
		return;

	local_irq_disable();
	if (tick_switch_to_oneshot(tick_nohz_handler)) {
		local_irq_enable();
		return;
	}

	ts->nohz_mode = NOHZ_MODE_LOWRES;

	/*
	 * Recycle the hrtimer in ts, so we can share the
	 * hrtimer_forward with the highres code.
	 */
	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	/* Get the next period */
	next = tick_init_jiffy_update();

	for (;;) {
		hrtimer_set_expires(&ts->sched_timer, next);
		if (!tick_program_event(next, 0))
			break;
		next = ktime_add(next, tick_period);
	}
	local_irq_enable();
}

/*
 * When NOHZ is enabled and the tick is stopped, we need to kick the
 * tick timer from irq_enter() so that the jiffies update is kept
 * alive during long running softirqs. That's ugly as hell, but
 * correctness is key even if we need to fix the offending softirq in
 * the first place.
 *
 * Note, this is different to tick_nohz_restart. We just kick the
 * timer and do not touch the other magic bits which need to be done
 * when idle is left.
 */
static void tick_nohz_kick_tick(int cpu, ktime_t now)
{
#if 0
	/* Switch back to 2.6.27 behaviour */

	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	ktime_t delta;

	/*
	 * Do not touch the tick device, when the next expiry is either
	 * already reached or less/equal than the tick period.
	 */
	delta = ktime_sub(hrtimer_get_expires(&ts->sched_timer), now);
	if (delta.tv64 <= tick_period.tv64)
		return;

	tick_nohz_restart(ts, now);
#endif
}

static inline void tick_check_nohz(int cpu)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	ktime_t now;

	if (!ts->idle_active && !ts->tick_stopped)
		return;
	now = ktime_get();
	if (ts->idle_active)
		tick_nohz_stop_idle(cpu, now);
	if (ts->tick_stopped) {
		tick_nohz_update_jiffies(now);
		tick_nohz_kick_tick(cpu, now);
	}
}

#else

static inline void tick_nohz_switch_to_nohz(void) { }
static inline void tick_check_nohz(int cpu) { }

#endif /* CONFIG_NO_HZ_COMMON */

/*
 * Called from irq_enter to notify about the possible interruption of idle()
 */
void tick_check_idle(int cpu)
{
	tick_check_oneshot_broadcast(cpu);
	tick_check_nohz(cpu);
}

/*
 * High resolution timer specific code
 */
#ifdef CONFIG_HIGH_RES_TIMERS
/*
 * We rearm the timer until we get disabled by the idle code.
 * Called with interrupts disabled.
 */
static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
{
	struct tick_sched *ts =
		container_of(timer, struct tick_sched, sched_timer);
	struct pt_regs *regs = get_irq_regs();
	ktime_t now = ktime_get();

	tick_sched_do_timer(now);

	/*
	 * Do not call, when we are not in irq context and have
	 * no valid regs pointer
	 */
	if (regs)
		tick_sched_handle(ts, regs);

	hrtimer_forward(timer, now, tick_period);

	return HRTIMER_RESTART;
}

static int sched_skew_tick;

static int __init skew_tick(char *str)
{
	get_option(&str, &sched_skew_tick);

	return 0;
}
early_param("skew_tick", skew_tick);
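
/*
 * Illustrative usage (not part of the original source): booting with
 * "skew_tick=1" enables the per-CPU tick offset applied in
 * tick_setup_sched_timer() below, staggering the per-CPU ticks to
 * reduce jiffies_lock contention on larger systems.
 */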

/**
 * tick_setup_sched_timer - setup the tick emulation timer
 */
void tick_setup_sched_timer(void)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
	ktime_t now = ktime_get();

	/*
	 * Emulate tick processing via per-CPU hrtimers:
	 */
	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	ts->sched_timer.function = tick_sched_timer;

	/* Get the next period (per cpu) */
	hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());

	/* Offset the tick to avert jiffies_lock contention. */
	if (sched_skew_tick) {
		u64 offset = ktime_to_ns(tick_period) >> 1;
		do_div(offset, num_possible_cpus());
		offset *= smp_processor_id();
		hrtimer_add_expires_ns(&ts->sched_timer, offset);
	}
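
	/*
	 * Worked example (illustrative, not from the original source):
	 * with HZ=1000 (tick_period = 1 ms) and four possible CPUs, the
	 * block above skews the first expiry by 0, 125, 250 and 375 us
	 * on CPUs 0-3 respectively, so they do not all contend for
	 * jiffies_lock on the same edge.
	 */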

	for (;;) {
		hrtimer_forward(&ts->sched_timer, now, tick_period);
		hrtimer_start_expires(&ts->sched_timer,
				      HRTIMER_MODE_ABS_PINNED);
		/* Check, if the timer was already in the past */
		if (hrtimer_active(&ts->sched_timer))
			break;
		now = ktime_get();
	}

#ifdef CONFIG_NO_HZ_COMMON
	if (tick_nohz_enabled)
		ts->nohz_mode = NOHZ_MODE_HIGHRES;
#endif
}
#endif /* HIGH_RES_TIMERS */

#if defined CONFIG_NO_HZ_COMMON || defined CONFIG_HIGH_RES_TIMERS
void tick_cancel_sched_timer(int cpu)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);

# ifdef CONFIG_HIGH_RES_TIMERS
	if (ts->sched_timer.base)
		hrtimer_cancel(&ts->sched_timer);
# endif

	ts->nohz_mode = NOHZ_MODE_INACTIVE;
}
#endif

/**
 * Async notification about clocksource changes
 */
void tick_clock_notify(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		set_bit(0, &per_cpu(tick_cpu_sched, cpu).check_clocks);
}

/*
 * Async notification about clock event changes
 */
void tick_oneshot_notify(void)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);

	set_bit(0, &ts->check_clocks);
}

/**
 * Check if a change happened which makes oneshot possible.
 *
 * Called cyclically from the hrtimer softirq (driven by the timer
 * softirq). allow_nohz signals that we can switch into low-res nohz
 * mode, because high resolution timers are disabled (either at compile
 * time or at runtime).
 */
int tick_check_oneshot_change(int allow_nohz)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);

	if (!test_and_clear_bit(0, &ts->check_clocks))
		return 0;

	if (ts->nohz_mode != NOHZ_MODE_INACTIVE)
		return 0;

	if (!timekeeping_valid_for_hres() || !tick_is_oneshot_available())
		return 0;

	if (!allow_nohz)
		return 1;

	tick_nohz_switch_to_nohz();
	return 0;
}