/*
 * linux/kernel/time/tick-sched.c
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007  Timesys Corp., Thomas Gleixner
 *
 * No idle tick implementation for low and high resolution timers
 *
 * Started by: Thomas Gleixner and Ingo Molnar
 *
 * Distribute under GPLv2.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/nmi.h>
#include <linux/profile.h>
#include <linux/sched/signal.h>
#include <linux/sched/clock.h>
#include <linux/sched/stat.h>
#include <linux/sched/nohz.h>
#include <linux/module.h>
#include <linux/irq_work.h>
#include <linux/posix-timers.h>
#include <linux/context_tracking.h>
#include <linux/mm.h>

#include <asm/irq_regs.h>

#include "tick-internal.h"

#include <trace/events/timer.h>
/*
 * Per-CPU nohz control structure
 */
static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);

struct tick_sched *tick_get_tick_sched(int cpu)
{
	return &per_cpu(tick_cpu_sched, cpu);
}
#if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
/*
 * The time, when the last jiffy update happened. Protected by jiffies_lock.
 */
static ktime_t last_jiffies_update;

/*
 * Called after resume. Make sure that jiffies are not fast forwarded due to
 * clock monotonic being forwarded by the suspended time.
 */
void tick_sched_forward_next_period(void)
{
	last_jiffies_update = tick_next_period;
}
/*
 * Must be called with interrupts disabled !
 */
static void tick_do_update_jiffies64(ktime_t now)
{
	unsigned long ticks = 0;
	ktime_t delta;

	/*
	 * Do a quick check without holding jiffies_lock:
	 */
	delta = ktime_sub(now, last_jiffies_update);
	if (delta < tick_period)
		return;

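	/*
	 * Note (added commentary): several CPUs may pass the lockless check
	 * above at almost the same time; the recheck under jiffies_lock
	 * below makes sure that only one of them actually advances jiffies.
	 */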
	/* Reevaluate with jiffies_lock held */
	write_seqlock(&jiffies_lock);

	delta = ktime_sub(now, last_jiffies_update);
	if (delta >= tick_period) {

		delta = ktime_sub(delta, tick_period);
		last_jiffies_update = ktime_add(last_jiffies_update,
						tick_period);

		/* Slow path for long timeouts */
		if (unlikely(delta >= tick_period)) {
			s64 incr = ktime_to_ns(tick_period);

			ticks = ktime_divns(delta, incr);

			last_jiffies_update = ktime_add_ns(last_jiffies_update,
							   incr * ticks);
		}
		do_timer(++ticks);

		/* Keep the tick_next_period variable up to date */
		tick_next_period = ktime_add(last_jiffies_update, tick_period);
	} else {
		write_sequnlock(&jiffies_lock);
		return;
	}
	write_sequnlock(&jiffies_lock);
	update_wall_time();
}

/*
 * Initialize and retrieve the jiffies update.
 */
static ktime_t tick_init_jiffy_update(void)
{
	ktime_t period;

	write_seqlock(&jiffies_lock);
	/* Did we start the jiffies update yet ? */
	if (last_jiffies_update == 0)
		last_jiffies_update = tick_next_period;
	period = last_jiffies_update;
	write_sequnlock(&jiffies_lock);
	return period;
}

static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now)
{
	int cpu = smp_processor_id();

#ifdef CONFIG_NO_HZ_COMMON
	/*
	 * Check if the do_timer duty was dropped. We don't care about
	 * concurrency: This happens only when the CPU in charge went
	 * into a long sleep. If two CPUs happen to assign themselves to
	 * this duty, then the jiffies update is still serialized by
	 * jiffies_lock.
	 */
	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)
	    && !tick_nohz_full_cpu(cpu))
		tick_do_timer_cpu = cpu;
#endif

	/* Check, if the jiffies need an update */
	if (tick_do_timer_cpu == cpu)
		tick_do_update_jiffies64(now);

	if (ts->inidle)
		ts->got_idle_tick = 1;
}

static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
{
#ifdef CONFIG_NO_HZ_COMMON
	/*
	 * When we are idle and the tick is stopped, we have to touch
	 * the watchdog as we might not schedule for a really long
	 * time. This happens on complete idle SMP systems while
	 * waiting on the login prompt. We also increment the "start of
	 * idle" jiffy stamp so the idle accounting adjustment we do
	 * when we go busy again does not account too many ticks.
	 */
	if (ts->tick_stopped) {
		touch_softlockup_watchdog_sched();
		if (is_idle_task(current))
			ts->idle_jiffies++;
		/*
		 * In case the current tick fired too early past its expected
		 * expiration, make sure we don't bypass the next clock reprogramming
		 * to the same deadline.
		 */
		ts->next_tick = 0;
	}
#endif
	update_process_times(user_mode(regs));
	profile_tick(CPU_PROFILING);
}
#endif

#ifdef CONFIG_NO_HZ_FULL
cpumask_var_t tick_nohz_full_mask;
bool tick_nohz_full_running;
static atomic_t tick_dep_mask;

static bool check_tick_dependency(atomic_t *dep)
{
	int val = atomic_read(dep);

	if (val & TICK_DEP_MASK_POSIX_TIMER) {
		trace_tick_stop(0, TICK_DEP_MASK_POSIX_TIMER);
		return true;
	}

	if (val & TICK_DEP_MASK_PERF_EVENTS) {
		trace_tick_stop(0, TICK_DEP_MASK_PERF_EVENTS);
		return true;
	}

	if (val & TICK_DEP_MASK_SCHED) {
		trace_tick_stop(0, TICK_DEP_MASK_SCHED);
		return true;
	}

	if (val & TICK_DEP_MASK_CLOCK_UNSTABLE) {
		trace_tick_stop(0, TICK_DEP_MASK_CLOCK_UNSTABLE);
		return true;
	}

	return false;
}

static bool can_stop_full_tick(int cpu, struct tick_sched *ts)
{
	lockdep_assert_irqs_disabled();

	if (unlikely(!cpu_online(cpu)))
		return false;

	if (check_tick_dependency(&tick_dep_mask))
		return false;

	if (check_tick_dependency(&ts->tick_dep_mask))
		return false;

	if (check_tick_dependency(&current->tick_dep_mask))
		return false;

	if (check_tick_dependency(&current->signal->tick_dep_mask))
		return false;

	return true;
}

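/*
 * Note (added commentary): can_stop_full_tick() walks the dependency masks
 * from widest to narrowest scope: global (tick_dep_mask), per CPU (ts),
 * per task (current) and per process (current->signal). Any bit set at
 * any level keeps the tick running on this CPU.
 */
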
static void nohz_full_kick_func(struct irq_work *work)
{
	/* Empty, the tick restart happens on tick_nohz_irq_exit() */
}

static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
	.func = nohz_full_kick_func,
};

/*
 * Kick this CPU if it's full dynticks in order to force it to
 * re-evaluate its dependency on the tick and restart it if necessary.
 * This kick, unlike tick_nohz_full_kick_cpu() and tick_nohz_full_kick_all(),
 * is NMI safe.
 */
static void tick_nohz_full_kick(void)
{
	if (!tick_nohz_full_cpu(smp_processor_id()))
		return;

	irq_work_queue(this_cpu_ptr(&nohz_full_kick_work));
}

/*
 * Kick the CPU if it's full dynticks in order to force it to
 * re-evaluate its dependency on the tick and restart it if necessary.
 */
void tick_nohz_full_kick_cpu(int cpu)
{
	if (!tick_nohz_full_cpu(cpu))
		return;

	irq_work_queue_on(&per_cpu(nohz_full_kick_work, cpu), cpu);
}

/*
 * Kick all full dynticks CPUs in order to force these to re-evaluate
 * their dependency on the tick and restart it if necessary.
 */
static void tick_nohz_full_kick_all(void)
{
	int cpu;

	if (!tick_nohz_full_running)
		return;

	preempt_disable();
	for_each_cpu_and(cpu, tick_nohz_full_mask, cpu_online_mask)
		tick_nohz_full_kick_cpu(cpu);
	preempt_enable();
}

static void tick_nohz_dep_set_all(atomic_t *dep,
				  enum tick_dep_bits bit)
{
	int prev;

	prev = atomic_fetch_or(BIT(bit), dep);
	if (!prev)
		tick_nohz_full_kick_all();
}

/*
 * Set a global tick dependency. Used by perf events that rely on freq and
 * by unstable clock.
 */
void tick_nohz_dep_set(enum tick_dep_bits bit)
{
	tick_nohz_dep_set_all(&tick_dep_mask, bit);
}

void tick_nohz_dep_clear(enum tick_dep_bits bit)
{
	atomic_andnot(BIT(bit), &tick_dep_mask);
}

/*
 * Set per-CPU tick dependency. Used by scheduler and perf events in order to
 * manage events throttling.
 */
void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit)
{
	int prev;
	struct tick_sched *ts;

	ts = per_cpu_ptr(&tick_cpu_sched, cpu);

	prev = atomic_fetch_or(BIT(bit), &ts->tick_dep_mask);
	if (!prev) {
		preempt_disable();
		/* Perf needs local kick that is NMI safe */
		if (cpu == smp_processor_id()) {
			tick_nohz_full_kick();
		} else {
			/* Remote irq work not NMI-safe */
			if (!WARN_ON_ONCE(in_nmi()))
				tick_nohz_full_kick_cpu(cpu);
		}
		preempt_enable();
	}
}

void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit)
{
	struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu);

	atomic_andnot(BIT(bit), &ts->tick_dep_mask);
}

/*
 * Set a per-task tick dependency. Posix CPU timers need this in order to elapse
 * per task timers.
 */
void tick_nohz_dep_set_task(struct task_struct *tsk, enum tick_dep_bits bit)
{
	/*
	 * We could optimize this with just kicking the target running the task
	 * if that noise matters for nohz full users.
	 */
	tick_nohz_dep_set_all(&tsk->tick_dep_mask, bit);
}

void tick_nohz_dep_clear_task(struct task_struct *tsk, enum tick_dep_bits bit)
{
	atomic_andnot(BIT(bit), &tsk->tick_dep_mask);
}

/*
 * Set a per-taskgroup tick dependency. Posix CPU timers need this in order to elapse
 * per process timers.
 */
void tick_nohz_dep_set_signal(struct signal_struct *sig, enum tick_dep_bits bit)
{
	tick_nohz_dep_set_all(&sig->tick_dep_mask, bit);
}

void tick_nohz_dep_clear_signal(struct signal_struct *sig, enum tick_dep_bits bit)
{
	atomic_andnot(BIT(bit), &sig->tick_dep_mask);
}

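/*
 * Illustrative only (added commentary, no caller in this file): a
 * subsystem owning one of the enum tick_dep_bits values would typically
 * bracket its tick-dependent activity like this:
 *
 *	tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_PERF_EVENTS);
 *	...work that requires a periodic tick on that CPU...
 *	tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_PERF_EVENTS);
 */
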
/*
 * Re-evaluate the need for the tick as we switch the current task.
 * It might need the tick due to per task/process properties:
 * perf events, posix CPU timers, ...
 */
void __tick_nohz_task_switch(void)
{
	unsigned long flags;
	struct tick_sched *ts;

	local_irq_save(flags);

	if (!tick_nohz_full_cpu(smp_processor_id()))
		goto out;

	ts = this_cpu_ptr(&tick_cpu_sched);

	if (ts->tick_stopped) {
		if (atomic_read(&current->tick_dep_mask) ||
		    atomic_read(&current->signal->tick_dep_mask))
			tick_nohz_full_kick();
	}
out:
	local_irq_restore(flags);
}

/* Get the boot-time nohz CPU list from the kernel parameters. */
void __init tick_nohz_full_setup(cpumask_var_t cpumask)
{
	alloc_bootmem_cpumask_var(&tick_nohz_full_mask);
	cpumask_copy(tick_nohz_full_mask, cpumask);
	tick_nohz_full_running = true;
}

static int tick_nohz_cpu_down(unsigned int cpu)
{
	/*
	 * The boot CPU handles housekeeping duty (unbound timers,
	 * workqueues, timekeeping, ...) on behalf of full dynticks
	 * CPUs. It must remain online when nohz full is enabled.
	 */
	if (tick_nohz_full_running && tick_do_timer_cpu == cpu)
		return -EBUSY;
	return 0;
}

void __init tick_nohz_init(void)
{
	int cpu, ret;

	if (!tick_nohz_full_running)
		return;

	/*
	 * Full dynticks uses irq work to drive the tick rescheduling on safe
	 * locking contexts. But then we need irq work to raise its own
	 * interrupts to avoid circular dependency on the tick
	 */
	if (!arch_irq_work_has_interrupt()) {
		pr_warn("NO_HZ: Can't run full dynticks because arch doesn't support irq work self-IPIs\n");
		cpumask_clear(tick_nohz_full_mask);
		tick_nohz_full_running = false;
		return;
	}

	cpu = smp_processor_id();

	if (cpumask_test_cpu(cpu, tick_nohz_full_mask)) {
		pr_warn("NO_HZ: Clearing %d from nohz_full range for timekeeping\n",
			cpu);
		cpumask_clear_cpu(cpu, tick_nohz_full_mask);
	}

	for_each_cpu(cpu, tick_nohz_full_mask)
		context_tracking_cpu_set(cpu);

	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					"kernel/nohz:predown", NULL,
					tick_nohz_cpu_down);
	WARN_ON(ret < 0);
	pr_info("NO_HZ: Full dynticks CPUs: %*pbl.\n",
		cpumask_pr_args(tick_nohz_full_mask));
}
#endif

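/*
 * Illustrative only (added commentary): full dynticks is normally enabled
 * at boot with a command line option such as "nohz_full=1-7", which
 * provides the cpumask that tick_nohz_full_setup() above copies into
 * tick_nohz_full_mask.
 */
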
/*
 * NOHZ - aka dynamic tick functionality
 */
#ifdef CONFIG_NO_HZ_COMMON
/*
 * NO HZ enabled ?
 */
bool tick_nohz_enabled __read_mostly = true;
unsigned long tick_nohz_active __read_mostly;
/*
 * Enable / Disable tickless mode
 */
static int __init setup_tick_nohz(char *str)
{
	return (kstrtobool(str, &tick_nohz_enabled) == 0);
}

__setup("nohz=", setup_tick_nohz);

bool tick_nohz_tick_stopped(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	return ts->tick_stopped;
}

bool tick_nohz_tick_stopped_cpu(int cpu)
{
	struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu);

	return ts->tick_stopped;
}

/**
 * tick_nohz_update_jiffies - update jiffies when idle was interrupted
 *
 * Called from interrupt entry when the CPU was idle
 *
 * In case the sched_tick was stopped on this CPU, we have to check if jiffies
 * must be updated. Otherwise an interrupt handler could use a stale jiffy
 * value. We do this unconditionally on any CPU, as we don't know whether the
 * CPU, which has the update task assigned is in a long sleep.
 */
static void tick_nohz_update_jiffies(ktime_t now)
{
	unsigned long flags;

	__this_cpu_write(tick_cpu_sched.idle_waketime, now);

	local_irq_save(flags);
	tick_do_update_jiffies64(now);
	local_irq_restore(flags);

	touch_softlockup_watchdog_sched();
}

/*
 * Updates the per-CPU time idle statistics counters
 */
static void
update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_update_time)
{
	ktime_t delta;

	if (ts->idle_active) {
		delta = ktime_sub(now, ts->idle_entrytime);
		if (nr_iowait_cpu(cpu) > 0)
			ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta);
		else
			ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
		ts->idle_entrytime = now;
	}

	if (last_update_time)
		*last_update_time = ktime_to_us(now);
}

static void tick_nohz_stop_idle(struct tick_sched *ts, ktime_t now)
{
	update_ts_time_stats(smp_processor_id(), ts, now, NULL);
	ts->idle_active = 0;

	sched_clock_idle_wakeup_event();
}

static void tick_nohz_start_idle(struct tick_sched *ts)
{
	ts->idle_entrytime = ktime_get();
	ts->idle_active = 1;
	sched_clock_idle_sleep_event();
}

/**
 * get_cpu_idle_time_us - get the total idle time of a CPU
 * @cpu: CPU number to query
 * @last_update_time: variable to store update time in. Do not update
 * counters if NULL.
 *
 * Return the cumulative idle time (since boot) for a given
 * CPU, in microseconds.
 *
 * This time is measured via accounting rather than sampling,
 * and is as accurate as ktime_get() is.
 *
 * This function returns -1 if NOHZ is not enabled.
 */
u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	ktime_t now, idle;

	if (!tick_nohz_active)
		return -1;

	now = ktime_get();
	if (last_update_time) {
		update_ts_time_stats(cpu, ts, now, last_update_time);
		idle = ts->idle_sleeptime;
	} else {
		if (ts->idle_active && !nr_iowait_cpu(cpu)) {
			ktime_t delta = ktime_sub(now, ts->idle_entrytime);

			idle = ktime_add(ts->idle_sleeptime, delta);
		} else {
			idle = ts->idle_sleeptime;
		}
	}

	return ktime_to_us(idle);
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time_us);

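/*
 * Illustrative only (added commentary): consumers such as cpufreq
 * governors usually sample this accessor twice and work with the delta:
 *
 *	u64 wall, idle_start, idle_now;
 *
 *	idle_start = get_cpu_idle_time_us(cpu, &wall);
 *	...measurement interval...
 *	idle_now = get_cpu_idle_time_us(cpu, &wall);
 *	...idle time in the interval is idle_now - idle_start, in usecs...
 */
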
/**
 * get_cpu_iowait_time_us - get the total iowait time of a CPU
 * @cpu: CPU number to query
 * @last_update_time: variable to store update time in. Do not update
 * counters if NULL.
 *
 * Return the cumulative iowait time (since boot) for a given
 * CPU, in microseconds.
 *
 * This time is measured via accounting rather than sampling,
 * and is as accurate as ktime_get() is.
 *
 * This function returns -1 if NOHZ is not enabled.
 */
u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	ktime_t now, iowait;

	if (!tick_nohz_active)
		return -1;

	now = ktime_get();
	if (last_update_time) {
		update_ts_time_stats(cpu, ts, now, last_update_time);
		iowait = ts->iowait_sleeptime;
	} else {
		if (ts->idle_active && nr_iowait_cpu(cpu) > 0) {
			ktime_t delta = ktime_sub(now, ts->idle_entrytime);

			iowait = ktime_add(ts->iowait_sleeptime, delta);
		} else {
			iowait = ts->iowait_sleeptime;
		}
	}

	return ktime_to_us(iowait);
}
EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);

static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
{
	hrtimer_cancel(&ts->sched_timer);
	hrtimer_set_expires(&ts->sched_timer, ts->last_tick);

	/* Forward the time to expire in the future */
	hrtimer_forward(&ts->sched_timer, now, tick_period);

	if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
		hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED);
	else
		tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);

	/*
	 * Reset to make sure next tick stop doesn't get fooled by past
	 * cached clock deadline.
	 */
	ts->next_tick = 0;
}

static inline bool local_timer_softirq_pending(void)
{
	/*
	 * TIMER_SOFTIRQ is a bit number, not a mask; testing the mask
	 * requires BIT(). (The original "& TIMER_SOFTIRQ" accidentally
	 * tested the HI_SOFTIRQ bit instead.)
	 */
	return local_softirq_pending() & BIT(TIMER_SOFTIRQ);
}

static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
{
	u64 basemono, next_tick, next_tmr, next_rcu, delta, expires;
	unsigned long seq, basejiff;

	/* Read jiffies and the time when jiffies were updated last */
	do {
		seq = read_seqbegin(&jiffies_lock);
		basemono = last_jiffies_update;
		basejiff = jiffies;
	} while (read_seqretry(&jiffies_lock, seq));
	ts->last_jiffies = basejiff;
	ts->timer_expires_base = basemono;

	/*
	 * Keep the periodic tick, when RCU, architecture or irq_work
	 * requests it.
	 * Aside of that check whether the local timer softirq is
	 * pending. If so, it's a bad idea to call get_next_timer_interrupt()
	 * because there is an already expired timer, so it will request
	 * immediate expiry, which rearms the hardware timer with a
	 * minimal delta which brings us back to this place
	 * immediately. Lather, rinse and repeat...
	 */
	if (rcu_needs_cpu(basemono, &next_rcu) || arch_needs_cpu() ||
	    irq_work_needs_cpu() || local_timer_softirq_pending()) {
		next_tick = basemono + TICK_NSEC;
	} else {
		/*
		 * Get the next pending timer. If high resolution
		 * timers are enabled this only takes the timer wheel
		 * timers into account. If high resolution timers are
		 * disabled this also looks at the next expiring
		 * hrtimer.
		 */
		next_tmr = get_next_timer_interrupt(basejiff, basemono);
		ts->next_timer = next_tmr;
		/* Take the next rcu event into account */
		next_tick = next_rcu < next_tmr ? next_rcu : next_tmr;
	}

	/*
	 * If the tick is due in the next period, keep it ticking or
	 * force prod the timer.
	 */
	delta = next_tick - basemono;
	if (delta <= (u64)TICK_NSEC) {
		/*
		 * Tell the timer code that the base is not idle, i.e. undo
		 * the effect of get_next_timer_interrupt():
		 */
		timer_clear_idle();
		/*
		 * We've not stopped the tick yet, and there's a timer in the
		 * next period, so no point in stopping it either, bail.
		 */
		if (!ts->tick_stopped) {
			ts->timer_expires = 0;
			goto out;
		}
	}

	/*
	 * If this CPU is the one which had the do_timer() duty last, we limit
	 * the sleep time to the timekeeping max_deferment value.
	 * Otherwise we can sleep as long as we want.
	 */
	delta = timekeeping_max_deferment();
	if (cpu != tick_do_timer_cpu &&
	    (tick_do_timer_cpu != TICK_DO_TIMER_NONE || !ts->do_timer_last))
		delta = KTIME_MAX;

	/* Calculate the next expiry time */
	if (delta < (KTIME_MAX - basemono))
		expires = basemono + delta;
	else
		expires = KTIME_MAX;

	ts->timer_expires = min_t(u64, expires, next_tick);

out:
	return ts->timer_expires;
}

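/*
 * Note (added commentary): tick_nohz_next_event() therefore returns
 * either 0 (the tick is kept and nothing needs to be stopped) or an
 * absolute expiry in nanoseconds, clamped by the next timer/RCU event
 * and, for a CPU which may have to carry the do_timer() duty, by
 * timekeeping_max_deferment().
 */
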
static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu)
{
	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
	u64 basemono = ts->timer_expires_base;
	u64 expires = ts->timer_expires;
	ktime_t tick = expires;

	/* Make sure we won't be trying to stop it twice in a row. */
	ts->timer_expires_base = 0;

	/*
	 * If this CPU is the one which updates jiffies, then give up
	 * the assignment and let it be taken by the CPU which runs
	 * the tick timer next, which might be this CPU as well. If we
	 * don't drop this here the jiffies might be stale and
	 * do_timer() never invoked. Keep track of the fact that it
	 * was the one which had the do_timer() duty last.
	 */
	if (cpu == tick_do_timer_cpu) {
		tick_do_timer_cpu = TICK_DO_TIMER_NONE;
		ts->do_timer_last = 1;
	} else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) {
		ts->do_timer_last = 0;
	}

	/* Skip reprogram of event if it's not changed */
	if (ts->tick_stopped && (expires == ts->next_tick)) {
		/* Sanity check: make sure clockevent is actually programmed */
		if (tick == KTIME_MAX || ts->next_tick == hrtimer_get_expires(&ts->sched_timer))
			return;

		WARN_ON_ONCE(1);
		printk_once("basemono: %llu ts->next_tick: %llu dev->next_event: %llu timer->active: %d timer->expires: %llu\n",
			    basemono, ts->next_tick, dev->next_event,
			    hrtimer_active(&ts->sched_timer), hrtimer_get_expires(&ts->sched_timer));
	}

	/*
	 * nohz_stop_sched_tick can be called several times before
	 * the nohz_restart_sched_tick is called. This happens when
	 * interrupts arrive which do not cause a reschedule. In the
	 * first call we save the current tick time, so we can restart
	 * the scheduler tick in nohz_restart_sched_tick.
	 */
	if (!ts->tick_stopped) {
		calc_load_nohz_start();
		cpu_load_update_nohz_start();
		quiet_vmstat();

		ts->last_tick = hrtimer_get_expires(&ts->sched_timer);
		ts->tick_stopped = 1;
		trace_tick_stop(1, TICK_DEP_MASK_NONE);
	}

	ts->next_tick = tick;

	/*
	 * If the expiration time == KTIME_MAX, then we simply stop
	 * the tick timer.
	 */
	if (unlikely(expires == KTIME_MAX)) {
		if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
			hrtimer_cancel(&ts->sched_timer);
		return;
	}

	if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
		hrtimer_start(&ts->sched_timer, tick, HRTIMER_MODE_ABS_PINNED);
	} else {
		hrtimer_set_expires(&ts->sched_timer, tick);
		tick_program_event(tick, 1);
	}
}

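/*
 * Note (added commentary): tick_nohz_stop_tick() relies on a preceding
 * tick_nohz_next_event() call to have filled in ts->timer_expires and
 * ts->timer_expires_base; clearing ts->timer_expires_base at the top of
 * the function is what prevents a second stop without a fresh
 * re-evaluation.
 */
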
static void tick_nohz_retain_tick(struct tick_sched *ts)
{
	ts->timer_expires_base = 0;
}

#ifdef CONFIG_NO_HZ_FULL
static void tick_nohz_stop_sched_tick(struct tick_sched *ts, int cpu)
{
	if (tick_nohz_next_event(ts, cpu))
		tick_nohz_stop_tick(ts, cpu);
	else
		tick_nohz_retain_tick(ts);
}
#endif /* CONFIG_NO_HZ_FULL */

static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
{
	/* Update jiffies first */
	tick_do_update_jiffies64(now);
	cpu_load_update_nohz_stop();

	/*
	 * Clear the timer idle flag, so we avoid IPIs on remote queueing and
	 * the clock forward checks in the enqueue path:
	 */
	timer_clear_idle();

	calc_load_nohz_stop();
	touch_softlockup_watchdog_sched();
	/*
	 * Cancel the scheduled timer and restore the tick
	 */
	ts->tick_stopped = 0;
	ts->idle_exittime = now;

	tick_nohz_restart(ts, now);
}

static void tick_nohz_full_update_tick(struct tick_sched *ts)
{
#ifdef CONFIG_NO_HZ_FULL
	int cpu = smp_processor_id();

	if (!tick_nohz_full_cpu(cpu))
		return;

	if (!ts->tick_stopped && ts->nohz_mode == NOHZ_MODE_INACTIVE)
		return;

	if (can_stop_full_tick(cpu, ts))
		tick_nohz_stop_sched_tick(ts, cpu);
	else if (ts->tick_stopped)
		tick_nohz_restart_sched_tick(ts, ktime_get());
#endif
}

static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
{
	/*
	 * If this CPU is offline and it is the one which updates
	 * jiffies, then give up the assignment and let it be taken by
	 * the CPU which runs the tick timer next. If we don't drop
	 * this here the jiffies might be stale and do_timer() never
	 * invoked.
	 */
	if (unlikely(!cpu_online(cpu))) {
		if (cpu == tick_do_timer_cpu)
			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
		/*
		 * Make sure the CPU doesn't get fooled by obsolete tick
		 * deadline if it comes back online later.
		 */
		ts->next_tick = 0;
		return false;
	}

	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
		return false;

	if (need_resched())
		return false;

	if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
		static int ratelimit;

		if (ratelimit < 10 &&
		    (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) {
			pr_warn("NOHZ: local_softirq_pending %02x\n",
				(unsigned int) local_softirq_pending());
			ratelimit++;
		}
		return false;
	}

	if (tick_nohz_full_enabled()) {
		/*
		 * Keep the tick alive to guarantee timekeeping progression
		 * if there are full dynticks CPUs around
		 */
		if (tick_do_timer_cpu == cpu)
			return false;
		/*
		 * Boot safety: make sure the timekeeping duty has been
		 * assigned before entering dyntick-idle mode,
		 */
		if (tick_do_timer_cpu == TICK_DO_TIMER_NONE)
			return false;
	}

	return true;
}

static void __tick_nohz_idle_stop_tick(struct tick_sched *ts)
{
	ktime_t expires;
	int cpu = smp_processor_id();

	/*
	 * If tick_nohz_get_sleep_length() ran tick_nohz_next_event(), the
	 * tick timer expiration time is known already.
	 */
	if (ts->timer_expires_base)
		expires = ts->timer_expires;
	else if (can_stop_idle_tick(cpu, ts))
		expires = tick_nohz_next_event(ts, cpu);
	else
		return;

	ts->idle_calls++;

	if (expires > 0LL) {
		int was_stopped = ts->tick_stopped;

		tick_nohz_stop_tick(ts, cpu);

		ts->idle_sleeps++;
		ts->idle_expires = expires;

		if (!was_stopped && ts->tick_stopped) {
			ts->idle_jiffies = ts->last_jiffies;
			nohz_balance_enter_idle(cpu);
		}
	} else {
		tick_nohz_retain_tick(ts);
	}
}

/**
 * tick_nohz_idle_stop_tick - stop the idle tick from the idle task
 *
 * When the next event is more than a tick into the future, stop the idle tick
 */
void tick_nohz_idle_stop_tick(void)
{
	__tick_nohz_idle_stop_tick(this_cpu_ptr(&tick_cpu_sched));
}

void tick_nohz_idle_retain_tick(void)
{
	tick_nohz_retain_tick(this_cpu_ptr(&tick_cpu_sched));
	/*
	 * Undo the effect of get_next_timer_interrupt() called from
	 * tick_nohz_next_event().
	 */
	timer_clear_idle();
}

/**
 * tick_nohz_idle_enter - prepare for entering idle on the current CPU
 *
 * Called when we start the idle loop.
 */
void tick_nohz_idle_enter(void)
{
	struct tick_sched *ts;

	lockdep_assert_irqs_enabled();

	local_irq_disable();

	ts = this_cpu_ptr(&tick_cpu_sched);

	WARN_ON_ONCE(ts->timer_expires_base);

	ts->inidle = 1;
	tick_nohz_start_idle(ts);

	local_irq_enable();
}

/**
 * tick_nohz_irq_exit - update next tick event from interrupt exit
 *
 * When an interrupt fires while we are idle and it doesn't cause
 * a reschedule, it may still add, modify or delete a timer, enqueue
 * an RCU callback, etc...
 * So we need to re-calculate and reprogram the next tick event.
 */
void tick_nohz_irq_exit(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	if (ts->inidle)
		tick_nohz_start_idle(ts);
	else
		tick_nohz_full_update_tick(ts);
}

/**
 * tick_nohz_idle_got_tick - Check whether or not the tick handler has run
 */
bool tick_nohz_idle_got_tick(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	if (ts->got_idle_tick) {
		ts->got_idle_tick = 0;
		return true;
	}
	return false;
}

/**
 * tick_nohz_get_sleep_length - return the expected length of the current sleep
 * @delta_next: duration until the next event if the tick cannot be stopped
 *
 * Called from power state control code with interrupts disabled
 */
ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next)
{
	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	int cpu = smp_processor_id();
	/*
	 * The idle entry time is expected to be a sufficient approximation of
	 * the current time at this point.
	 */
	ktime_t now = ts->idle_entrytime;
	ktime_t next_event;

	WARN_ON_ONCE(!ts->inidle);

	*delta_next = ktime_sub(dev->next_event, now);

	if (!can_stop_idle_tick(cpu, ts))
		return *delta_next;

	next_event = tick_nohz_next_event(ts, cpu);
	if (!next_event)
		return *delta_next;

	/*
	 * If the next highres timer to expire is earlier than next_event, the
	 * idle governor needs to know that.
	 */
	next_event = min_t(u64, next_event,
			   hrtimer_next_event_without(&ts->sched_timer));

	return ktime_sub(next_event, now);
}

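/*
 * Illustrative only (added commentary): an idle governor (e.g. menu)
 * uses this on idle entry roughly as follows:
 *
 *	ktime_t delta_next;
 *	ktime_t sleep = tick_nohz_get_sleep_length(&delta_next);
 *
 * and then selects an idle state whose target residency fits within the
 * predicted sleep length.
 */
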
/**
 * tick_nohz_get_idle_calls_cpu - return the current idle calls counter value
 * for a particular CPU.
 *
 * Called from the schedutil frequency scaling governor in scheduler context.
 */
unsigned long tick_nohz_get_idle_calls_cpu(int cpu)
{
	struct tick_sched *ts = tick_get_tick_sched(cpu);

	return ts->idle_calls;
}

/**
 * tick_nohz_get_idle_calls - return the current idle calls counter value
 *
 * Called from the schedutil frequency scaling governor in scheduler context.
 */
unsigned long tick_nohz_get_idle_calls(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	return ts->idle_calls;
}

static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
{
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	unsigned long ticks;

	if (vtime_accounting_cpu_enabled())
		return;
	/*
	 * We stopped the tick in idle. update_process_times() would miss
	 * the time we slept, as it does only a single tick of accounting.
	 * Enforce that this is accounted to idle !
	 */
	ticks = jiffies - ts->idle_jiffies;
	/*
	 * We might be one off. Do not randomly account a huge number of ticks!
	 */
	if (ticks && ticks < LONG_MAX)
		account_idle_ticks(ticks);
#endif
}

static void __tick_nohz_idle_restart_tick(struct tick_sched *ts, ktime_t now)
{
	tick_nohz_restart_sched_tick(ts, now);
	tick_nohz_account_idle_ticks(ts);
}

void tick_nohz_idle_restart_tick(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	if (ts->tick_stopped)
		__tick_nohz_idle_restart_tick(ts, ktime_get());
}

/**
 * tick_nohz_idle_exit - restart the idle tick from the idle task
 *
 * Restart the idle tick when the CPU is woken up from idle
 * This also exits the RCU extended quiescent state. The CPU
 * can use RCU again after this function is called.
 */
void tick_nohz_idle_exit(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	bool idle_active, tick_stopped;
	ktime_t now;

	local_irq_disable();

	WARN_ON_ONCE(!ts->inidle);
	WARN_ON_ONCE(ts->timer_expires_base);

	ts->inidle = 0;
	idle_active = ts->idle_active;
	tick_stopped = ts->tick_stopped;

	if (idle_active || tick_stopped)
		now = ktime_get();

	if (idle_active)
		tick_nohz_stop_idle(ts, now);

	if (tick_stopped)
		__tick_nohz_idle_restart_tick(ts, now);

	local_irq_enable();
}

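/*
 * Illustrative only (added commentary): the idle loop in
 * kernel/sched/idle.c brackets each idle period roughly as
 *
 *	tick_nohz_idle_enter();
 *	while (!need_resched()) {
 *		...possibly tick_nohz_idle_stop_tick(), then enter an
 *		   idle state...
 *	}
 *	tick_nohz_idle_exit();
 */
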
/*
 * The nohz low res interrupt handler
 */
static void tick_nohz_handler(struct clock_event_device *dev)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	struct pt_regs *regs = get_irq_regs();
	ktime_t now = ktime_get();

	dev->next_event = KTIME_MAX;

	tick_sched_do_timer(ts, now);
	tick_sched_handle(ts, regs);

	/* No need to reprogram if we are running tickless */
	if (unlikely(ts->tick_stopped))
		return;

	hrtimer_forward(&ts->sched_timer, now, tick_period);
	tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
}

static inline void tick_nohz_activate(struct tick_sched *ts, int mode)
{
	if (!tick_nohz_enabled)
		return;
	ts->nohz_mode = mode;
	/* One update is enough */
	if (!test_and_set_bit(0, &tick_nohz_active))
		timers_update_nohz();
}

/**
 * tick_nohz_switch_to_nohz - switch to nohz mode
 */
static void tick_nohz_switch_to_nohz(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	ktime_t next;

	if (!tick_nohz_enabled)
		return;

	if (tick_switch_to_oneshot(tick_nohz_handler))
		return;

	/*
	 * Recycle the hrtimer in ts, so we can share the
	 * hrtimer_forward with the highres code.
	 */
	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	/* Get the next period */
	next = tick_init_jiffy_update();

	hrtimer_set_expires(&ts->sched_timer, next);
	hrtimer_forward_now(&ts->sched_timer, tick_period);
	tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
	tick_nohz_activate(ts, NOHZ_MODE_LOWRES);
}

static inline void tick_nohz_irq_enter(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	ktime_t now;

	if (!ts->idle_active && !ts->tick_stopped)
		return;
	now = ktime_get();
	if (ts->idle_active)
		tick_nohz_stop_idle(ts, now);
	if (ts->tick_stopped)
		tick_nohz_update_jiffies(now);
}

#else

static inline void tick_nohz_switch_to_nohz(void) { }
static inline void tick_nohz_irq_enter(void) { }
static inline void tick_nohz_activate(struct tick_sched *ts, int mode) { }

#endif /* CONFIG_NO_HZ_COMMON */

/*
 * Called from irq_enter to notify about the possible interruption of idle()
 */
void tick_irq_enter(void)
{
	tick_check_oneshot_broadcast_this_cpu();
	tick_nohz_irq_enter();
}

/*
 * High resolution timer specific code
 */
#ifdef CONFIG_HIGH_RES_TIMERS
/*
 * We rearm the timer until we get disabled by the idle code.
 * Called with interrupts disabled.
 */
static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
{
	struct tick_sched *ts =
		container_of(timer, struct tick_sched, sched_timer);
	struct pt_regs *regs = get_irq_regs();
	ktime_t now = ktime_get();

	tick_sched_do_timer(ts, now);

	/*
	 * Do not call, when we are not in irq context and have
	 * no valid regs pointer
	 */
	if (regs)
		tick_sched_handle(ts, regs);
	else
		ts->next_tick = 0;

	/* No need to reprogram if we are in idle or full dynticks mode */
	if (unlikely(ts->tick_stopped))
		return HRTIMER_NORESTART;

	hrtimer_forward(timer, now, tick_period);

	return HRTIMER_RESTART;
}

static int sched_skew_tick;

static int __init skew_tick(char *str)
{
	get_option(&str, &sched_skew_tick);

	return 0;
}
early_param("skew_tick", skew_tick);

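/*
 * Illustrative only (added commentary): booting with "skew_tick=1"
 * staggers the per-CPU tick timers (see the offset computation in
 * tick_setup_sched_timer() below), trading slightly worse power
 * behaviour for reduced jiffies_lock contention on larger SMP machines.
 */
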
/**
 * tick_setup_sched_timer - setup the tick emulation timer
 */
void tick_setup_sched_timer(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	ktime_t now = ktime_get();

	/*
	 * Emulate tick processing via per-CPU hrtimers:
	 */
	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	ts->sched_timer.function = tick_sched_timer;

	/* Get the next period (per-CPU) */
	hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());

	/* Offset the tick to avert jiffies_lock contention. */
	if (sched_skew_tick) {
		u64 offset = ktime_to_ns(tick_period) >> 1;
		do_div(offset, num_possible_cpus());
		offset *= smp_processor_id();
		hrtimer_add_expires_ns(&ts->sched_timer, offset);
	}

	hrtimer_forward(&ts->sched_timer, now, tick_period);
	hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED);
	tick_nohz_activate(ts, NOHZ_MODE_HIGHRES);
}
#endif /* HIGH_RES_TIMERS */

#if defined CONFIG_NO_HZ_COMMON || defined CONFIG_HIGH_RES_TIMERS
void tick_cancel_sched_timer(int cpu)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);

# ifdef CONFIG_HIGH_RES_TIMERS
	if (ts->sched_timer.base)
		hrtimer_cancel(&ts->sched_timer);
# endif

	memset(ts, 0, sizeof(*ts));
}
#endif

1350 | |
1351 | /** | |
1352 | * Async notification about clocksource changes | |
1353 | */ | |
1354 | void tick_clock_notify(void) | |
1355 | { | |
1356 | int cpu; | |
1357 | ||
1358 | for_each_possible_cpu(cpu) | |
1359 | set_bit(0, &per_cpu(tick_cpu_sched, cpu).check_clocks); | |
1360 | } | |
1361 | ||
1362 | /* | |
1363 | * Async notification about clock event changes | |
1364 | */ | |
1365 | void tick_oneshot_notify(void) | |
1366 | { | |
22127e93 | 1367 | struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); |
79bf2bb3 TG |
1368 | |
1369 | set_bit(0, &ts->check_clocks); | |
1370 | } | |
1371 | ||
/**
 * Check, if a change happened, which makes oneshot possible.
 *
 * Called cyclically from the hrtimer softirq (driven by the timer
 * softirq). allow_nohz signals that we can switch into low-res nohz
 * mode, because high resolution timers are disabled (either at compile
 * time or at runtime). Called with interrupts disabled.
 */
int tick_check_oneshot_change(int allow_nohz)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	if (!test_and_clear_bit(0, &ts->check_clocks))
		return 0;

	if (ts->nohz_mode != NOHZ_MODE_INACTIVE)
		return 0;

	if (!timekeeping_valid_for_hres() || !tick_is_oneshot_available())
		return 0;

	if (!allow_nohz)
		return 1;

	tick_nohz_switch_to_nohz();
	return 0;
}