Commit | Line | Data |
---|---|---|
79bf2bb3 TG |
1 | /* |
2 | * linux/kernel/time/tick-sched.c | |
3 | * | |
4 | * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de> | |
5 | * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar | |
6 | * Copyright(C) 2006-2007 Timesys Corp., Thomas Gleixner | |
7 | * | |
8 | * No idle tick implementation for low and high resolution timers | |
9 | * | |
10 | * Started by: Thomas Gleixner and Ingo Molnar | |
11 | * | |
b10db7f0 | 12 | * Distribute under GPLv2. |
79bf2bb3 TG |
13 | */ |
14 | #include <linux/cpu.h> | |
15 | #include <linux/err.h> | |
16 | #include <linux/hrtimer.h> | |
17 | #include <linux/interrupt.h> | |
18 | #include <linux/kernel_stat.h> | |
19 | #include <linux/percpu.h> | |
20 | #include <linux/profile.h> | |
21 | #include <linux/sched.h> | |
8083e4ad | 22 | #include <linux/module.h> |
00b42959 | 23 | #include <linux/irq_work.h> |
9014c45d | 24 | #include <linux/posix-timers.h> |
2e709338 | 25 | #include <linux/context_tracking.h> |
79bf2bb3 | 26 | |
9e203bcc DM |
27 | #include <asm/irq_regs.h> |
28 | ||
79bf2bb3 TG |
29 | #include "tick-internal.h" |
30 | ||
cb41a290 FW |
31 | #include <trace/events/timer.h> |
32 | ||
79bf2bb3 TG |
33 | /* |
34 | * Per cpu nohz control structure | |
35 | */ | |
c1797baf | 36 | static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched); |
79bf2bb3 | 37 | |
289f480a IM |
38 | struct tick_sched *tick_get_tick_sched(int cpu) |
39 | { | |
40 | return &per_cpu(tick_cpu_sched, cpu); | |
41 | } | |
42 | ||
7809998a AB |
43 | #if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS) |
44 | /* | |
45 | * The time when the last jiffy update happened. Protected by jiffies_lock. | |
46 | */ | |
47 | static ktime_t last_jiffies_update; | |
48 | ||
79bf2bb3 TG |
49 | /* |
50 | * Must be called with interrupts disabled ! | |
51 | */ | |
52 | static void tick_do_update_jiffies64(ktime_t now) | |
53 | { | |
54 | unsigned long ticks = 0; | |
55 | ktime_t delta; | |
56 | ||
7a14ce1d | 57 | /* |
d6ad4187 | 58 | * Do a quick check without holding jiffies_lock: |
7a14ce1d IM |
59 | */ |
60 | delta = ktime_sub(now, last_jiffies_update); | |
61 | if (delta.tv64 < tick_period.tv64) | |
62 | return; | |
63 | ||
d6ad4187 JS |
64 | /* Reevaluate with jiffies_lock held */ |
65 | write_seqlock(&jiffies_lock); | |
79bf2bb3 TG |
66 | |
67 | delta = ktime_sub(now, last_jiffies_update); | |
68 | if (delta.tv64 >= tick_period.tv64) { | |
69 | ||
70 | delta = ktime_sub(delta, tick_period); | |
71 | last_jiffies_update = ktime_add(last_jiffies_update, | |
72 | tick_period); | |
73 | ||
74 | /* Slow path for long timeouts */ | |
75 | if (unlikely(delta.tv64 >= tick_period.tv64)) { | |
76 | s64 incr = ktime_to_ns(tick_period); | |
77 | ||
78 | ticks = ktime_divns(delta, incr); | |
79 | ||
80 | last_jiffies_update = ktime_add_ns(last_jiffies_update, | |
81 | incr * ticks); | |
82 | } | |
83 | do_timer(++ticks); | |
49d670fb TG |
84 | |
85 | /* Keep the tick_next_period variable up to date */ | |
86 | tick_next_period = ktime_add(last_jiffies_update, tick_period); | |
03e6bdc5 VK |
87 | } else { |
88 | write_sequnlock(&jiffies_lock); | |
89 | return; | |
79bf2bb3 | 90 | } |
d6ad4187 | 91 | write_sequnlock(&jiffies_lock); |
47a1b796 | 92 | update_wall_time(); |
79bf2bb3 TG |
93 | } |
94 | ||
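A minimal standalone sketch of the catch-up arithmetic in tick_do_update_jiffies64() above (plain C, not kernel code; assumes HZ=1000 so one tick period is 1 ms): one period is consumed unconditionally, the slow path folds the remaining whole periods in via a division, and the sum is what do_timer() receives.

```c
#include <stdint.h>
#include <stdio.h>

#define TICK_NSEC 1000000ULL	/* assumed: HZ=1000, i.e. 1 ms per tick */

/* How many jiffies to fold in, given ns elapsed since the last update */
static uint64_t catch_up_ticks(uint64_t delta_ns)
{
	uint64_t ticks = 0;

	/* Quick check: no full period has elapsed, nothing to do */
	if (delta_ns < TICK_NSEC)
		return 0;

	delta_ns -= TICK_NSEC;		/* consume one period */
	if (delta_ns >= TICK_NSEC)	/* slow path for long sleeps */
		ticks = delta_ns / TICK_NSEC;

	return ticks + 1;		/* the do_timer(++ticks) equivalent */
}

int main(void)
{
	/* A CPU that slept for 3.5 tick periods advances jiffies by 3 */
	printf("%llu\n", (unsigned long long)catch_up_ticks(3500000ULL));
	return 0;
}
```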
95 | /* | |
96 | * Initialize and return the jiffies update. |
97 | */ | |
98 | static ktime_t tick_init_jiffy_update(void) | |
99 | { | |
100 | ktime_t period; | |
101 | ||
d6ad4187 | 102 | write_seqlock(&jiffies_lock); |
79bf2bb3 TG |
103 | /* Did we start the jiffies update yet ? */ |
104 | if (last_jiffies_update.tv64 == 0) | |
105 | last_jiffies_update = tick_next_period; | |
106 | period = last_jiffies_update; | |
d6ad4187 | 107 | write_sequnlock(&jiffies_lock); |
79bf2bb3 TG |
108 | return period; |
109 | } | |
110 | ||
5bb96226 FW |
111 | |
112 | static void tick_sched_do_timer(ktime_t now) | |
113 | { | |
114 | int cpu = smp_processor_id(); | |
115 | ||
3451d024 | 116 | #ifdef CONFIG_NO_HZ_COMMON |
5bb96226 FW |
117 | /* |
118 | * Check if the do_timer duty was dropped. We don't care about | |
119 | * concurrency: This happens only when the cpu in charge went | |
120 | * into a long sleep. If two cpus happen to assign themselves to |
121 | * this duty, then the jiffies update is still serialized by | |
9c3f9e28 | 122 | * jiffies_lock. |
5bb96226 | 123 | */ |
a382bf93 | 124 | if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE) |
c5bfece2 | 125 | && !tick_nohz_full_cpu(cpu)) |
5bb96226 FW |
126 | tick_do_timer_cpu = cpu; |
127 | #endif | |
128 | ||
129 | /* Check if the jiffies need an update */ |
130 | if (tick_do_timer_cpu == cpu) | |
131 | tick_do_update_jiffies64(now); | |
132 | } | |
133 | ||
9e8f559b FW |
134 | static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs) |
135 | { | |
3451d024 | 136 | #ifdef CONFIG_NO_HZ_COMMON |
9e8f559b FW |
137 | /* |
138 | * When we are idle and the tick is stopped, we have to touch | |
139 | * the watchdog as we might not schedule for a really long | |
140 | * time. This happens on completely idle SMP systems while |
141 | * waiting on the login prompt. We also increment the "start of | |
142 | * idle" jiffy stamp so the idle accounting adjustment we do | |
143 | * when we go busy again does not account too many ticks. |
144 | */ | |
145 | if (ts->tick_stopped) { | |
03e0d461 | 146 | touch_softlockup_watchdog_sched(); |
9e8f559b FW |
147 | if (is_idle_task(current)) |
148 | ts->idle_jiffies++; | |
149 | } | |
94a57140 | 150 | #endif |
9e8f559b FW |
151 | update_process_times(user_mode(regs)); |
152 | profile_tick(CPU_PROFILING); | |
153 | } | |
7809998a | 154 | #endif |
9e8f559b | 155 | |
c5bfece2 | 156 | #ifdef CONFIG_NO_HZ_FULL |
460775df | 157 | cpumask_var_t tick_nohz_full_mask; |
c0f489d2 | 158 | cpumask_var_t housekeeping_mask; |
73867dcd | 159 | bool tick_nohz_full_running; |
d027d45d | 160 | static unsigned long tick_dep_mask; |
a831881b | 161 | |
d027d45d FW |
162 | static void trace_tick_dependency(unsigned long dep) |
163 | { | |
164 | if (dep & TICK_DEP_MASK_POSIX_TIMER) { | |
e6e6cc22 | 165 | trace_tick_stop(0, TICK_DEP_MASK_POSIX_TIMER); |
d027d45d FW |
166 | return; |
167 | } | |
168 | ||
169 | if (dep & TICK_DEP_MASK_PERF_EVENTS) { | |
e6e6cc22 | 170 | trace_tick_stop(0, TICK_DEP_MASK_PERF_EVENTS); |
d027d45d FW |
171 | return; |
172 | } | |
173 | ||
174 | if (dep & TICK_DEP_MASK_SCHED) { | |
e6e6cc22 | 175 | trace_tick_stop(0, TICK_DEP_MASK_SCHED); |
d027d45d FW |
176 | return; |
177 | } | |
178 | ||
179 | if (dep & TICK_DEP_MASK_CLOCK_UNSTABLE) | |
e6e6cc22 | 180 | trace_tick_stop(0, TICK_DEP_MASK_CLOCK_UNSTABLE); |
d027d45d FW |
181 | } |
182 | ||
183 | static bool can_stop_full_tick(struct tick_sched *ts) | |
9014c45d FW |
184 | { |
185 | WARN_ON_ONCE(!irqs_disabled()); | |
186 | ||
d027d45d FW |
187 | if (tick_dep_mask) { |
188 | trace_tick_dependency(tick_dep_mask); | |
189 | return false; | |
190 | } | |
191 | ||
192 | if (ts->tick_dep_mask) { | |
193 | trace_tick_dependency(ts->tick_dep_mask); | |
194 | return false; | |
195 | } | |
196 | ||
197 | if (current->tick_dep_mask) { | |
198 | trace_tick_dependency(current->tick_dep_mask); | |
199 | return false; | |
200 | } | |
201 | ||
202 | if (current->signal->tick_dep_mask) { | |
203 | trace_tick_dependency(current->signal->tick_dep_mask); | |
204 | return false; | |
205 | } | |
206 | ||
9014c45d FW |
207 | return true; |
208 | } | |
209 | ||
d027d45d | 210 | static void nohz_full_kick_func(struct irq_work *work) |
76c24fb0 | 211 | { |
73738a95 | 212 | /* Empty, the tick restart happens on tick_nohz_irq_exit() */ |
76c24fb0 FW |
213 | } |
214 | ||
215 | static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = { | |
d027d45d | 216 | .func = nohz_full_kick_func, |
76c24fb0 FW |
217 | }; |
218 | ||
40bea039 FW |
219 | /* |
220 | * Kick this CPU if it's full dynticks in order to force it to | |
221 | * re-evaluate its dependency on the tick and restart it if necessary. | |
222 | * This kick, unlike tick_nohz_full_kick_cpu() and tick_nohz_full_kick_all(), | |
223 | * is NMI safe. | |
224 | */ | |
555e0c1e | 225 | static void tick_nohz_full_kick(void) |
40bea039 FW |
226 | { |
227 | if (!tick_nohz_full_cpu(smp_processor_id())) | |
228 | return; | |
229 | ||
56e4dea8 | 230 | irq_work_queue(this_cpu_ptr(&nohz_full_kick_work)); |
40bea039 FW |
231 | } |
232 | ||
76c24fb0 | 233 | /* |
3d36aebc | 234 | * Kick the CPU if it's full dynticks in order to force it to |
76c24fb0 FW |
235 | * re-evaluate its dependency on the tick and restart it if necessary. |
236 | */ | |
3d36aebc | 237 | void tick_nohz_full_kick_cpu(int cpu) |
76c24fb0 | 238 | { |
3d36aebc FW |
239 | if (!tick_nohz_full_cpu(cpu)) |
240 | return; | |
241 | ||
242 | irq_work_queue_on(&per_cpu(nohz_full_kick_work, cpu), cpu); | |
76c24fb0 FW |
243 | } |
244 | ||
76c24fb0 FW |
245 | /* |
246 | * Kick all full dynticks CPUs in order to force these to re-evaluate | |
247 | * their dependency on the tick and restart it if necessary. | |
248 | */ | |
b7878300 | 249 | static void tick_nohz_full_kick_all(void) |
76c24fb0 | 250 | { |
8537bb95 FW |
251 | int cpu; |
252 | ||
73867dcd | 253 | if (!tick_nohz_full_running) |
76c24fb0 FW |
254 | return; |
255 | ||
256 | preempt_disable(); | |
8537bb95 FW |
257 | for_each_cpu_and(cpu, tick_nohz_full_mask, cpu_online_mask) |
258 | tick_nohz_full_kick_cpu(cpu); | |
76c24fb0 FW |
259 | preempt_enable(); |
260 | } | |
261 | ||
d027d45d FW |
262 | static void tick_nohz_dep_set_all(unsigned long *dep, |
263 | enum tick_dep_bits bit) | |
264 | { | |
265 | unsigned long prev; | |
266 | ||
267 | prev = fetch_or(dep, BIT_MASK(bit)); | |
268 | if (!prev) | |
269 | tick_nohz_full_kick_all(); | |
270 | } | |
271 | ||
272 | /* | |
273 | * Set a global tick dependency. Used by perf events that rely on freq and | |
274 | * by unstable clock. | |
275 | */ | |
276 | void tick_nohz_dep_set(enum tick_dep_bits bit) | |
277 | { | |
278 | tick_nohz_dep_set_all(&tick_dep_mask, bit); | |
279 | } | |
280 | ||
281 | void tick_nohz_dep_clear(enum tick_dep_bits bit) | |
282 | { | |
283 | clear_bit(bit, &tick_dep_mask); | |
284 | } | |
285 | ||
286 | /* | |
287 | * Set per-CPU tick dependency. Used by scheduler and perf events in order to | |
288 | * manage events throttling. | |
289 | */ | |
290 | void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit) | |
291 | { | |
292 | unsigned long prev; | |
293 | struct tick_sched *ts; | |
294 | ||
295 | ts = per_cpu_ptr(&tick_cpu_sched, cpu); | |
296 | ||
297 | prev = fetch_or(&ts->tick_dep_mask, BIT_MASK(bit)); | |
298 | if (!prev) { | |
299 | preempt_disable(); | |
300 | /* Perf needs local kick that is NMI safe */ | |
301 | if (cpu == smp_processor_id()) { | |
302 | tick_nohz_full_kick(); | |
303 | } else { | |
304 | /* Remote irq work not NMI-safe */ | |
305 | if (!WARN_ON_ONCE(in_nmi())) | |
306 | tick_nohz_full_kick_cpu(cpu); | |
307 | } | |
308 | preempt_enable(); | |
309 | } | |
310 | } | |
311 | ||
312 | void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit) | |
313 | { | |
314 | struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu); | |
315 | ||
316 | clear_bit(bit, &ts->tick_dep_mask); | |
317 | } | |
318 | ||
319 | /* | |
320 | * Set a per-task tick dependency. Posix CPU timers need this in order to elapse | |
321 | * per task timers. | |
322 | */ | |
323 | void tick_nohz_dep_set_task(struct task_struct *tsk, enum tick_dep_bits bit) | |
324 | { | |
325 | /* | |
326 | * We could optimize this by just kicking the CPU running the task |
327 | * if that noise matters for nohz full users. | |
328 | */ | |
329 | tick_nohz_dep_set_all(&tsk->tick_dep_mask, bit); | |
330 | } | |
331 | ||
332 | void tick_nohz_dep_clear_task(struct task_struct *tsk, enum tick_dep_bits bit) | |
333 | { | |
334 | clear_bit(bit, &tsk->tick_dep_mask); | |
335 | } | |
336 | ||
337 | /* | |
338 | * Set a per-taskgroup tick dependency. Posix CPU timers need this in order to elapse | |
339 | * per process timers. | |
340 | */ | |
341 | void tick_nohz_dep_set_signal(struct signal_struct *sig, enum tick_dep_bits bit) | |
342 | { | |
343 | tick_nohz_dep_set_all(&sig->tick_dep_mask, bit); | |
344 | } | |
345 | ||
346 | void tick_nohz_dep_clear_signal(struct signal_struct *sig, enum tick_dep_bits bit) | |
347 | { | |
348 | clear_bit(bit, &sig->tick_dep_mask); | |
349 | } | |
350 | ||
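A hypothetical usage sketch of the dependency API above (the real in-tree callers are perf events, posix CPU timers, the scheduler and the unstable-clock code); my_start_counting()/my_stop_counting() are made-up names. While any of these bits is set, can_stop_full_tick() fails at the matching scope and the tick keeps running there.

```c
/* Hypothetical subsystem pinning the tick at three different scopes */
static void my_start_counting(struct task_struct *tsk, int cpu)
{
	/* System-wide: every full dynticks CPU keeps ticking */
	tick_nohz_dep_set(TICK_DEP_BIT_CLOCK_UNSTABLE);

	/* One CPU only, e.g. a perf event that needs throttling */
	tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_PERF_EVENTS);

	/* Only while this task runs, e.g. a posix CPU timer */
	tick_nohz_dep_set_task(tsk, TICK_DEP_BIT_POSIX_TIMER);
}

static void my_stop_counting(struct task_struct *tsk, int cpu)
{
	tick_nohz_dep_clear_task(tsk, TICK_DEP_BIT_POSIX_TIMER);
	tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_PERF_EVENTS);
	tick_nohz_dep_clear(TICK_DEP_BIT_CLOCK_UNSTABLE);
}
```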
99e5ada9 FW |
351 | /* |
352 | * Re-evaluate the need for the tick as we switch the current task. | |
353 | * It might need the tick due to per task/process properties: | |
354 | * perf events, posix cpu timers, ... | |
355 | */ | |
de734f89 | 356 | void __tick_nohz_task_switch(void) |
99e5ada9 FW |
357 | { |
358 | unsigned long flags; | |
d027d45d | 359 | struct tick_sched *ts; |
99e5ada9 | 360 | |
99e5ada9 FW |
361 | local_irq_save(flags); |
362 | ||
6296ace4 LZ |
363 | if (!tick_nohz_full_cpu(smp_processor_id())) |
364 | goto out; | |
365 | ||
d027d45d | 366 | ts = this_cpu_ptr(&tick_cpu_sched); |
99e5ada9 | 367 | |
d027d45d FW |
368 | if (ts->tick_stopped) { |
369 | if (current->tick_dep_mask || current->signal->tick_dep_mask) | |
370 | tick_nohz_full_kick(); | |
371 | } | |
6296ace4 | 372 | out: |
99e5ada9 FW |
373 | local_irq_restore(flags); |
374 | } | |
375 | ||
a831881b | 376 | /* Parse the boot-time nohz CPU list from the kernel parameters. */ |
c5bfece2 | 377 | static int __init tick_nohz_full_setup(char *str) |
a831881b | 378 | { |
73867dcd FW |
379 | alloc_bootmem_cpumask_var(&tick_nohz_full_mask); |
380 | if (cpulist_parse(str, tick_nohz_full_mask) < 0) { | |
a395d6a7 | 381 | pr_warn("NO_HZ: Incorrect nohz_full cpumask\n"); |
4327b15f | 382 | free_bootmem_cpumask_var(tick_nohz_full_mask); |
0453b435 FW |
383 | return 1; |
384 | } | |
73867dcd | 385 | tick_nohz_full_running = true; |
0453b435 | 386 | |
a831881b FW |
387 | return 1; |
388 | } | |
c5bfece2 | 389 | __setup("nohz_full=", tick_nohz_full_setup); |
a831881b | 390 | |
0db0628d | 391 | static int tick_nohz_cpu_down_callback(struct notifier_block *nfb, |
7c8bb6cb FW |
392 | unsigned long action, |
393 | void *hcpu) | |
a382bf93 FW |
394 | { |
395 | unsigned int cpu = (unsigned long)hcpu; | |
396 | ||
397 | switch (action & ~CPU_TASKS_FROZEN) { | |
398 | case CPU_DOWN_PREPARE: | |
399 | /* | |
7c8bb6cb FW |
400 | * The boot CPU handles housekeeping duty (unbound timers, |
401 | * workqueues, timekeeping, ...) on behalf of full dynticks | |
402 | * CPUs. It must remain online when nohz full is enabled. | |
a382bf93 | 403 | */ |
73867dcd | 404 | if (tick_nohz_full_running && tick_do_timer_cpu == cpu) |
1a7f829f | 405 | return NOTIFY_BAD; |
a382bf93 FW |
406 | break; |
407 | } | |
408 | return NOTIFY_OK; | |
409 | } | |
410 | ||
f98823ac FW |
411 | static int tick_nohz_init_all(void) |
412 | { | |
413 | int err = -1; | |
414 | ||
415 | #ifdef CONFIG_NO_HZ_FULL_ALL | |
73867dcd | 416 | if (!alloc_cpumask_var(&tick_nohz_full_mask, GFP_KERNEL)) { |
4327b15f | 417 | WARN(1, "NO_HZ: Can't allocate full dynticks cpumask\n"); |
c0f489d2 PM |
418 | return err; |
419 | } | |
f98823ac | 420 | err = 0; |
73867dcd | 421 | cpumask_setall(tick_nohz_full_mask); |
73867dcd | 422 | tick_nohz_full_running = true; |
f98823ac FW |
423 | #endif |
424 | return err; | |
425 | } | |
426 | ||
d1e43fa5 | 427 | void __init tick_nohz_init(void) |
a831881b | 428 | { |
d1e43fa5 FW |
429 | int cpu; |
430 | ||
73867dcd | 431 | if (!tick_nohz_full_running) { |
f98823ac FW |
432 | if (tick_nohz_init_all() < 0) |
433 | return; | |
434 | } | |
d1e43fa5 | 435 | |
4327b15f FW |
436 | if (!alloc_cpumask_var(&housekeeping_mask, GFP_KERNEL)) { |
437 | WARN(1, "NO_HZ: Can't allocate not-full dynticks cpumask\n"); | |
438 | cpumask_clear(tick_nohz_full_mask); | |
439 | tick_nohz_full_running = false; | |
440 | return; | |
441 | } | |
442 | ||
9b01f5bf FW |
443 | /* |
444 | * Full dynticks uses irq work to drive the tick rescheduling on safe | |
445 | * locking contexts. But then we need irq work to raise its own | |
446 | * interrupts to avoid circular dependency on the tick | |
447 | */ | |
448 | if (!arch_irq_work_has_interrupt()) { | |
a395d6a7 | 449 | pr_warn("NO_HZ: Can't run full dynticks because arch doesn't support irq work self-IPIs\n"); |
9b01f5bf FW |
450 | cpumask_clear(tick_nohz_full_mask); |
451 | cpumask_copy(housekeeping_mask, cpu_possible_mask); | |
452 | tick_nohz_full_running = false; | |
453 | return; | |
454 | } | |
455 | ||
4327b15f FW |
456 | cpu = smp_processor_id(); |
457 | ||
458 | if (cpumask_test_cpu(cpu, tick_nohz_full_mask)) { | |
a395d6a7 JP |
459 | pr_warn("NO_HZ: Clearing %d from nohz_full range for timekeeping\n", |
460 | cpu); | |
4327b15f FW |
461 | cpumask_clear_cpu(cpu, tick_nohz_full_mask); |
462 | } | |
463 | ||
464 | cpumask_andnot(housekeeping_mask, | |
465 | cpu_possible_mask, tick_nohz_full_mask); | |
466 | ||
73867dcd | 467 | for_each_cpu(cpu, tick_nohz_full_mask) |
2e709338 FW |
468 | context_tracking_cpu_set(cpu); |
469 | ||
d1e43fa5 | 470 | cpu_notifier(tick_nohz_cpu_down_callback, 0); |
ffda22c1 TH |
471 | pr_info("NO_HZ: Full dynticks CPUs: %*pbl.\n", |
472 | cpumask_pr_args(tick_nohz_full_mask)); | |
7c8bb6cb FW |
473 | |
474 | /* | |
475 | * We need at least one CPU to handle housekeeping work such | |
476 | * as timekeeping, unbound timers, workqueues, ... | |
477 | */ | |
478 | WARN_ON_ONCE(cpumask_empty(housekeeping_mask)); | |
a831881b | 479 | } |
a831881b FW |
480 | #endif |
481 | ||
79bf2bb3 TG |
482 | /* |
483 | * NOHZ - aka dynamic tick functionality | |
484 | */ | |
3451d024 | 485 | #ifdef CONFIG_NO_HZ_COMMON |
79bf2bb3 TG |
486 | /* |
487 | * NO HZ enabled ? | |
488 | */ | |
4cc7ecb7 | 489 | bool tick_nohz_enabled __read_mostly = true; |
bc7a34b8 | 490 | unsigned long tick_nohz_active __read_mostly; |
79bf2bb3 TG |
491 | /* |
492 | * Enable / Disable tickless mode | |
493 | */ | |
494 | static int __init setup_tick_nohz(char *str) | |
495 | { | |
4cc7ecb7 | 496 | return (kstrtobool(str, &tick_nohz_enabled) == 0); |
79bf2bb3 TG |
497 | } |
498 | ||
499 | __setup("nohz=", setup_tick_nohz); | |
500 | ||
c1797baf TG |
501 | int tick_nohz_tick_stopped(void) |
502 | { | |
503 | return __this_cpu_read(tick_cpu_sched.tick_stopped); | |
504 | } | |
505 | ||
79bf2bb3 TG |
506 | /** |
507 | * tick_nohz_update_jiffies - update jiffies when idle was interrupted | |
508 | * | |
509 | * Called from interrupt entry when the CPU was idle | |
510 | * | |
511 | * In case the sched_tick was stopped on this CPU, we have to check if jiffies | |
512 | * must be updated. Otherwise an interrupt handler could use a stale jiffy | |
513 | * value. We do this unconditionally on any cpu, as we don't know whether the | |
514 | * cpu which has the update task assigned is in a long sleep. |
515 | */ | |
eed3b9cf | 516 | static void tick_nohz_update_jiffies(ktime_t now) |
79bf2bb3 | 517 | { |
79bf2bb3 | 518 | unsigned long flags; |
79bf2bb3 | 519 | |
e8fcaa5c | 520 | __this_cpu_write(tick_cpu_sched.idle_waketime, now); |
79bf2bb3 TG |
521 | |
522 | local_irq_save(flags); | |
523 | tick_do_update_jiffies64(now); | |
524 | local_irq_restore(flags); | |
02ff3755 | 525 | |
03e0d461 | 526 | touch_softlockup_watchdog_sched(); |
79bf2bb3 TG |
527 | } |
528 | ||
595aac48 AV |
529 | /* |
530 | * Updates the per cpu time idle statistics counters | |
531 | */ | |
8d63bf94 | 532 | static void |
8c215bd3 | 533 | update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_update_time) |
6378ddb5 | 534 | { |
eed3b9cf | 535 | ktime_t delta; |
6378ddb5 | 536 | |
595aac48 AV |
537 | if (ts->idle_active) { |
538 | delta = ktime_sub(now, ts->idle_entrytime); | |
8c215bd3 | 539 | if (nr_iowait_cpu(cpu) > 0) |
0224cf4c | 540 | ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta); |
6beea0cd MH |
541 | else |
542 | ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta); | |
8c7b09f4 | 543 | ts->idle_entrytime = now; |
595aac48 | 544 | } |
8d63bf94 | 545 | |
e0e37c20 | 546 | if (last_update_time) |
8d63bf94 AV |
547 | *last_update_time = ktime_to_us(now); |
548 | ||
595aac48 AV |
549 | } |
550 | ||
e8fcaa5c | 551 | static void tick_nohz_stop_idle(struct tick_sched *ts, ktime_t now) |
595aac48 | 552 | { |
e8fcaa5c | 553 | update_ts_time_stats(smp_processor_id(), ts, now, NULL); |
eed3b9cf | 554 | ts->idle_active = 0; |
56c7426b | 555 | |
eed3b9cf | 556 | sched_clock_idle_wakeup_event(0); |
6378ddb5 VP |
557 | } |
558 | ||
e8fcaa5c | 559 | static ktime_t tick_nohz_start_idle(struct tick_sched *ts) |
6378ddb5 | 560 | { |
430ee881 | 561 | ktime_t now = ktime_get(); |
595aac48 | 562 | |
6378ddb5 VP |
563 | ts->idle_entrytime = now; |
564 | ts->idle_active = 1; | |
56c7426b | 565 | sched_clock_idle_sleep_event(); |
6378ddb5 VP |
566 | return now; |
567 | } | |
568 | ||
b1f724c3 AV |
569 | /** |
570 | * get_cpu_idle_time_us - get the total idle time of a cpu | |
571 | * @cpu: CPU number to query | |
09a1d34f MH |
572 | * @last_update_time: variable to store update time in. Do not update |
573 | * counters if NULL. | |
b1f724c3 AV |
574 | * |
575 | * Return the cumulative idle time (since boot) for a given |
6beea0cd | 576 | * CPU, in microseconds. |
b1f724c3 AV |
577 | * |
578 | * This time is measured via accounting rather than sampling, | |
579 | * and is as accurate as ktime_get() is. | |
580 | * | |
581 | * This function returns -1 if NOHZ is not enabled. | |
582 | */ | |
6378ddb5 VP |
583 | u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time) |
584 | { | |
585 | struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); | |
09a1d34f | 586 | ktime_t now, idle; |
6378ddb5 | 587 | |
d689fe22 | 588 | if (!tick_nohz_active) |
8083e4ad | 589 | return -1; |
590 | ||
09a1d34f MH |
591 | now = ktime_get(); |
592 | if (last_update_time) { | |
593 | update_ts_time_stats(cpu, ts, now, last_update_time); | |
594 | idle = ts->idle_sleeptime; | |
595 | } else { | |
596 | if (ts->idle_active && !nr_iowait_cpu(cpu)) { | |
597 | ktime_t delta = ktime_sub(now, ts->idle_entrytime); | |
598 | ||
599 | idle = ktime_add(ts->idle_sleeptime, delta); | |
600 | } else { | |
601 | idle = ts->idle_sleeptime; | |
602 | } | |
603 | } | |
604 | ||
605 | return ktime_to_us(idle); | |
8083e4ad | 606 | |
6378ddb5 | 607 | } |
8083e4ad | 608 | EXPORT_SYMBOL_GPL(get_cpu_idle_time_us); |
6378ddb5 | 609 | |
6beea0cd | 610 | /** |
0224cf4c AV |
611 | * get_cpu_iowait_time_us - get the total iowait time of a cpu |
612 | * @cpu: CPU number to query | |
09a1d34f MH |
613 | * @last_update_time: variable to store update time in. Do not update |
614 | * counters if NULL. | |
0224cf4c AV |
615 | * |
616 | * Return the cumulative iowait time (since boot) for a given |
617 | * CPU, in microseconds. | |
618 | * | |
619 | * This time is measured via accounting rather than sampling, | |
620 | * and is as accurate as ktime_get() is. | |
621 | * | |
622 | * This function returns -1 if NOHZ is not enabled. | |
623 | */ | |
624 | u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time) | |
625 | { | |
626 | struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); | |
09a1d34f | 627 | ktime_t now, iowait; |
0224cf4c | 628 | |
d689fe22 | 629 | if (!tick_nohz_active) |
0224cf4c AV |
630 | return -1; |
631 | ||
09a1d34f MH |
632 | now = ktime_get(); |
633 | if (last_update_time) { | |
634 | update_ts_time_stats(cpu, ts, now, last_update_time); | |
635 | iowait = ts->iowait_sleeptime; | |
636 | } else { | |
637 | if (ts->idle_active && nr_iowait_cpu(cpu) > 0) { | |
638 | ktime_t delta = ktime_sub(now, ts->idle_entrytime); | |
0224cf4c | 639 | |
09a1d34f MH |
640 | iowait = ktime_add(ts->iowait_sleeptime, delta); |
641 | } else { | |
642 | iowait = ts->iowait_sleeptime; | |
643 | } | |
644 | } | |
0224cf4c | 645 | |
09a1d34f | 646 | return ktime_to_us(iowait); |
0224cf4c AV |
647 | } |
648 | EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us); | |
649 | ||
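An illustrative consumer of the two accessors above, in the style of a cpufreq governor (hypothetical names, not in-tree code): sample the idle counter twice and derive a busy percentage from the deltas.

```c
/* Hypothetical: busy percentage of @cpu since the previous call */
static unsigned int my_cpu_busy_pct(int cpu, u64 *prev_idle, u64 *prev_wall)
{
	u64 wall, idle, d_idle, d_wall;

	idle = get_cpu_idle_time_us(cpu, &wall);	/* wall = update time */
	if (idle == (u64)-1)
		return 0;				/* NOHZ not active */

	d_idle = idle - *prev_idle;
	d_wall = wall - *prev_wall;
	*prev_idle = idle;
	*prev_wall = wall;

	if (!d_wall || d_idle > d_wall)
		return 0;
	return (unsigned int)div64_u64(100 * (d_wall - d_idle), d_wall);
}
```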
0ff53d09 TG |
650 | static void tick_nohz_restart(struct tick_sched *ts, ktime_t now) |
651 | { | |
652 | hrtimer_cancel(&ts->sched_timer); | |
653 | hrtimer_set_expires(&ts->sched_timer, ts->last_tick); | |
654 | ||
655 | /* Forward the time to expire in the future */ | |
656 | hrtimer_forward(&ts->sched_timer, now, tick_period); | |
657 | ||
658 | if (ts->nohz_mode == NOHZ_MODE_HIGHRES) | |
659 | hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED); | |
660 | else | |
661 | tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1); | |
662 | } | |
663 | ||
84bf1bcc FW |
664 | static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts, |
665 | ktime_t now, int cpu) | |
79bf2bb3 | 666 | { |
22127e93 | 667 | struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev); |
c1ad348b TG |
668 | u64 basemono, next_tick, next_tmr, next_rcu, delta, expires; |
669 | unsigned long seq, basejiff; | |
670 | ktime_t tick; | |
855a0fc3 | 671 | |
79bf2bb3 TG |
672 | /* Read jiffies and the time when jiffies were updated last */ |
673 | do { | |
d6ad4187 | 674 | seq = read_seqbegin(&jiffies_lock); |
c1ad348b TG |
675 | basemono = last_jiffies_update.tv64; |
676 | basejiff = jiffies; | |
d6ad4187 | 677 | } while (read_seqretry(&jiffies_lock, seq)); |
c1ad348b | 678 | ts->last_jiffies = basejiff; |
79bf2bb3 | 679 | |
c1ad348b | 680 | if (rcu_needs_cpu(basemono, &next_rcu) || |
fe0f4976 | 681 | arch_needs_cpu() || irq_work_needs_cpu()) { |
c1ad348b | 682 | next_tick = basemono + TICK_NSEC; |
3c5d92a0 | 683 | } else { |
c1ad348b TG |
684 | /* |
685 | * Get the next pending timer. If high resolution | |
686 | * timers are enabled this only takes the timer wheel | |
687 | * timers into account. If high resolution timers are | |
688 | * disabled this also looks at the next expiring | |
689 | * hrtimer. | |
690 | */ | |
691 | next_tmr = get_next_timer_interrupt(basejiff, basemono); | |
692 | ts->next_timer = next_tmr; | |
693 | /* Take the next rcu event into account */ | |
694 | next_tick = next_rcu < next_tmr ? next_rcu : next_tmr; | |
3c5d92a0 | 695 | } |
47aa8b6c | 696 | |
c1ad348b TG |
697 | /* |
698 | * If the tick is due in the next period, keep it ticking or | |
82bbe34b | 699 | * force prod the timer. |
c1ad348b TG |
700 | */ |
701 | delta = next_tick - basemono; | |
702 | if (delta <= (u64)TICK_NSEC) { | |
703 | tick.tv64 = 0; | |
82bbe34b PZ |
704 | /* |
705 | * We've not stopped the tick yet, and there's a timer in the | |
706 | * next period, so no point in stopping it either, bail. | |
707 | */ | |
157d29e1 TG |
708 | if (!ts->tick_stopped) |
709 | goto out; | |
82bbe34b PZ |
710 | |
711 | /* | |
712 | * If, OTOH, we did stop it, but there's a pending (expired) | |
713 | * timer reprogram the timer hardware to fire now. | |
714 | * | |
715 | * We will not restart the tick proper, just prod the timer | |
716 | * hardware into firing an interrupt to process the pending | |
717 | * timers. Just like tick_irq_exit() will not restart the tick | |
718 | * for 'normal' interrupts. | |
719 | * | |
720 | * Only once we exit the idle loop will we re-enable the tick, | |
721 | * see tick_nohz_idle_exit(). | |
722 | */ | |
c1ad348b | 723 | if (delta == 0) { |
157d29e1 TG |
724 | tick_nohz_restart(ts, now); |
725 | goto out; | |
726 | } | |
727 | } | |
728 | ||
79bf2bb3 | 729 | /* |
157d29e1 TG |
730 | * If this cpu is the one which updates jiffies, then give up |
731 | * the assignment and let it be taken by the cpu which runs | |
732 | * the tick timer next, which might be this cpu as well. If we | |
733 | * don't drop this here the jiffies might be stale and | |
734 | * do_timer() never invoked. Keep track of the fact that it | |
735 | * was the one which had the do_timer() duty last. If this cpu | |
736 | * is the one which had the do_timer() duty last, we limit the | |
c1ad348b TG |
737 | * sleep time to the timekeeping max_deferment value. |
738 | * Otherwise we can sleep as long as we want. | |
79bf2bb3 | 739 | */ |
c1ad348b | 740 | delta = timekeeping_max_deferment(); |
157d29e1 TG |
741 | if (cpu == tick_do_timer_cpu) { |
742 | tick_do_timer_cpu = TICK_DO_TIMER_NONE; | |
743 | ts->do_timer_last = 1; | |
744 | } else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) { | |
c1ad348b | 745 | delta = KTIME_MAX; |
157d29e1 TG |
746 | ts->do_timer_last = 0; |
747 | } else if (!ts->do_timer_last) { | |
c1ad348b | 748 | delta = KTIME_MAX; |
157d29e1 | 749 | } |
27185016 | 750 | |
265f22a9 | 751 | #ifdef CONFIG_NO_HZ_FULL |
c1ad348b | 752 | /* Limit the tick delta to the maximum scheduler deferment */ |
157d29e1 | 753 | if (!ts->inidle) |
c1ad348b | 754 | delta = min(delta, scheduler_tick_max_deferment()); |
265f22a9 FW |
755 | #endif |
756 | ||
c1ad348b TG |
757 | /* Calculate the next expiry time */ |
758 | if (delta < (KTIME_MAX - basemono)) | |
759 | expires = basemono + delta; | |
157d29e1 | 760 | else |
c1ad348b TG |
761 | expires = KTIME_MAX; |
762 | ||
763 | expires = min_t(u64, expires, next_tick); | |
764 | tick.tv64 = expires; | |
00147449 | 765 | |
157d29e1 | 766 | /* Skip reprogramming the event if it hasn't changed */
c1ad348b | 767 | if (ts->tick_stopped && (expires == dev->next_event.tv64)) |
157d29e1 | 768 | goto out; |
84bf1bcc | 769 | |
157d29e1 TG |
770 | /* |
771 | * nohz_stop_sched_tick can be called several times before | |
772 | * the nohz_restart_sched_tick is called. This happens when | |
773 | * interrupts arrive which do not cause a reschedule. In the | |
774 | * first call we save the current tick time, so we can restart | |
775 | * the scheduler tick in nohz_restart_sched_tick. | |
776 | */ | |
777 | if (!ts->tick_stopped) { | |
778 | nohz_balance_enter_idle(cpu); | |
779 | calc_load_enter_idle(); | |
d3ed7824 | 780 | |
157d29e1 TG |
781 | ts->last_tick = hrtimer_get_expires(&ts->sched_timer); |
782 | ts->tick_stopped = 1; | |
e6e6cc22 | 783 | trace_tick_stop(1, TICK_DEP_MASK_NONE); |
157d29e1 | 784 | } |
eaad084b | 785 | |
157d29e1 | 786 | /* |
c1ad348b TG |
787 | * If the expiration time == KTIME_MAX, then we simply stop |
788 | * the tick timer. | |
157d29e1 | 789 | */ |
c1ad348b | 790 | if (unlikely(expires == KTIME_MAX)) { |
157d29e1 TG |
791 | if (ts->nohz_mode == NOHZ_MODE_HIGHRES) |
792 | hrtimer_cancel(&ts->sched_timer); | |
793 | goto out; | |
79bf2bb3 | 794 | } |
0ff53d09 | 795 | |
157d29e1 | 796 | if (ts->nohz_mode == NOHZ_MODE_HIGHRES) |
c1ad348b | 797 | hrtimer_start(&ts->sched_timer, tick, HRTIMER_MODE_ABS_PINNED); |
157d29e1 | 798 | else |
c1ad348b | 799 | tick_program_event(tick, 1); |
79bf2bb3 | 800 | out: |
c1ad348b | 801 | /* Update the estimated sleep length */ |
4f86d3a8 | 802 | ts->sleep_length = ktime_sub(dev->next_event, now); |
c1ad348b | 803 | return tick; |
280f0677 FW |
804 | } |
805 | ||
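A condensed, illustrative restatement of the expiry selection done above (not a drop-in replacement; locking, hrtimer reprogramming and the do_timer() handover are omitted): the next tick is the earliest pending event, clamped by how far timekeeping lets this CPU defer unless another CPU still carries the do_timer() duty.

```c
/* Illustration only: pick the next expiry in absolute nanoseconds */
static u64 pick_expiry(u64 basemono, u64 next_tmr, u64 next_rcu,
		       bool clamp_for_timekeeping)
{
	u64 next_tick = min(next_rcu, next_tmr);
	u64 delta = clamp_for_timekeeping ? timekeeping_max_deferment()
					  : KTIME_MAX;
	u64 expires;

	/* Tick due within one period anyway: keep it running */
	if (next_tick - basemono <= (u64)TICK_NSEC)
		return 0;

	expires = delta < KTIME_MAX - basemono ? basemono + delta : KTIME_MAX;
	return min(expires, next_tick);
}
```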
525705d1 | 806 | static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now, int active) |
59d2c7ca FW |
807 | { |
808 | /* Update jiffies first */ | |
809 | tick_do_update_jiffies64(now); | |
525705d1 | 810 | update_cpu_load_nohz(active); |
59d2c7ca FW |
811 | |
812 | calc_load_exit_idle(); | |
03e0d461 | 813 | touch_softlockup_watchdog_sched(); |
59d2c7ca FW |
814 | /* |
815 | * Cancel the scheduled timer and restore the tick | |
816 | */ | |
817 | ts->tick_stopped = 0; | |
818 | ts->idle_exittime = now; | |
819 | ||
820 | tick_nohz_restart(ts, now); | |
821 | } | |
73738a95 FW |
822 | |
823 | static void tick_nohz_full_update_tick(struct tick_sched *ts) | |
5811d996 FW |
824 | { |
825 | #ifdef CONFIG_NO_HZ_FULL | |
e9a2eb40 | 826 | int cpu = smp_processor_id(); |
5811d996 | 827 | |
59449359 | 828 | if (!tick_nohz_full_cpu(cpu)) |
e9a2eb40 | 829 | return; |
5811d996 | 830 | |
e9a2eb40 AS |
831 | if (!ts->tick_stopped && ts->nohz_mode == NOHZ_MODE_INACTIVE) |
832 | return; | |
5811d996 | 833 | |
d027d45d | 834 | if (can_stop_full_tick(ts)) |
73738a95 FW |
835 | tick_nohz_stop_sched_tick(ts, ktime_get(), cpu); |
836 | else if (ts->tick_stopped) | |
525705d1 | 837 | tick_nohz_restart_sched_tick(ts, ktime_get(), 1); |
5811d996 FW |
838 | #endif |
839 | } | |
840 | ||
5b39939a FW |
841 | static bool can_stop_idle_tick(int cpu, struct tick_sched *ts) |
842 | { | |
843 | /* | |
844 | * If this cpu is offline and it is the one which updates | |
845 | * jiffies, then give up the assignment and let it be taken by | |
846 | * the cpu which runs the tick timer next. If we don't drop | |
847 | * this here the jiffies might be stale and do_timer() never | |
848 | * invoked. | |
849 | */ | |
850 | if (unlikely(!cpu_online(cpu))) { | |
851 | if (cpu == tick_do_timer_cpu) | |
852 | tick_do_timer_cpu = TICK_DO_TIMER_NONE; | |
f7ea0fd6 | 853 | return false; |
5b39939a FW |
854 | } |
855 | ||
0e576acb TG |
856 | if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) { |
857 | ts->sleep_length = (ktime_t) { .tv64 = NSEC_PER_SEC/HZ }; | |
5b39939a | 858 | return false; |
0e576acb | 859 | } |
5b39939a FW |
860 | |
861 | if (need_resched()) | |
862 | return false; | |
863 | ||
864 | if (unlikely(local_softirq_pending() && cpu_online(cpu))) { | |
865 | static int ratelimit; | |
866 | ||
803b0eba PM |
867 | if (ratelimit < 10 && |
868 | (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) { | |
cfea7d7e RV |
869 | pr_warn("NOHZ: local_softirq_pending %02x\n", |
870 | (unsigned int) local_softirq_pending()); | |
5b39939a FW |
871 | ratelimit++; |
872 | } | |
873 | return false; | |
874 | } | |
875 | ||
460775df | 876 | if (tick_nohz_full_enabled()) { |
a382bf93 FW |
877 | /* |
878 | * Keep the tick alive to guarantee timekeeping progression | |
879 | * if there are full dynticks CPUs around | |
880 | */ | |
881 | if (tick_do_timer_cpu == cpu) | |
882 | return false; | |
883 | /* | |
884 | * Boot safety: make sure the timekeeping duty has been | |
885 | * assigned before entering dyntick-idle mode, | |
886 | */ | |
887 | if (tick_do_timer_cpu == TICK_DO_TIMER_NONE) | |
888 | return false; | |
889 | } | |
890 | ||
5b39939a FW |
891 | return true; |
892 | } | |
893 | ||
19f5f736 FW |
894 | static void __tick_nohz_idle_enter(struct tick_sched *ts) |
895 | { | |
84bf1bcc | 896 | ktime_t now, expires; |
5b39939a | 897 | int cpu = smp_processor_id(); |
19f5f736 | 898 | |
e8fcaa5c | 899 | now = tick_nohz_start_idle(ts); |
2ac0d98f | 900 | |
5b39939a FW |
901 | if (can_stop_idle_tick(cpu, ts)) { |
902 | int was_stopped = ts->tick_stopped; | |
903 | ||
904 | ts->idle_calls++; | |
84bf1bcc FW |
905 | |
906 | expires = tick_nohz_stop_sched_tick(ts, now, cpu); | |
907 | if (expires.tv64 > 0LL) { | |
908 | ts->idle_sleeps++; | |
909 | ts->idle_expires = expires; | |
910 | } | |
5b39939a FW |
911 | |
912 | if (!was_stopped && ts->tick_stopped) | |
913 | ts->idle_jiffies = ts->last_jiffies; | |
914 | } | |
280f0677 FW |
915 | } |
916 | ||
917 | /** | |
918 | * tick_nohz_idle_enter - stop the idle tick from the idle task | |
919 | * | |
920 | * When the next event is more than a tick into the future, stop the idle tick. |
921 | * Called when we start the idle loop. | |
2bbb6817 | 922 | * |
1268fbc7 | 923 | * The arch is responsible for calling: |
2bbb6817 FW |
924 | * |
925 | * - rcu_idle_enter() after its last use of RCU before the CPU is put | |
926 | * to sleep. | |
927 | * - rcu_idle_exit() before the first use of RCU after the CPU is woken up. | |
280f0677 | 928 | */ |
1268fbc7 | 929 | void tick_nohz_idle_enter(void) |
280f0677 FW |
930 | { |
931 | struct tick_sched *ts; | |
932 | ||
1268fbc7 FW |
933 | WARN_ON_ONCE(irqs_disabled()); |
934 | ||
0db49b72 LT |
935 | /* |
936 | * Update the idle state in the scheduler domain hierarchy | |
937 | * when tick_nohz_stop_sched_tick() is called from the idle loop. | |
938 | * State will be updated to busy during the first busy tick after | |
939 | * exiting idle. | |
940 | */ | |
941 | set_cpu_sd_state_idle(); | |
942 | ||
1268fbc7 FW |
943 | local_irq_disable(); |
944 | ||
22127e93 | 945 | ts = this_cpu_ptr(&tick_cpu_sched); |
280f0677 | 946 | ts->inidle = 1; |
19f5f736 | 947 | __tick_nohz_idle_enter(ts); |
1268fbc7 FW |
948 | |
949 | local_irq_enable(); | |
280f0677 FW |
950 | } |
951 | ||
952 | /** | |
953 | * tick_nohz_irq_exit - update next tick event from interrupt exit | |
954 | * | |
955 | * When an interrupt fires while we are idle and it doesn't cause | |
956 | * a reschedule, it may still add, modify or delete a timer, enqueue | |
957 | * an RCU callback, etc... | |
958 | * So we need to re-calculate and reprogram the next tick event. | |
959 | */ | |
960 | void tick_nohz_irq_exit(void) | |
961 | { | |
22127e93 | 962 | struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); |
280f0677 | 963 | |
14851912 | 964 | if (ts->inidle) |
5811d996 | 965 | __tick_nohz_idle_enter(ts); |
14851912 | 966 | else |
73738a95 | 967 | tick_nohz_full_update_tick(ts); |
79bf2bb3 TG |
968 | } |
969 | ||
4f86d3a8 LB |
970 | /** |
971 | * tick_nohz_get_sleep_length - return the length of the current sleep | |
972 | * | |
973 | * Called from power state control code with interrupts disabled | |
974 | */ | |
975 | ktime_t tick_nohz_get_sleep_length(void) | |
976 | { | |
22127e93 | 977 | struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); |
4f86d3a8 LB |
978 | |
979 | return ts->sleep_length; | |
980 | } | |
981 | ||
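An illustrative governor-style caller of tick_nohz_get_sleep_length() (hypothetical, far simpler than the real menu governor): pick the deepest C-state whose target residency still fits in the predicted sleep.

```c
#include <linux/cpuidle.h>

/* Hypothetical: deepest state whose residency fits the predicted sleep */
static int my_pick_state(struct cpuidle_driver *drv)
{
	s64 sleep_us = ktime_to_us(tick_nohz_get_sleep_length());
	int i, state = 0;

	for (i = 1; i < drv->state_count; i++) {
		if (drv->states[i].target_residency <= sleep_us)
			state = i;
	}
	return state;
}
```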
2ac0d98f FW |
982 | static void tick_nohz_account_idle_ticks(struct tick_sched *ts) |
983 | { | |
3f4724ea | 984 | #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE |
2ac0d98f | 985 | unsigned long ticks; |
3f4724ea | 986 | |
55dbdcfa | 987 | if (vtime_accounting_cpu_enabled()) |
3f4724ea | 988 | return; |
79bf2bb3 TG |
989 | /* |
990 | * We stopped the tick in idle. Update process times would miss the | |
991 | * time we slept as update_process_times does only a 1 tick | |
992 | * accounting. Enforce that this is accounted to idle ! | |
993 | */ | |
994 | ticks = jiffies - ts->idle_jiffies; | |
995 | /* | |
996 | * We might be one off. Do not randomly account a huge number of ticks! | |
997 | */ | |
79741dd3 MS |
998 | if (ticks && ticks < LONG_MAX) |
999 | account_idle_ticks(ticks); | |
1000 | #endif | |
19f5f736 FW |
1001 | } |
1002 | ||
79bf2bb3 | 1003 | /** |
280f0677 | 1004 | * tick_nohz_idle_exit - restart the idle tick from the idle task |
79bf2bb3 TG |
1005 | * |
1006 | * Restart the idle tick when the CPU is woken up from idle. |
280f0677 FW |
1007 | * This also exits the RCU extended quiescent state. The CPU |
1008 | * can use RCU again after this function is called. | |
79bf2bb3 | 1009 | */ |
280f0677 | 1010 | void tick_nohz_idle_exit(void) |
79bf2bb3 | 1011 | { |
4a32fea9 | 1012 | struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); |
6378ddb5 | 1013 | ktime_t now; |
79bf2bb3 | 1014 | |
6378ddb5 | 1015 | local_irq_disable(); |
2bbb6817 | 1016 | |
15f827be FW |
1017 | WARN_ON_ONCE(!ts->inidle); |
1018 | ||
1019 | ts->inidle = 0; | |
1020 | ||
1021 | if (ts->idle_active || ts->tick_stopped) | |
eed3b9cf MS |
1022 | now = ktime_get(); |
1023 | ||
1024 | if (ts->idle_active) | |
e8fcaa5c | 1025 | tick_nohz_stop_idle(ts, now); |
6378ddb5 | 1026 | |
2ac0d98f | 1027 | if (ts->tick_stopped) { |
525705d1 | 1028 | tick_nohz_restart_sched_tick(ts, now, 0); |
2ac0d98f | 1029 | tick_nohz_account_idle_ticks(ts); |
6378ddb5 | 1030 | } |
79bf2bb3 | 1031 | |
79bf2bb3 TG |
1032 | local_irq_enable(); |
1033 | } | |
1034 | ||
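How the idle task brackets its loop with the enter/exit pair, sketched from the shape of kernel/sched/idle.c (simplified; the RCU hooks, polling and broadcast handling are omitted):

```c
/* Simplified idle-task loop showing where the two calls sit */
static void my_idle_loop(void)
{
	while (1) {
		tick_nohz_idle_enter();		/* may stop the tick */

		while (!need_resched()) {
			local_irq_disable();
			arch_cpu_idle();	/* re-enables interrupts */
		}

		tick_nohz_idle_exit();		/* restart tick, account idle */
		schedule_preempt_disabled();
	}
}
```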
79bf2bb3 TG |
1035 | /* |
1036 | * The nohz low res interrupt handler | |
1037 | */ | |
1038 | static void tick_nohz_handler(struct clock_event_device *dev) | |
1039 | { | |
22127e93 | 1040 | struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); |
79bf2bb3 TG |
1041 | struct pt_regs *regs = get_irq_regs(); |
1042 | ktime_t now = ktime_get(); | |
1043 | ||
1044 | dev->next_event.tv64 = KTIME_MAX; | |
1045 | ||
5bb96226 | 1046 | tick_sched_do_timer(now); |
9e8f559b | 1047 | tick_sched_handle(ts, regs); |
79bf2bb3 | 1048 | |
b5e995e6 VK |
1049 | /* No need to reprogram if we are running tickless */ |
1050 | if (unlikely(ts->tick_stopped)) | |
1051 | return; | |
1052 | ||
0ff53d09 TG |
1053 | hrtimer_forward(&ts->sched_timer, now, tick_period); |
1054 | tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1); | |
79bf2bb3 TG |
1055 | } |
1056 | ||
bc7a34b8 TG |
1057 | static inline void tick_nohz_activate(struct tick_sched *ts, int mode) |
1058 | { | |
1059 | if (!tick_nohz_enabled) | |
1060 | return; | |
1061 | ts->nohz_mode = mode; | |
1062 | /* One update is enough */ | |
1063 | if (!test_and_set_bit(0, &tick_nohz_active)) | |
683be13a | 1064 | timers_update_migration(true); |
bc7a34b8 TG |
1065 | } |
1066 | ||
79bf2bb3 TG |
1067 | /** |
1068 | * tick_nohz_switch_to_nohz - switch to nohz mode | |
1069 | */ | |
1070 | static void tick_nohz_switch_to_nohz(void) | |
1071 | { | |
22127e93 | 1072 | struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); |
79bf2bb3 TG |
1073 | ktime_t next; |
1074 | ||
27630532 | 1075 | if (!tick_nohz_enabled) |
79bf2bb3 TG |
1076 | return; |
1077 | ||
6b442bc8 | 1078 | if (tick_switch_to_oneshot(tick_nohz_handler)) |
79bf2bb3 | 1079 | return; |
6b442bc8 | 1080 | |
79bf2bb3 TG |
1081 | /* |
1082 | * Recycle the hrtimer in ts, so we can share the | |
1083 | * hrtimer_forward with the highres code. | |
1084 | */ | |
1085 | hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); | |
1086 | /* Get the next period */ | |
1087 | next = tick_init_jiffy_update(); | |
1088 | ||
0ff53d09 | 1089 | hrtimer_set_expires(&ts->sched_timer, next); |
1ca8ec53 WL |
1090 | hrtimer_forward_now(&ts->sched_timer, tick_period); |
1091 | tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1); | |
bc7a34b8 | 1092 | tick_nohz_activate(ts, NOHZ_MODE_LOWRES); |
79bf2bb3 TG |
1093 | } |
1094 | ||
fb02fbc1 TG |
1095 | /* |
1096 | * When NOHZ is enabled and the tick is stopped, we need to kick the | |
1097 | * tick timer from irq_enter() so that the jiffies update is kept | |
1098 | * alive during long running softirqs. That's ugly as hell, but | |
1099 | * correctness is key even if we need to fix the offending softirq in | |
1100 | * the first place. | |
1101 | * | |
1102 | * Note, this is different to tick_nohz_restart. We just kick the | |
1103 | * timer and do not touch the other magic bits which need to be done | |
1104 | * when idle is left. | |
1105 | */ | |
e8fcaa5c | 1106 | static void tick_nohz_kick_tick(struct tick_sched *ts, ktime_t now) |
fb02fbc1 | 1107 | { |
ae99286b TG |
1108 | #if 0 |
1109 | /* Switch back to 2.6.27 behaviour */ | |
eed3b9cf | 1110 | ktime_t delta; |
fb02fbc1 | 1111 | |
c4bd822e TG |
1112 | /* |
1113 | * Do not touch the tick device, when the next expiry is either | |
1114 | * already reached or less/equal than the tick period. | |
1115 | */ | |
268a3dcf | 1116 | delta = ktime_sub(hrtimer_get_expires(&ts->sched_timer), now); |
c4bd822e TG |
1117 | if (delta.tv64 <= tick_period.tv64) |
1118 | return; | |
1119 | ||
1120 | tick_nohz_restart(ts, now); | |
ae99286b | 1121 | #endif |
fb02fbc1 TG |
1122 | } |
1123 | ||
5acac1be | 1124 | static inline void tick_nohz_irq_enter(void) |
eed3b9cf | 1125 | { |
4a32fea9 | 1126 | struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); |
eed3b9cf MS |
1127 | ktime_t now; |
1128 | ||
1129 | if (!ts->idle_active && !ts->tick_stopped) | |
1130 | return; | |
1131 | now = ktime_get(); | |
1132 | if (ts->idle_active) | |
e8fcaa5c | 1133 | tick_nohz_stop_idle(ts, now); |
eed3b9cf MS |
1134 | if (ts->tick_stopped) { |
1135 | tick_nohz_update_jiffies(now); | |
e8fcaa5c | 1136 | tick_nohz_kick_tick(ts, now); |
eed3b9cf MS |
1137 | } |
1138 | } | |
1139 | ||
79bf2bb3 TG |
1140 | #else |
1141 | ||
1142 | static inline void tick_nohz_switch_to_nohz(void) { } | |
5acac1be | 1143 | static inline void tick_nohz_irq_enter(void) { } |
bc7a34b8 | 1144 | static inline void tick_nohz_activate(struct tick_sched *ts, int mode) { } |
79bf2bb3 | 1145 | |
3451d024 | 1146 | #endif /* CONFIG_NO_HZ_COMMON */ |
79bf2bb3 | 1147 | |
719254fa TG |
1148 | /* |
1149 | * Called from irq_enter to notify about the possible interruption of idle() | |
1150 | */ | |
5acac1be | 1151 | void tick_irq_enter(void) |
719254fa | 1152 | { |
e8fcaa5c | 1153 | tick_check_oneshot_broadcast_this_cpu(); |
5acac1be | 1154 | tick_nohz_irq_enter(); |
719254fa TG |
1155 | } |
1156 | ||
79bf2bb3 TG |
1157 | /* |
1158 | * High resolution timer specific code | |
1159 | */ | |
1160 | #ifdef CONFIG_HIGH_RES_TIMERS | |
1161 | /* | |
4c9dc641 | 1162 | * We rearm the timer until we get disabled by the idle code. |
351f181f | 1163 | * Called with interrupts disabled. |
79bf2bb3 TG |
1164 | */ |
1165 | static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer) | |
1166 | { | |
1167 | struct tick_sched *ts = | |
1168 | container_of(timer, struct tick_sched, sched_timer); | |
79bf2bb3 TG |
1169 | struct pt_regs *regs = get_irq_regs(); |
1170 | ktime_t now = ktime_get(); | |
d3ed7824 | 1171 | |
5bb96226 | 1172 | tick_sched_do_timer(now); |
79bf2bb3 TG |
1173 | |
1174 | /* | |
1175 | * Do not call when we are not in irq context and have |
1176 | * no valid regs pointer | |
1177 | */ | |
9e8f559b FW |
1178 | if (regs) |
1179 | tick_sched_handle(ts, regs); | |
79bf2bb3 | 1180 | |
2a16fc93 VK |
1181 | /* No need to reprogram if we are in idle or full dynticks mode */ |
1182 | if (unlikely(ts->tick_stopped)) | |
1183 | return HRTIMER_NORESTART; | |
1184 | ||
79bf2bb3 TG |
1185 | hrtimer_forward(timer, now, tick_period); |
1186 | ||
1187 | return HRTIMER_RESTART; | |
1188 | } | |
1189 | ||
5307c955 MG |
1190 | static int sched_skew_tick; |
1191 | ||
62cf20b3 TG |
1192 | static int __init skew_tick(char *str) |
1193 | { | |
1194 | get_option(&str, &sched_skew_tick); | |
1195 | ||
1196 | return 0; | |
1197 | } | |
1198 | early_param("skew_tick", skew_tick); | |
1199 | ||
79bf2bb3 TG |
1200 | /** |
1201 | * tick_setup_sched_timer - setup the tick emulation timer | |
1202 | */ | |
1203 | void tick_setup_sched_timer(void) | |
1204 | { | |
22127e93 | 1205 | struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); |
79bf2bb3 TG |
1206 | ktime_t now = ktime_get(); |
1207 | ||
1208 | /* | |
1209 | * Emulate tick processing via per-CPU hrtimers: | |
1210 | */ | |
1211 | hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); | |
1212 | ts->sched_timer.function = tick_sched_timer; | |
79bf2bb3 | 1213 | |
3704540b | 1214 | /* Get the next period (per cpu) */ |
cc584b21 | 1215 | hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update()); |
79bf2bb3 | 1216 | |
9c3f9e28 | 1217 | /* Offset the tick to avert jiffies_lock contention. */ |
5307c955 MG |
1218 | if (sched_skew_tick) { |
1219 | u64 offset = ktime_to_ns(tick_period) >> 1; | |
1220 | do_div(offset, num_possible_cpus()); | |
1221 | offset *= smp_processor_id(); | |
1222 | hrtimer_add_expires_ns(&ts->sched_timer, offset); | |
1223 | } | |
1224 | ||
afc08b15 TG |
1225 | hrtimer_forward(&ts->sched_timer, now, tick_period); |
1226 | hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED); | |
bc7a34b8 | 1227 | tick_nohz_activate(ts, NOHZ_MODE_HIGHRES); |
79bf2bb3 | 1228 | } |
3c4fbe5e | 1229 | #endif /* HIGH_RES_TIMERS */ |
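A worked example of the skew computation in tick_setup_sched_timer() above (standalone C, illustration only): half a tick period is divided among the possible CPUs, so with HZ=250 (4 ms ticks) and 8 CPUs each CPU's tick lands 250 us after the previous one's, spreading out jiffies_lock contention.

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t tick_period_ns = 4000000;	/* assumed: HZ=250 */
	unsigned int ncpus = 8, cpu;

	for (cpu = 0; cpu < ncpus; cpu++) {
		/* offset = (tick_period / 2) / num_possible_cpus() * cpu */
		uint64_t offset = (tick_period_ns >> 1) / ncpus * cpu;

		printf("cpu%u: +%llu us\n", cpu,
		       (unsigned long long)(offset / 1000));
	}
	return 0;
}
```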
79bf2bb3 | 1230 | |
3451d024 | 1231 | #if defined CONFIG_NO_HZ_COMMON || defined CONFIG_HIGH_RES_TIMERS |
79bf2bb3 TG |
1232 | void tick_cancel_sched_timer(int cpu) |
1233 | { | |
1234 | struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); | |
1235 | ||
3c4fbe5e | 1236 | # ifdef CONFIG_HIGH_RES_TIMERS |
79bf2bb3 TG |
1237 | if (ts->sched_timer.base) |
1238 | hrtimer_cancel(&ts->sched_timer); | |
3c4fbe5e | 1239 | # endif |
a7901766 | 1240 | |
4b0c0f29 | 1241 | memset(ts, 0, sizeof(*ts)); |
79bf2bb3 | 1242 | } |
3c4fbe5e | 1243 | #endif |
79bf2bb3 TG |
1244 | |
1245 | /** | |
1246 | * Async notification about clocksource changes | |
1247 | */ | |
1248 | void tick_clock_notify(void) | |
1249 | { | |
1250 | int cpu; | |
1251 | ||
1252 | for_each_possible_cpu(cpu) | |
1253 | set_bit(0, &per_cpu(tick_cpu_sched, cpu).check_clocks); | |
1254 | } | |
1255 | ||
1256 | /* | |
1257 | * Async notification about clock event changes | |
1258 | */ | |
1259 | void tick_oneshot_notify(void) | |
1260 | { | |
22127e93 | 1261 | struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); |
79bf2bb3 TG |
1262 | |
1263 | set_bit(0, &ts->check_clocks); | |
1264 | } | |
1265 | ||
1266 | /** | |
1267 | * Check if a change happened which makes oneshot possible. |
1268 | * | |
1269 | * Called cyclically from the hrtimer softirq (driven by the timer |
1270 | * softirq). allow_nohz signals that we can switch into low-res nohz |
1271 | * mode, because high resolution timers are disabled (either at compile |
6b442bc8 | 1272 | * time or at runtime). Called with interrupts disabled. |
79bf2bb3 TG |
1273 | */ |
1274 | int tick_check_oneshot_change(int allow_nohz) | |
1275 | { | |
22127e93 | 1276 | struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); |
79bf2bb3 TG |
1277 | |
1278 | if (!test_and_clear_bit(0, &ts->check_clocks)) | |
1279 | return 0; | |
1280 | ||
1281 | if (ts->nohz_mode != NOHZ_MODE_INACTIVE) | |
1282 | return 0; | |
1283 | ||
cf4fc6cb | 1284 | if (!timekeeping_valid_for_hres() || !tick_is_oneshot_available()) |
79bf2bb3 TG |
1285 | return 0; |
1286 | ||
1287 | if (!allow_nohz) | |
1288 | return 1; | |
1289 | ||
1290 | tick_nohz_switch_to_nohz(); | |
1291 | return 0; | |
1292 | } |