/*
 * linux/kernel/time/tick-sched.c
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007 Timesys Corp., Thomas Gleixner
 *
 * No idle tick implementation for low and high resolution timers
 *
 * Started by: Thomas Gleixner and Ingo Molnar
 *
 * Distribute under GPLv2.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/irq_work.h>
#include <linux/posix-timers.h>
#include <linux/perf_event.h>
#include <linux/context_tracking.h>

#include <asm/irq_regs.h>

#include "tick-internal.h"

#include <trace/events/timer.h>

/*
 * Per cpu nohz control structure
 */
static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);

/*
 * The time when the last jiffy update happened. Protected by jiffies_lock.
 */
static ktime_t last_jiffies_update;

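/* Return the per-CPU tick_sched structure for the given CPU. */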
struct tick_sched *tick_get_tick_sched(int cpu)
{
	return &per_cpu(tick_cpu_sched, cpu);
}

/*
 * Must be called with interrupts disabled!
 */
static void tick_do_update_jiffies64(ktime_t now)
{
	unsigned long ticks = 0;
	ktime_t delta;

	/*
	 * Do a quick check without holding jiffies_lock:
	 */
	delta = ktime_sub(now, last_jiffies_update);
	if (delta.tv64 < tick_period.tv64)
		return;

	/* Reevaluate with jiffies_lock held */
	write_seqlock(&jiffies_lock);

	delta = ktime_sub(now, last_jiffies_update);
	if (delta.tv64 >= tick_period.tv64) {

		delta = ktime_sub(delta, tick_period);
		last_jiffies_update = ktime_add(last_jiffies_update,
						tick_period);

		/* Slow path for long timeouts */
		if (unlikely(delta.tv64 >= tick_period.tv64)) {
			s64 incr = ktime_to_ns(tick_period);

			ticks = ktime_divns(delta, incr);

			last_jiffies_update = ktime_add_ns(last_jiffies_update,
							   incr * ticks);
		}
		do_timer(++ticks);

		/* Keep the tick_next_period variable up to date */
		tick_next_period = ktime_add(last_jiffies_update, tick_period);
	} else {
		write_sequnlock(&jiffies_lock);
		return;
	}
	write_sequnlock(&jiffies_lock);
	update_wall_time();
}

/*
 * Initialize and return the time of the last jiffies update.
 */
static ktime_t tick_init_jiffy_update(void)
{
	ktime_t period;

	write_seqlock(&jiffies_lock);
	/* Did we start the jiffies update yet? */
	if (last_jiffies_update.tv64 == 0)
		last_jiffies_update = tick_next_period;
	period = last_jiffies_update;
	write_sequnlock(&jiffies_lock);
	return period;
}

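/*
 * Handle the global part of the tick: take over the do_timer duty if
 * it is currently unassigned (and this is not a full dynticks CPU),
 * then update jiffies64 if this CPU owns the duty.
 */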
static void tick_sched_do_timer(ktime_t now)
{
	int cpu = smp_processor_id();

#ifdef CONFIG_NO_HZ_COMMON
	/*
	 * Check if the do_timer duty was dropped. We don't care about
	 * concurrency: This happens only when the cpu in charge went
	 * into a long sleep. If two cpus happen to assign themselves to
	 * this duty, then the jiffies update is still serialized by
	 * jiffies_lock.
	 */
	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)
	    && !tick_nohz_full_cpu(cpu))
		tick_do_timer_cpu = cpu;
#endif

	/* Check, if the jiffies need an update */
	if (tick_do_timer_cpu == cpu)
		tick_do_update_jiffies64(now);
}

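/*
 * Handle the per-CPU part of the tick: keep the softlockup watchdog
 * and idle accounting sane while the tick is stopped, then run the
 * regular per-tick work (process times, profiling).
 */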
static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
{
#ifdef CONFIG_NO_HZ_COMMON
	/*
	 * When we are idle and the tick is stopped, we have to touch
	 * the watchdog as we might not schedule for a really long
	 * time. This happens on complete idle SMP systems while
	 * waiting on the login prompt. We also increment the "start of
	 * idle" jiffy stamp so the idle accounting adjustment we do
	 * when we go busy again does not account too many ticks.
	 */
	if (ts->tick_stopped) {
		touch_softlockup_watchdog();
		if (is_idle_task(current))
			ts->idle_jiffies++;
	}
#endif
	update_process_times(user_mode(regs));
	profile_tick(CPU_PROFILING);
}

#ifdef CONFIG_NO_HZ_FULL
cpumask_var_t tick_nohz_full_mask;
cpumask_var_t housekeeping_mask;
bool tick_nohz_full_running;

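/*
 * Check whether the tick can be stopped on a full dynticks CPU: the
 * scheduler, posix cpu timers, perf and the sched clock all have to
 * agree, otherwise the tick must keep running.
 */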
static bool can_stop_full_tick(void)
{
	WARN_ON_ONCE(!irqs_disabled());

	if (!sched_can_stop_tick()) {
		trace_tick_stop(0, "more than 1 task in runqueue\n");
		return false;
	}

	if (!posix_cpu_timers_can_stop_tick(current)) {
		trace_tick_stop(0, "posix timers running\n");
		return false;
	}

	if (!perf_event_can_stop_tick()) {
		trace_tick_stop(0, "perf events running\n");
		return false;
	}

	/* sched_clock_tick() needs us? */
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	/*
	 * TODO: kick full dynticks CPUs when
	 * sched_clock_stable is set.
	 */
	if (!sched_clock_stable()) {
		trace_tick_stop(0, "unstable sched clock\n");
		/*
		 * Don't allow the user to think they can get
		 * full NO_HZ with this machine.
		 */
		WARN_ONCE(tick_nohz_full_running,
			  "NO_HZ FULL will not work with unstable sched clock");
		return false;
	}
#endif

	return true;
}

static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now);

/*
 * Re-evaluate the need for the tick on the current CPU
 * and restart it if necessary.
 */
void __tick_nohz_full_check(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	if (tick_nohz_full_cpu(smp_processor_id())) {
		if (ts->tick_stopped && !is_idle_task(current)) {
			if (!can_stop_full_tick())
				tick_nohz_restart_sched_tick(ts, ktime_get());
		}
	}
}

static void nohz_full_kick_work_func(struct irq_work *work)
{
	__tick_nohz_full_check();
}

static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
	.func = nohz_full_kick_work_func,
};

/*
 * Kick this CPU if it's full dynticks in order to force it to
 * re-evaluate its dependency on the tick and restart it if necessary.
 * This kick, unlike tick_nohz_full_kick_cpu() and tick_nohz_full_kick_all(),
 * is NMI safe.
 */
void tick_nohz_full_kick(void)
{
	if (!tick_nohz_full_cpu(smp_processor_id()))
		return;

	irq_work_queue(this_cpu_ptr(&nohz_full_kick_work));
}

/*
 * Kick the CPU if it's full dynticks in order to force it to
 * re-evaluate its dependency on the tick and restart it if necessary.
 */
void tick_nohz_full_kick_cpu(int cpu)
{
	if (!tick_nohz_full_cpu(cpu))
		return;

	irq_work_queue_on(&per_cpu(nohz_full_kick_work, cpu), cpu);
}

static void nohz_full_kick_ipi(void *info)
{
	__tick_nohz_full_check();
}

/*
 * Kick all full dynticks CPUs in order to force these to re-evaluate
 * their dependency on the tick and restart it if necessary.
 */
void tick_nohz_full_kick_all(void)
{
	if (!tick_nohz_full_running)
		return;

	preempt_disable();
	smp_call_function_many(tick_nohz_full_mask,
			       nohz_full_kick_ipi, NULL, false);
	tick_nohz_full_kick();
	preempt_enable();
}

/*
 * Re-evaluate the need for the tick as we switch the current task.
 * It might need the tick due to per task/process properties:
 * perf events, posix cpu timers, ...
 */
void __tick_nohz_task_switch(struct task_struct *tsk)
{
	unsigned long flags;

	local_irq_save(flags);

	if (!tick_nohz_full_cpu(smp_processor_id()))
		goto out;

	if (tick_nohz_tick_stopped() && !can_stop_full_tick())
		tick_nohz_full_kick();

out:
	local_irq_restore(flags);
}

/* Parse the boot-time nohz CPU list from the kernel parameters. */
static int __init tick_nohz_full_setup(char *str)
{
	alloc_bootmem_cpumask_var(&tick_nohz_full_mask);
	if (cpulist_parse(str, tick_nohz_full_mask) < 0) {
		pr_warning("NOHZ: Incorrect nohz_full cpumask\n");
		free_bootmem_cpumask_var(tick_nohz_full_mask);
		return 1;
	}
	tick_nohz_full_running = true;

	return 1;
}
__setup("nohz_full=", tick_nohz_full_setup);

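/*
 * CPU hotplug notifier: refuse to take a CPU offline while it handles
 * the timekeeping duty on behalf of full dynticks CPUs.
 */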
static int tick_nohz_cpu_down_callback(struct notifier_block *nfb,
				       unsigned long action,
				       void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DOWN_PREPARE:
		/*
		 * If we handle the timekeeping duty for full dynticks CPUs,
		 * we can't safely shutdown that CPU.
		 */
		if (tick_nohz_full_running && tick_do_timer_cpu == cpu)
			return NOTIFY_BAD;
		break;
	}
	return NOTIFY_OK;
}

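/*
 * With CONFIG_NO_HZ_FULL_ALL, mark every possible CPU as full
 * dynticks. Returns 0 on success, -1 when full dynticks stays off.
 */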
static int tick_nohz_init_all(void)
{
	int err = -1;

#ifdef CONFIG_NO_HZ_FULL_ALL
	if (!alloc_cpumask_var(&tick_nohz_full_mask, GFP_KERNEL)) {
		WARN(1, "NO_HZ: Can't allocate full dynticks cpumask\n");
		return err;
	}
	err = 0;
	cpumask_setall(tick_nohz_full_mask);
	tick_nohz_full_running = true;
#endif
	return err;
}

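/*
 * Boot time initialization of full dynticks: allocate the
 * housekeeping cpumask, keep the boot CPU out of the nohz_full range
 * for timekeeping and enable context tracking on all full dynticks
 * CPUs.
 */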
void __init tick_nohz_init(void)
{
	int cpu;

	if (!tick_nohz_full_running) {
		if (tick_nohz_init_all() < 0)
			return;
	}

	if (!alloc_cpumask_var(&housekeeping_mask, GFP_KERNEL)) {
		WARN(1, "NO_HZ: Can't allocate not-full dynticks cpumask\n");
		cpumask_clear(tick_nohz_full_mask);
		tick_nohz_full_running = false;
		return;
	}

	/*
	 * Full dynticks uses irq work to drive the tick rescheduling on safe
	 * locking contexts. But then we need irq work to raise its own
	 * interrupts to avoid a circular dependency on the tick.
	 */
	if (!arch_irq_work_has_interrupt()) {
		pr_warning("NO_HZ: Can't run full dynticks because arch doesn't "
			   "support irq work self-IPIs\n");
		cpumask_clear(tick_nohz_full_mask);
		cpumask_copy(housekeeping_mask, cpu_possible_mask);
		tick_nohz_full_running = false;
		return;
	}

	cpu = smp_processor_id();

	if (cpumask_test_cpu(cpu, tick_nohz_full_mask)) {
		pr_warning("NO_HZ: Clearing %d from nohz_full range for timekeeping\n", cpu);
		cpumask_clear_cpu(cpu, tick_nohz_full_mask);
	}

	cpumask_andnot(housekeeping_mask,
		       cpu_possible_mask, tick_nohz_full_mask);

	for_each_cpu(cpu, tick_nohz_full_mask)
		context_tracking_cpu_set(cpu);

	cpu_notifier(tick_nohz_cpu_down_callback, 0);
	pr_info("NO_HZ: Full dynticks CPUs: %*pbl.\n",
		cpumask_pr_args(tick_nohz_full_mask));
}
#endif

/*
 * NOHZ - aka dynamic tick functionality
 */
#ifdef CONFIG_NO_HZ_COMMON
/*
 * NO HZ enabled?
 */
static int tick_nohz_enabled __read_mostly = 1;
int tick_nohz_active __read_mostly;
/*
 * Enable / Disable tickless mode
 */
static int __init setup_tick_nohz(char *str)
{
	if (!strcmp(str, "off"))
		tick_nohz_enabled = 0;
	else if (!strcmp(str, "on"))
		tick_nohz_enabled = 1;
	else
		return 0;
	return 1;
}

__setup("nohz=", setup_tick_nohz);

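/*
 * Report whether the periodic tick is currently stopped on this CPU.
 */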
int tick_nohz_tick_stopped(void)
{
	return __this_cpu_read(tick_cpu_sched.tick_stopped);
}

/**
 * tick_nohz_update_jiffies - update jiffies when idle was interrupted
 *
 * Called from interrupt entry when the CPU was idle
 *
 * In case the sched_tick was stopped on this CPU, we have to check if jiffies
 * must be updated. Otherwise an interrupt handler could use a stale jiffy
 * value. We do this unconditionally on any cpu, as we don't know whether the
 * cpu which has the update task assigned is in a long sleep.
 */
static void tick_nohz_update_jiffies(ktime_t now)
{
	unsigned long flags;

	__this_cpu_write(tick_cpu_sched.idle_waketime, now);

	local_irq_save(flags);
	tick_do_update_jiffies64(now);
	local_irq_restore(flags);

	touch_softlockup_watchdog();
}

/*
 * Updates the per cpu time idle statistics counters
 */
static void
update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_update_time)
{
	ktime_t delta;

	if (ts->idle_active) {
		delta = ktime_sub(now, ts->idle_entrytime);
		if (nr_iowait_cpu(cpu) > 0)
			ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta);
		else
			ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
		ts->idle_entrytime = now;
	}

	if (last_update_time)
		*last_update_time = ktime_to_us(now);
}

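/*
 * Close the current idle period: fold the elapsed idle time into the
 * per-CPU statistics and notify the sched clock about the wakeup.
 */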
static void tick_nohz_stop_idle(struct tick_sched *ts, ktime_t now)
{
	update_ts_time_stats(smp_processor_id(), ts, now, NULL);
	ts->idle_active = 0;

	sched_clock_idle_wakeup_event(0);
}

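/*
 * Open a new idle period: record the entry time, mark idle accounting
 * active and notify the sched clock. Returns the entry timestamp.
 */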
static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
{
	ktime_t now = ktime_get();

	ts->idle_entrytime = now;
	ts->idle_active = 1;
	sched_clock_idle_sleep_event();
	return now;
}

/**
 * get_cpu_idle_time_us - get the total idle time of a cpu
 * @cpu: CPU number to query
 * @last_update_time: variable to store update time in. Do not update
 * counters if NULL.
 *
 * Return the cumulative idle time (since boot) for a given
 * CPU, in microseconds.
 *
 * This time is measured via accounting rather than sampling,
 * and is as accurate as ktime_get() is.
 *
 * This function returns -1 if NOHZ is not enabled.
 */
u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	ktime_t now, idle;

	if (!tick_nohz_active)
		return -1;

	now = ktime_get();
	if (last_update_time) {
		update_ts_time_stats(cpu, ts, now, last_update_time);
		idle = ts->idle_sleeptime;
	} else {
		if (ts->idle_active && !nr_iowait_cpu(cpu)) {
			ktime_t delta = ktime_sub(now, ts->idle_entrytime);

			idle = ktime_add(ts->idle_sleeptime, delta);
		} else {
			idle = ts->idle_sleeptime;
		}
	}

	return ktime_to_us(idle);
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time_us);

/**
 * get_cpu_iowait_time_us - get the total iowait time of a cpu
 * @cpu: CPU number to query
 * @last_update_time: variable to store update time in. Do not update
 * counters if NULL.
 *
 * Return the cumulative iowait time (since boot) for a given
 * CPU, in microseconds.
 *
 * This time is measured via accounting rather than sampling,
 * and is as accurate as ktime_get() is.
 *
 * This function returns -1 if NOHZ is not enabled.
 */
u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	ktime_t now, iowait;

	if (!tick_nohz_active)
		return -1;

	now = ktime_get();
	if (last_update_time) {
		update_ts_time_stats(cpu, ts, now, last_update_time);
		iowait = ts->iowait_sleeptime;
	} else {
		if (ts->idle_active && nr_iowait_cpu(cpu) > 0) {
			ktime_t delta = ktime_sub(now, ts->idle_entrytime);

			iowait = ktime_add(ts->iowait_sleeptime, delta);
		} else {
			iowait = ts->iowait_sleeptime;
		}
	}

	return ktime_to_us(iowait);
}
EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);

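/*
 * Rearm the tick after it was stopped: restart the sched timer from
 * the last tick and program the next expiry, either via the hrtimer
 * (highres) or the clock event device (lowres).
 */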
static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
{
	hrtimer_cancel(&ts->sched_timer);
	hrtimer_set_expires(&ts->sched_timer, ts->last_tick);

	/* Forward the time to expire in the future */
	hrtimer_forward(&ts->sched_timer, now, tick_period);

	if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
		hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED);
	else
		tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
}

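/*
 * Core of the dynticks logic: stop the periodic tick when the next
 * required event (timer wheel, RCU, irq work) is more than one jiffy
 * away. Returns the new tick expiry (KTIME_MAX when the tick is
 * stopped indefinitely) or 0 when the tick event was left unchanged.
 */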
static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
					 ktime_t now, int cpu)
{
	unsigned long seq, last_jiffies, next_jiffies, delta_jiffies;
	ktime_t last_update, expires, ret = { .tv64 = 0 };
	unsigned long rcu_delta_jiffies;
	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
	u64 time_delta;

	time_delta = timekeeping_max_deferment();

	/* Read jiffies and the time when jiffies were updated last */
	do {
		seq = read_seqbegin(&jiffies_lock);
		last_update = last_jiffies_update;
		last_jiffies = jiffies;
	} while (read_seqretry(&jiffies_lock, seq));

	if (rcu_needs_cpu(&rcu_delta_jiffies) ||
	    arch_needs_cpu() || irq_work_needs_cpu()) {
		next_jiffies = last_jiffies + 1;
		delta_jiffies = 1;
	} else {
		/* Get the next timer wheel timer */
		next_jiffies = get_next_timer_interrupt(last_jiffies);
		delta_jiffies = next_jiffies - last_jiffies;
		if (rcu_delta_jiffies < delta_jiffies) {
			next_jiffies = last_jiffies + rcu_delta_jiffies;
			delta_jiffies = rcu_delta_jiffies;
		}
	}

	/*
	 * Do not stop the tick, if we are only one off (or less)
	 * or if the cpu is required for RCU:
	 */
	if (!ts->tick_stopped && delta_jiffies <= 1)
		goto out;

	/* Schedule the tick, if we are at least one jiffy off */
	if ((long)delta_jiffies >= 1) {

		/*
		 * If this cpu is the one which updates jiffies, then
		 * give up the assignment and let it be taken by the
		 * cpu which runs the tick timer next, which might be
		 * this cpu as well. If we don't drop this here the
		 * jiffies might be stale and do_timer() might never be
		 * invoked. Keep track of the fact that it was the one
		 * which had the do_timer() duty last. If this cpu is
		 * the one which had the do_timer() duty last, we
		 * limit the sleep time to the timekeeping
		 * max_deferment value which we retrieved
		 * above. Otherwise we can sleep as long as we want.
		 */
		if (cpu == tick_do_timer_cpu) {
			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
			ts->do_timer_last = 1;
		} else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) {
			time_delta = KTIME_MAX;
			ts->do_timer_last = 0;
		} else if (!ts->do_timer_last) {
			time_delta = KTIME_MAX;
		}

#ifdef CONFIG_NO_HZ_FULL
		if (!ts->inidle) {
			time_delta = min(time_delta,
					 scheduler_tick_max_deferment());
		}
#endif

		/*
		 * Calculate the expiry time for the next timer wheel
		 * timer. delta_jiffies >= NEXT_TIMER_MAX_DELTA signals
		 * that there is no timer pending or at least extremely
		 * far into the future (12 days for HZ=1000). In this
		 * case we set the expiry to the end of time.
		 */
		if (likely(delta_jiffies < NEXT_TIMER_MAX_DELTA)) {
			/*
			 * Calculate the time delta for the next timer event.
			 * If the time delta exceeds the maximum time delta
			 * permitted by the current clocksource then adjust
			 * the time delta accordingly to ensure the
			 * clocksource does not wrap.
			 */
			time_delta = min_t(u64, time_delta,
					   tick_period.tv64 * delta_jiffies);
		}

		if (time_delta < KTIME_MAX)
			expires = ktime_add_ns(last_update, time_delta);
		else
			expires.tv64 = KTIME_MAX;

		/* Skip reprogram of event if it's not changed */
		if (ts->tick_stopped && ktime_equal(expires, dev->next_event))
			goto out;

		ret = expires;

		/*
		 * nohz_stop_sched_tick can be called several times before
		 * the nohz_restart_sched_tick is called. This happens when
		 * interrupts arrive which do not cause a reschedule. In the
		 * first call we save the current tick time, so we can restart
		 * the scheduler tick in nohz_restart_sched_tick.
		 */
		if (!ts->tick_stopped) {
			nohz_balance_enter_idle(cpu);
			calc_load_enter_idle();

			ts->last_tick = hrtimer_get_expires(&ts->sched_timer);
			ts->tick_stopped = 1;
			trace_tick_stop(1, " ");
		}

		/*
		 * If the expiration time == KTIME_MAX, then
		 * in this case we simply stop the tick timer.
		 */
		if (unlikely(expires.tv64 == KTIME_MAX)) {
			if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
				hrtimer_cancel(&ts->sched_timer);
			goto out;
		}

		if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
			hrtimer_start(&ts->sched_timer, expires,
				      HRTIMER_MODE_ABS_PINNED);
		else
			tick_program_event(expires, 1);
	} else {
		/* Tick is stopped, but required now. Enforce it */
		tick_nohz_restart(ts, now);
	}

out:
	ts->next_jiffies = next_jiffies;
	ts->last_jiffies = last_jiffies;
	ts->sleep_length = ktime_sub(dev->next_event, now);

	return ret;
}

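/*
 * Try to stop the tick on a full dynticks CPU from interrupt exit,
 * provided nothing (scheduler, posix timers, perf, ...) requires it.
 */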
static void tick_nohz_full_stop_tick(struct tick_sched *ts)
{
#ifdef CONFIG_NO_HZ_FULL
	int cpu = smp_processor_id();

	if (!tick_nohz_full_cpu(cpu) || is_idle_task(current))
		return;

	if (!ts->tick_stopped && ts->nohz_mode == NOHZ_MODE_INACTIVE)
		return;

	if (!can_stop_full_tick())
		return;

	tick_nohz_stop_sched_tick(ts, ktime_get(), cpu);
#endif
}

static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
{
	/*
	 * If this cpu is offline and it is the one which updates
	 * jiffies, then give up the assignment and let it be taken by
	 * the cpu which runs the tick timer next. If we don't drop
	 * this here the jiffies might be stale and do_timer() might
	 * never be invoked.
	 */
	if (unlikely(!cpu_online(cpu))) {
		if (cpu == tick_do_timer_cpu)
			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
		return false;
	}

	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) {
		ts->sleep_length = (ktime_t) { .tv64 = NSEC_PER_SEC/HZ };
		return false;
	}

	if (need_resched())
		return false;

	if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
		static int ratelimit;

		if (ratelimit < 10 &&
		    (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) {
			pr_warn("NOHZ: local_softirq_pending %02x\n",
				(unsigned int) local_softirq_pending());
			ratelimit++;
		}
		return false;
	}

	if (tick_nohz_full_enabled()) {
		/*
		 * Keep the tick alive to guarantee timekeeping progression
		 * if there are full dynticks CPUs around
		 */
		if (tick_do_timer_cpu == cpu)
			return false;
		/*
		 * Boot safety: make sure the timekeeping duty has been
		 * assigned before entering dyntick-idle mode.
		 */
		if (tick_do_timer_cpu == TICK_DO_TIMER_NONE)
			return false;
	}

	return true;
}

static void __tick_nohz_idle_enter(struct tick_sched *ts)
{
	ktime_t now, expires;
	int cpu = smp_processor_id();

	now = tick_nohz_start_idle(ts);

	if (can_stop_idle_tick(cpu, ts)) {
		int was_stopped = ts->tick_stopped;

		ts->idle_calls++;

		expires = tick_nohz_stop_sched_tick(ts, now, cpu);
		if (expires.tv64 > 0LL) {
			ts->idle_sleeps++;
			ts->idle_expires = expires;
		}

		if (!was_stopped && ts->tick_stopped)
			ts->idle_jiffies = ts->last_jiffies;
	}
}

/**
 * tick_nohz_idle_enter - stop the idle tick from the idle task
 *
 * When the next event is more than a tick into the future, stop the idle tick.
 * Called when we start the idle loop.
 *
 * The arch is responsible for calling:
 *
 * - rcu_idle_enter() after its last use of RCU before the CPU is put
 *   to sleep.
 * - rcu_idle_exit() before the first use of RCU after the CPU is woken up.
 */
void tick_nohz_idle_enter(void)
{
	struct tick_sched *ts;

	WARN_ON_ONCE(irqs_disabled());

	/*
	 * Update the idle state in the scheduler domain hierarchy
	 * when tick_nohz_stop_sched_tick() is called from the idle loop.
	 * State will be updated to busy during the first busy tick after
	 * exiting idle.
	 */
	set_cpu_sd_state_idle();

	local_irq_disable();

	ts = this_cpu_ptr(&tick_cpu_sched);
	ts->inidle = 1;
	__tick_nohz_idle_enter(ts);

	local_irq_enable();
}

/**
 * tick_nohz_irq_exit - update next tick event from interrupt exit
 *
 * When an interrupt fires while we are idle and it doesn't cause
 * a reschedule, it may still add, modify or delete a timer, enqueue
 * an RCU callback, etc...
 * So we need to re-calculate and reprogram the next tick event.
 */
void tick_nohz_irq_exit(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	if (ts->inidle)
		__tick_nohz_idle_enter(ts);
	else
		tick_nohz_full_stop_tick(ts);
}

/**
 * tick_nohz_get_sleep_length - return the length of the current sleep
 *
 * Called from power state control code with interrupts disabled
 */
ktime_t tick_nohz_get_sleep_length(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	return ts->sleep_length;
}

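/*
 * Leave the tickless state: catch up the jiffies and load accounting
 * we skipped while idle, then rearm the periodic tick.
 */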
static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
{
	/* Update jiffies first */
	tick_do_update_jiffies64(now);
	update_cpu_load_nohz();

	calc_load_exit_idle();
	touch_softlockup_watchdog();
	/*
	 * Cancel the scheduled timer and restore the tick
	 */
	ts->tick_stopped = 0;
	ts->idle_exittime = now;

	tick_nohz_restart(ts, now);
}

static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
{
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	unsigned long ticks;

	if (vtime_accounting_enabled())
		return;
	/*
	 * We stopped the tick in idle. update_process_times() would miss
	 * the time we slept, as it only does a single tick of accounting.
	 * Enforce that this is accounted to idle!
	 */
	ticks = jiffies - ts->idle_jiffies;
	/*
	 * We might be one off. Do not randomly account a huge number of ticks!
	 */
	if (ticks && ticks < LONG_MAX)
		account_idle_ticks(ticks);
#endif
}

/**
 * tick_nohz_idle_exit - restart the idle tick from the idle task
 *
 * Restart the idle tick when the CPU is woken up from idle.
 * This also exits the RCU extended quiescent state. The CPU
 * can use RCU again after this function is called.
 */
void tick_nohz_idle_exit(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	ktime_t now;

	local_irq_disable();

	WARN_ON_ONCE(!ts->inidle);

	ts->inidle = 0;

	if (ts->idle_active || ts->tick_stopped)
		now = ktime_get();

	if (ts->idle_active)
		tick_nohz_stop_idle(ts, now);

	if (ts->tick_stopped) {
		tick_nohz_restart_sched_tick(ts, now);
		tick_nohz_account_idle_ticks(ts);
	}

	local_irq_enable();
}

/*
 * The nohz low res interrupt handler
 */
static void tick_nohz_handler(struct clock_event_device *dev)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	struct pt_regs *regs = get_irq_regs();
	ktime_t now = ktime_get();

	dev->next_event.tv64 = KTIME_MAX;

	tick_sched_do_timer(now);
	tick_sched_handle(ts, regs);

	/* No need to reprogram if we are running tickless */
	if (unlikely(ts->tick_stopped))
		return;

	hrtimer_forward(&ts->sched_timer, now, tick_period);
	tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
}

/**
 * tick_nohz_switch_to_nohz - switch to nohz mode
 */
static void tick_nohz_switch_to_nohz(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	ktime_t next;

	if (!tick_nohz_enabled)
		return;

	local_irq_disable();
	if (tick_switch_to_oneshot(tick_nohz_handler)) {
		local_irq_enable();
		return;
	}
	tick_nohz_active = 1;
	ts->nohz_mode = NOHZ_MODE_LOWRES;

	/*
	 * Recycle the hrtimer in ts, so we can share the
	 * hrtimer_forward with the highres code.
	 */
	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	/* Get the next period */
	next = tick_init_jiffy_update();

	hrtimer_forward_now(&ts->sched_timer, tick_period);
	hrtimer_set_expires(&ts->sched_timer, next);
	tick_program_event(next, 1);
	local_irq_enable();
}

/*
 * When NOHZ is enabled and the tick is stopped, we need to kick the
 * tick timer from irq_enter() so that the jiffies update is kept
 * alive during long running softirqs. That's ugly as hell, but
 * correctness is key even if we need to fix the offending softirq in
 * the first place.
 *
 * Note: this is different from tick_nohz_restart. We just kick the
 * timer and do not touch the other magic bits which need to be done
 * when idle is left.
 */
static void tick_nohz_kick_tick(struct tick_sched *ts, ktime_t now)
{
#if 0
	/* Switch back to 2.6.27 behaviour */
	ktime_t delta;

	/*
	 * Do not touch the tick device, when the next expiry is either
	 * already reached or less/equal than the tick period.
	 */
	delta = ktime_sub(hrtimer_get_expires(&ts->sched_timer), now);
	if (delta.tv64 <= tick_period.tv64)
		return;

	tick_nohz_restart(ts, now);
#endif
}

static inline void tick_nohz_irq_enter(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	ktime_t now;

	if (!ts->idle_active && !ts->tick_stopped)
		return;
	now = ktime_get();
	if (ts->idle_active)
		tick_nohz_stop_idle(ts, now);
	if (ts->tick_stopped) {
		tick_nohz_update_jiffies(now);
		tick_nohz_kick_tick(ts, now);
	}
}

#else

static inline void tick_nohz_switch_to_nohz(void) { }
static inline void tick_nohz_irq_enter(void) { }

#endif /* CONFIG_NO_HZ_COMMON */

/*
 * Called from irq_enter to notify about the possible interruption of idle()
 */
void tick_irq_enter(void)
{
	tick_check_oneshot_broadcast_this_cpu();
	tick_nohz_irq_enter();
}

/*
 * High resolution timer specific code
 */
#ifdef CONFIG_HIGH_RES_TIMERS
/*
 * We rearm the timer until we get disabled by the idle code.
 * Called with interrupts disabled.
 */
static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
{
	struct tick_sched *ts =
		container_of(timer, struct tick_sched, sched_timer);
	struct pt_regs *regs = get_irq_regs();
	ktime_t now = ktime_get();

	tick_sched_do_timer(now);

	/*
	 * Do not call when we are not in irq context and have
	 * no valid regs pointer
	 */
	if (regs)
		tick_sched_handle(ts, regs);

	/* No need to reprogram if we are in idle or full dynticks mode */
	if (unlikely(ts->tick_stopped))
		return HRTIMER_NORESTART;

	hrtimer_forward(timer, now, tick_period);

	return HRTIMER_RESTART;
}

static int sched_skew_tick;

static int __init skew_tick(char *str)
{
	get_option(&str, &sched_skew_tick);

	return 0;
}
early_param("skew_tick", skew_tick);

/**
 * tick_setup_sched_timer - setup the tick emulation timer
 */
void tick_setup_sched_timer(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	ktime_t now = ktime_get();

	/*
	 * Emulate tick processing via per-CPU hrtimers:
	 */
	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	ts->sched_timer.function = tick_sched_timer;

	/* Get the next period (per cpu) */
	hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());

	/* Offset the tick to avert jiffies_lock contention. */
	if (sched_skew_tick) {
		u64 offset = ktime_to_ns(tick_period) >> 1;
		do_div(offset, num_possible_cpus());
		offset *= smp_processor_id();
		hrtimer_add_expires_ns(&ts->sched_timer, offset);
	}

	hrtimer_forward(&ts->sched_timer, now, tick_period);
	hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED);

#ifdef CONFIG_NO_HZ_COMMON
	if (tick_nohz_enabled) {
		ts->nohz_mode = NOHZ_MODE_HIGHRES;
		tick_nohz_active = 1;
	}
#endif
}
#endif /* HIGH_RES_TIMERS */

#if defined CONFIG_NO_HZ_COMMON || defined CONFIG_HIGH_RES_TIMERS
void tick_cancel_sched_timer(int cpu)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);

# ifdef CONFIG_HIGH_RES_TIMERS
	if (ts->sched_timer.base)
		hrtimer_cancel(&ts->sched_timer);
# endif

	memset(ts, 0, sizeof(*ts));
}
#endif

/**
 * Async notification about clocksource changes
 */
void tick_clock_notify(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		set_bit(0, &per_cpu(tick_cpu_sched, cpu).check_clocks);
}

/*
 * Async notification about clock event changes
 */
void tick_oneshot_notify(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	set_bit(0, &ts->check_clocks);
}

/**
 * Check if a change happened which makes oneshot possible.
 *
 * Called cyclically from the hrtimer softirq (driven by the timer
 * softirq). allow_nohz signals that we can switch into low-res nohz
 * mode, because high resolution timers are disabled (either at
 * compile time or at runtime).
 */
int tick_check_oneshot_change(int allow_nohz)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	if (!test_and_clear_bit(0, &ts->check_clocks))
		return 0;

	if (ts->nohz_mode != NOHZ_MODE_INACTIVE)
		return 0;

	if (!timekeeping_valid_for_hres() || !tick_is_oneshot_available())
		return 0;

	if (!allow_nohz)
		return 1;

	tick_nohz_switch_to_nohz();
	return 0;
}