/*
 *  linux/kernel/timer.c
 *
 *  Kernel internal timers
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
 *
 *  1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *              "A Kernel Model for Precision Timekeeping" by Dave Mills
 *  1998-12-24  Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
 *              serialize accesses to xtime/lost_ticks).
 *                              Copyright (C) 1998  Andrea Arcangeli
 *  1999-03-10  Improved NTP compatibility by Ulrich Windl
 *  2002-05-31  Move sys_sysinfo here and make its locking sane, Robert Love
 *  2000-10-05  Implemented scalable SMP per-CPU timer handling.
 *                              Copyright (C) 2000, 2001, 2002  Ingo Molnar
 *              Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 */

#include <linux/kernel_stat.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pid_namespace.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/tick.h>
#include <linux/kallsyms.h>
#include <linux/irq_work.h>
#include <linux/sched.h>
#include <linux/sched/sysctl.h>
#include <linux/slab.h>
#include <linux/compat.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>

#define CREATE_TRACE_POINTS
#include <trace/events/timer.h>

__visible u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

/*
 * per-CPU timer vector definitions:
 */
#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)
#define MAX_TVAL ((unsigned long)((1ULL << (TVR_BITS + 4*TVN_BITS)) - 1))

struct tvec {
	struct list_head vec[TVN_SIZE];
};

struct tvec_root {
	struct list_head vec[TVR_SIZE];
};

struct tvec_base {
	spinlock_t lock;
	struct timer_list *running_timer;
	unsigned long timer_jiffies;
	unsigned long next_timer;
	unsigned long active_timers;
	unsigned long all_timers;
	struct tvec_root tv1;
	struct tvec tv2;
	struct tvec tv3;
	struct tvec tv4;
	struct tvec tv5;
} ____cacheline_aligned;

struct tvec_base boot_tvec_bases;
EXPORT_SYMBOL(boot_tvec_bases);
static DEFINE_PER_CPU(struct tvec_base *, tvec_bases) = &boot_tvec_bases;

/* Functions below help us manage 'deferrable' flag */
static inline unsigned int tbase_get_deferrable(struct tvec_base *base)
{
	return ((unsigned int)(unsigned long)base & TIMER_DEFERRABLE);
}

static inline unsigned int tbase_get_irqsafe(struct tvec_base *base)
{
	return ((unsigned int)(unsigned long)base & TIMER_IRQSAFE);
}

static inline struct tvec_base *tbase_get_base(struct tvec_base *base)
{
	return ((struct tvec_base *)((unsigned long)base & ~TIMER_FLAG_MASK));
}

static inline void
timer_set_base(struct timer_list *timer, struct tvec_base *new_base)
{
	unsigned long flags = (unsigned long)timer->base & TIMER_FLAG_MASK;

	timer->base = (struct tvec_base *)((unsigned long)(new_base) | flags);
}

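/*
 * Illustrative sketch (not part of the original file): because struct
 * tvec_base is ____cacheline_aligned, the low bits of any tvec_base
 * pointer are zero, so TIMER_DEFERRABLE/TIMER_IRQSAFE can ride in
 * timer->base itself.  A minimal sketch of the encoding, assuming a
 * variable "base" holding an aligned tvec_base pointer:
 *
 *	unsigned long word = (unsigned long)base | TIMER_DEFERRABLE;
 *	struct tvec_base *p =
 *		(struct tvec_base *)(word & ~TIMER_FLAG_MASK); // == base
 *	bool deferrable = word & TIMER_DEFERRABLE;             // true
 */
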
static unsigned long round_jiffies_common(unsigned long j, int cpu,
		bool force_up)
{
	int rem;
	unsigned long original = j;

	/*
	 * We don't want all cpus firing their timers at once hitting the
	 * same lock or cachelines, so we skew each extra cpu with an extra
	 * 3 jiffies. This 3 jiffies came originally from the mm/ code which
	 * already did this.
	 * The skew is done by adding 3*cpunr, then round, then subtract this
	 * extra offset again.
	 */
	j += cpu * 3;

	rem = j % HZ;

	/*
	 * If the target jiffy is just after a whole second (which can happen
	 * due to delays of the timer irq, long irq off times etc etc) then
	 * we should round down to the whole second, not up. Use 1/4th second
	 * as cutoff for this rounding as an extreme upper bound for this.
	 * But never round down if @force_up is set.
	 */
	if (rem < HZ/4 && !force_up) /* round down */
		j = j - rem;
	else /* round up */
		j = j - rem + HZ;

	/* now that we have rounded, subtract the extra skew again */
	j -= cpu * 3;

	/*
	 * Make sure j is still in the future. Otherwise return the
	 * unmodified value.
	 */
	return time_is_after_jiffies(j) ? j : original;
}

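/*
 * Worked example (illustrative, assuming HZ=250 and cpu=1): for
 * j = 1000120, the skew makes j = 1000123 and rem = j % 250 = 123.
 * Since rem >= HZ/4 (62), we round up: 1000123 - 123 + 250 = 1000250,
 * and removing the skew yields 1000247.  CPU 0 with the same target
 * would get 1000250, so the two CPUs still fire on different jiffies.
 */
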
/**
 * __round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, false);
}
EXPORT_SYMBOL_GPL(__round_jiffies);

/**
 * __round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, false) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_relative);

/**
 * round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), false);
}
EXPORT_SYMBOL_GPL(round_jiffies);

/**
 * round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies_relative(unsigned long j)
{
	return __round_jiffies_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_relative);

/**
 * __round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies() except that it will never
 * round down.  This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, true);
}
EXPORT_SYMBOL_GPL(__round_jiffies_up);

/**
 * __round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies_relative() except that it will never
 * round down.  This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, true) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);

/**
 * round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * This is the same as round_jiffies() except that it will never
 * round down.  This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), true);
}
EXPORT_SYMBOL_GPL(round_jiffies_up);

/**
 * round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * This is the same as round_jiffies_relative() except that it will never
 * round down.  This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up_relative(unsigned long j)
{
	return __round_jiffies_up_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_up_relative);

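/*
 * Illustrative usage (not from this file): a driver arming a periodic
 * housekeeping timer that only needs ~1s resolution can batch its
 * wakeups with other such timers:
 *
 *	mod_timer(&hypothetical_timer, round_jiffies(jiffies + 5 * HZ));
 *
 * The _up variants suit timeouts (e.g. watchdogs) that may fire late
 * but must never fire early.  "hypothetical_timer" is a made-up name.
 */
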
/**
 * set_timer_slack - set the allowed slack for a timer
 * @timer: the timer to be modified
 * @slack_hz: the amount of time (in jiffies) allowed for rounding
 *
 * Set the amount of time, in jiffies, that a certain timer has
 * in terms of slack. By setting this value, the timer subsystem
 * will schedule the actual timer somewhere between
 * the time mod_timer() asks for, and that time plus the slack.
 *
 * By setting the slack to -1, a percentage of the delay is used
 * instead.
 */
void set_timer_slack(struct timer_list *timer, int slack_hz)
{
	timer->slack = slack_hz;
}
EXPORT_SYMBOL_GPL(set_timer_slack);

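/*
 * Illustrative usage (not from this file): a driver that can tolerate
 * its timeout firing up to a quarter second late can let the wheel
 * coalesce it with neighbouring timers:
 *
 *	set_timer_slack(&hypothetical_timer, HZ / 4);
 *	mod_timer(&hypothetical_timer, jiffies + 2 * HZ);
 *
 * "hypothetical_timer" is a made-up name.
 */
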
/*
 * If the list is empty, catch up ->timer_jiffies to the current time.
 * The caller must hold the tvec_base lock.  Returns true if the list
 * was empty and therefore ->timer_jiffies was updated.
 */
static bool catchup_timer_jiffies(struct tvec_base *base)
{
	if (!base->all_timers) {
		base->timer_jiffies = jiffies;
		return true;
	}
	return false;
}

static void
__internal_add_timer(struct tvec_base *base, struct timer_list *timer)
{
	unsigned long expires = timer->expires;
	unsigned long idx = expires - base->timer_jiffies;
	struct list_head *vec;

	if (idx < TVR_SIZE) {
		int i = expires & TVR_MASK;
		vec = base->tv1.vec + i;
	} else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
		int i = (expires >> TVR_BITS) & TVN_MASK;
		vec = base->tv2.vec + i;
	} else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
		vec = base->tv3.vec + i;
	} else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
		vec = base->tv4.vec + i;
	} else if ((signed long) idx < 0) {
		/*
		 * Can happen if you add a timer with expires == jiffies,
		 * or you set a timer to go off in the past
		 */
		vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
	} else {
		int i;
		/* If the timeout is larger than MAX_TVAL (on 64-bit
		 * architectures or with CONFIG_BASE_SMALL=1) then we
		 * use the maximum timeout.
		 */
		if (idx > MAX_TVAL) {
			idx = MAX_TVAL;
			expires = idx + base->timer_jiffies;
		}
		i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
		vec = base->tv5.vec + i;
	}
	/*
	 * Timers are FIFO:
	 */
	list_add_tail(&timer->entry, vec);
}

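/*
 * Worked example (illustrative, assuming CONFIG_BASE_SMALL=0, so
 * TVR_BITS=8 and TVN_BITS=6): with base->timer_jiffies = 1000 and
 * expires = 1300, idx = 300, which is >= TVR_SIZE (256) but below
 * 1 << (8 + 6) = 16384, so the timer lands in tv2 at slot
 * (1300 >> 8) & 63 = 5.  It is cascaded back into tv1 once
 * timer_jiffies catches up to jiffy 1280.
 */
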
static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
{
	(void)catchup_timer_jiffies(base);
	__internal_add_timer(base, timer);
	/*
	 * Update base->active_timers and base->next_timer
	 */
	if (!tbase_get_deferrable(timer->base)) {
		if (!base->active_timers++ ||
		    time_before(timer->expires, base->next_timer))
			base->next_timer = timer->expires;
	}
	base->all_timers++;
}

#ifdef CONFIG_TIMER_STATS
void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
{
	if (timer->start_site)
		return;

	timer->start_site = addr;
	memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
	timer->start_pid = current->pid;
}

static void timer_stats_account_timer(struct timer_list *timer)
{
	unsigned int flag = 0;

	if (likely(!timer->start_site))
		return;
	if (unlikely(tbase_get_deferrable(timer->base)))
		flag |= TIMER_STATS_FLAG_DEFERRABLE;

	timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
				 timer->function, timer->start_comm, flag);
}

#else
static void timer_stats_account_timer(struct timer_list *timer) {}
#endif

#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static struct debug_obj_descr timer_debug_descr;

static void *timer_debug_hint(void *addr)
{
	return ((struct timer_list *) addr)->function;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int timer_fixup_init(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_init(timer, &timer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/* Stub timer callback for improperly used timers. */
static void stub_timer(unsigned long data)
{
	WARN_ON(1);
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int timer_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. The timer was
		 * statically initialized. We just make sure that it
		 * is tracked in the object tracker.
		 */
		if (timer->entry.next == NULL &&
		    timer->entry.prev == TIMER_ENTRY_STATIC) {
			debug_object_init(timer, &timer_debug_descr);
			debug_object_activate(timer, &timer_debug_descr);
			return 0;
		} else {
			setup_timer(timer, stub_timer, 0);
			return 1;
		}
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int timer_fixup_free(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_free(timer, &timer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_assert_init is called when:
 * - an untracked/uninit-ed object is found
 */
static int timer_fixup_assert_init(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		if (timer->entry.prev == TIMER_ENTRY_STATIC) {
			/*
			 * This is not really a fixup. The timer was
			 * statically initialized. We just make sure that it
			 * is tracked in the object tracker.
			 */
			debug_object_init(timer, &timer_debug_descr);
			return 0;
		} else {
			setup_timer(timer, stub_timer, 0);
			return 1;
		}
	default:
		return 0;
	}
}

static struct debug_obj_descr timer_debug_descr = {
	.name			= "timer_list",
	.debug_hint		= timer_debug_hint,
	.fixup_init		= timer_fixup_init,
	.fixup_activate		= timer_fixup_activate,
	.fixup_free		= timer_fixup_free,
	.fixup_assert_init	= timer_fixup_assert_init,
};

static inline void debug_timer_init(struct timer_list *timer)
{
	debug_object_init(timer, &timer_debug_descr);
}

static inline void debug_timer_activate(struct timer_list *timer)
{
	debug_object_activate(timer, &timer_debug_descr);
}

static inline void debug_timer_deactivate(struct timer_list *timer)
{
	debug_object_deactivate(timer, &timer_debug_descr);
}

static inline void debug_timer_free(struct timer_list *timer)
{
	debug_object_free(timer, &timer_debug_descr);
}

static inline void debug_timer_assert_init(struct timer_list *timer)
{
	debug_object_assert_init(timer, &timer_debug_descr);
}

static void do_init_timer(struct timer_list *timer, unsigned int flags,
			  const char *name, struct lock_class_key *key);

void init_timer_on_stack_key(struct timer_list *timer, unsigned int flags,
			     const char *name, struct lock_class_key *key)
{
	debug_object_init_on_stack(timer, &timer_debug_descr);
	do_init_timer(timer, flags, name, key);
}
EXPORT_SYMBOL_GPL(init_timer_on_stack_key);

void destroy_timer_on_stack(struct timer_list *timer)
{
	debug_object_free(timer, &timer_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_timer_on_stack);

#else
static inline void debug_timer_init(struct timer_list *timer) { }
static inline void debug_timer_activate(struct timer_list *timer) { }
static inline void debug_timer_deactivate(struct timer_list *timer) { }
static inline void debug_timer_assert_init(struct timer_list *timer) { }
#endif

static inline void debug_init(struct timer_list *timer)
{
	debug_timer_init(timer);
	trace_timer_init(timer);
}

static inline void
debug_activate(struct timer_list *timer, unsigned long expires)
{
	debug_timer_activate(timer);
	trace_timer_start(timer, expires);
}

static inline void debug_deactivate(struct timer_list *timer)
{
	debug_timer_deactivate(timer);
	trace_timer_cancel(timer);
}

static inline void debug_assert_init(struct timer_list *timer)
{
	debug_timer_assert_init(timer);
}

static void do_init_timer(struct timer_list *timer, unsigned int flags,
			  const char *name, struct lock_class_key *key)
{
	struct tvec_base *base = __raw_get_cpu_var(tvec_bases);

	timer->entry.next = NULL;
	timer->base = (void *)((unsigned long)base | flags);
	timer->slack = -1;
#ifdef CONFIG_TIMER_STATS
	timer->start_site = NULL;
	timer->start_pid = -1;
	memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
	lockdep_init_map(&timer->lockdep_map, name, key, 0);
}

/**
 * init_timer_key - initialize a timer
 * @timer: the timer to be initialized
 * @flags: timer flags
 * @name: name of the timer
 * @key: lockdep class key of the fake lock used for tracking timer
 *       sync lock dependencies
 *
 * init_timer_key() must be done to a timer prior to calling *any* of the
 * other timer functions.
 */
void init_timer_key(struct timer_list *timer, unsigned int flags,
		    const char *name, struct lock_class_key *key)
{
	debug_init(timer);
	do_init_timer(timer, flags, name, key);
}
EXPORT_SYMBOL(init_timer_key);

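/*
 * Illustrative usage (not from this file): drivers normally reach this
 * code through the helpers in <linux/timer.h> rather than calling
 * init_timer_key() directly, e.g.:
 *
 *	static void my_timeout_fn(unsigned long data);
 *	static struct timer_list my_timer;
 *
 *	setup_timer(&my_timer, my_timeout_fn, 0);
 *	mod_timer(&my_timer, jiffies + HZ);
 *
 * "my_timer" and "my_timeout_fn" are made-up names.
 */
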
static inline void detach_timer(struct timer_list *timer, bool clear_pending)
{
	struct list_head *entry = &timer->entry;

	debug_deactivate(timer);

	__list_del(entry->prev, entry->next);
	if (clear_pending)
		entry->next = NULL;
	entry->prev = LIST_POISON2;
}

static inline void
detach_expired_timer(struct timer_list *timer, struct tvec_base *base)
{
	detach_timer(timer, true);
	if (!tbase_get_deferrable(timer->base))
		base->active_timers--;
	base->all_timers--;
	(void)catchup_timer_jiffies(base);
}

static int detach_if_pending(struct timer_list *timer, struct tvec_base *base,
			     bool clear_pending)
{
	if (!timer_pending(timer))
		return 0;

	detach_timer(timer, clear_pending);
	if (!tbase_get_deferrable(timer->base)) {
		base->active_timers--;
		if (timer->expires == base->next_timer)
			base->next_timer = base->timer_jiffies;
	}
	base->all_timers--;
	(void)catchup_timer_jiffies(base);
	return 1;
}

/*
 * We are using hashed locking: holding per_cpu(tvec_bases).lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on ->tvX lists.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
static struct tvec_base *lock_timer_base(struct timer_list *timer,
					 unsigned long *flags)
	__acquires(timer->base->lock)
{
	struct tvec_base *base;

	for (;;) {
		struct tvec_base *prelock_base = timer->base;
		base = tbase_get_base(prelock_base);
		if (likely(base != NULL)) {
			spin_lock_irqsave(&base->lock, *flags);
			if (likely(prelock_base == timer->base))
				return base;
			/* The timer has migrated to another CPU */
			spin_unlock_irqrestore(&base->lock, *flags);
		}
		cpu_relax();
	}
}

static inline int
__mod_timer(struct timer_list *timer, unsigned long expires,
	    bool pending_only, int pinned)
{
	struct tvec_base *base, *new_base;
	unsigned long flags;
	int ret = 0, cpu;

	timer_stats_timer_set_start_info(timer);
	BUG_ON(!timer->function);

	base = lock_timer_base(timer, &flags);

	ret = detach_if_pending(timer, base, false);
	if (!ret && pending_only)
		goto out_unlock;

	debug_activate(timer, expires);

	cpu = get_nohz_timer_target(pinned);
	new_base = per_cpu(tvec_bases, cpu);

	if (base != new_base) {
		/*
		 * We are trying to schedule the timer on the local CPU.
		 * However we can't change timer's base while it is running,
		 * otherwise del_timer_sync() can't detect that the timer's
		 * handler has not yet finished. This also guarantees that
		 * the timer is serialized wrt itself.
		 */
		if (likely(base->running_timer != timer)) {
			/* See the comment in lock_timer_base() */
			timer_set_base(timer, NULL);
			spin_unlock(&base->lock);
			base = new_base;
			spin_lock(&base->lock);
			timer_set_base(timer, base);
		}
	}

	timer->expires = expires;
	internal_add_timer(base, timer);

out_unlock:
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}

/**
 * mod_timer_pending - modify a pending timer's timeout
 * @timer: the pending timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pending() is the same for pending timers as mod_timer(),
 * but will not re-activate and modify already deleted timers.
 *
 * It is useful for unserialized use of timers.
 */
int mod_timer_pending(struct timer_list *timer, unsigned long expires)
{
	return __mod_timer(timer, expires, true, TIMER_NOT_PINNED);
}
EXPORT_SYMBOL(mod_timer_pending);

/*
 * Decide where to put the timer while taking the slack into account
 *
 * Algorithm:
 * 1) calculate the maximum (absolute) time
 * 2) calculate the highest bit where the expires and new max are different
 * 3) use this bit to make a mask
 * 4) use the bitmask to round down the maximum time, so that all last
 *    bits are zeros
 */
static inline
unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
{
	unsigned long expires_limit, mask;
	int bit;

	if (timer->slack >= 0) {
		expires_limit = expires + timer->slack;
	} else {
		long delta = expires - jiffies;

		if (delta < 256)
			return expires;

		expires_limit = expires + delta / 256;
	}
	mask = expires ^ expires_limit;
	if (mask == 0)
		return expires;

	bit = find_last_bit(&mask, BITS_PER_LONG);

	mask = (1UL << bit) - 1;

	expires_limit = expires_limit & ~(mask);

	return expires_limit;
}

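/*
 * Worked example (illustrative): with the default slack of -1,
 * jiffies = 0x1000 and expires = 0x1401, delta = 0x401, so
 * expires_limit = 0x1401 + 0x401/256 = 0x1405.  The highest differing
 * bit between 0x1401 and 0x1405 is bit 2, giving mask 0x3, and the
 * rounded-down result is 0x1405 & ~0x3 = 0x1404.  The timer is thus
 * pushed out from 0x1401 to 0x1404, letting nearby timers expire
 * together.
 */
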
/**
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer() is a more efficient way to update the expires field of an
 * active timer (if the timer is inactive it will be activated)
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
	expires = apply_slack(timer, expires);

	/*
	 * This is a common optimization triggered by the
	 * networking code - if the timer is re-modified
	 * to be the same thing then just return:
	 */
	if (timer_pending(timer) && timer->expires == expires)
		return 1;

	return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);
}
EXPORT_SYMBOL(mod_timer);

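/*
 * Illustrative usage (not from this file): re-arming a hypothetical
 * inactivity timeout from a hot path is safe and cheap, whether or not
 * the timer is currently pending:
 *
 *	mod_timer(&hypothetical_idle_timer, jiffies + 10 * HZ);
 *
 * If the timer is already pending with the same expiry, the fast path
 * above returns immediately without requeueing.
 */
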
/**
 * mod_timer_pinned - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pinned() is a way to update the expires field of an
 * active timer (if the timer is inactive it will be activated)
 * and to ensure that the timer is scheduled on the current CPU.
 *
 * Note that this does not prevent the timer from being migrated
 * when the current CPU goes offline.  If this is a problem for
 * you, use CPU-hotplug notifiers to handle it correctly, for
 * example, cancelling the timer when the corresponding CPU goes
 * offline.
 *
 * mod_timer_pinned(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 */
int mod_timer_pinned(struct timer_list *timer, unsigned long expires)
{
	if (timer->expires == expires && timer_pending(timer))
		return 1;

	return __mod_timer(timer, expires, false, TIMER_PINNED);
}
EXPORT_SYMBOL(mod_timer_pinned);

/**
 * add_timer - start a timer
 * @timer: the timer to be added
 *
 * The kernel will do a ->function(->data) callback from the
 * timer interrupt at the ->expires point in the future. The
 * current time is 'jiffies'.
 *
 * The timer's ->expires, ->function (and if the handler uses it, ->data)
 * fields must be set prior to calling this function.
 *
 * Timers with an ->expires field in the past will be executed in the next
 * timer tick.
 */
void add_timer(struct timer_list *timer)
{
	BUG_ON(timer_pending(timer));
	mod_timer(timer, timer->expires);
}
EXPORT_SYMBOL(add_timer);

/**
 * add_timer_on - start a timer on a particular CPU
 * @timer: the timer to be added
 * @cpu: the CPU to start it on
 *
 * This is not very scalable on SMP. Double adds are not possible.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
	struct tvec_base *base = per_cpu(tvec_bases, cpu);
	unsigned long flags;

	timer_stats_timer_set_start_info(timer);
	BUG_ON(timer_pending(timer) || !timer->function);
	spin_lock_irqsave(&base->lock, flags);
	timer_set_base(timer, base);
	debug_activate(timer, timer->expires);
	internal_add_timer(base, timer);
	/*
	 * Check whether the other CPU is in dynticks mode and needs
	 * to be triggered to reevaluate the timer wheel.
	 * We are protected against the other CPU fiddling
	 * with the timer by holding the timer base lock. This also
	 * makes sure that a CPU on the way to stop its tick can not
	 * evaluate the timer wheel.
	 *
	 * Spare the IPI for deferrable timers on idle targets though.
	 * The next busy ticks will take care of it. Except full dynticks
	 * require special care against races with idle_cpu(), let's deal
	 * with that later.
	 */
	if (!tbase_get_deferrable(timer->base) || tick_nohz_full_cpu(cpu))
		wake_up_nohz_cpu(cpu);

	spin_unlock_irqrestore(&base->lock, flags);
}
EXPORT_SYMBOL_GPL(add_timer_on);

/**
 * del_timer - deactivate a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and inactive
 * timers.
 *
 * The function returns whether it has deactivated a pending timer or not.
 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
 * active timer returns 1.)
 */
int del_timer(struct timer_list *timer)
{
	struct tvec_base *base;
	unsigned long flags;
	int ret = 0;

	debug_assert_init(timer);

	timer_stats_timer_clear_start_info(timer);
	if (timer_pending(timer)) {
		base = lock_timer_base(timer, &flags);
		ret = detach_if_pending(timer, base, true);
		spin_unlock_irqrestore(&base->lock, flags);
	}

	return ret;
}
EXPORT_SYMBOL(del_timer);

/**
 * try_to_del_timer_sync - Try to deactivate a timer
 * @timer: timer to del
 *
 * This function tries to deactivate a timer. Upon successful (ret >= 0)
 * exit the timer is not queued and the handler is not running on any CPU.
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
	struct tvec_base *base;
	unsigned long flags;
	int ret = -1;

	debug_assert_init(timer);

	base = lock_timer_base(timer, &flags);

	if (base->running_timer != timer) {
		timer_stats_timer_clear_start_info(timer);
		ret = detach_if_pending(timer, base, true);
	}
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}
EXPORT_SYMBOL(try_to_del_timer_sync);

#ifdef CONFIG_SMP
/**
 * del_timer_sync - deactivate a timer and wait for the handler to finish.
 * @timer: the timer to be deactivated
 *
 * This function only differs from del_timer() on SMP: besides deactivating
 * the timer it also makes sure the handler has finished executing on other
 * CPUs.
 *
 * Synchronization rules: Callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts unless the timer is an irqsafe one. The caller must
 * not hold locks which would prevent completion of the timer's
 * handler. The timer's handler must not call add_timer_on(). Upon exit the
 * timer is not queued and the handler is not running on any CPU.
 *
 * Note: For !irqsafe timers, you must not hold locks that are held in
 * interrupt context while calling this function. Even if the lock has
 * nothing to do with the timer in question.  Here's why:
 *
 *    CPU0                             CPU1
 *    ----                             ----
 *                                     <SOFTIRQ>
 *                                       call_timer_fn();
 *                                       base->running_timer = mytimer;
 *    spin_lock_irq(somelock);
 *                                     <IRQ>
 *                                       spin_lock(somelock);
 *    del_timer_sync(mytimer);
 *      while (base->running_timer == mytimer);
 *
 * Now del_timer_sync() will never return and never release somelock.
 * The interrupt on the other CPU is waiting to grab somelock but
 * it has interrupted the softirq that CPU0 is waiting to finish.
 *
 * The function returns whether it has deactivated a pending timer or not.
 */
int del_timer_sync(struct timer_list *timer)
{
#ifdef CONFIG_LOCKDEP
	unsigned long flags;

	/*
	 * If lockdep gives a backtrace here, please reference
	 * the synchronization rules above.
	 */
	local_irq_save(flags);
	lock_map_acquire(&timer->lockdep_map);
	lock_map_release(&timer->lockdep_map);
	local_irq_restore(flags);
#endif
	/*
	 * don't use it in hardirq context, because it
	 * could lead to deadlock.
	 */
	WARN_ON(in_irq() && !tbase_get_irqsafe(timer->base));
	for (;;) {
		int ret = try_to_del_timer_sync(timer);
		if (ret >= 0)
			return ret;
		cpu_relax();
	}
}
EXPORT_SYMBOL(del_timer_sync);
#endif

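/*
 * Illustrative teardown pattern (not from this file): a driver's
 * remove/exit path typically cancels its timer with the _sync variant
 * from sleepable context, so the handler cannot still be running when
 * its data is freed:
 *
 *	del_timer_sync(&hypothetical_timer);
 *	kfree(hypothetical_data);
 *
 * "hypothetical_timer" and "hypothetical_data" are made-up names.
 */
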
static int cascade(struct tvec_base *base, struct tvec *tv, int index)
{
	/* cascade all the timers from tv up one level */
	struct timer_list *timer, *tmp;
	struct list_head tv_list;

	list_replace_init(tv->vec + index, &tv_list);

	/*
	 * We are removing _all_ timers from the list, so we
	 * don't have to detach them individually.
	 */
	list_for_each_entry_safe(timer, tmp, &tv_list, entry) {
		BUG_ON(tbase_get_base(timer->base) != base);
		/* No accounting, while moving them */
		__internal_add_timer(base, timer);
	}

	return index;
}

static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
			  unsigned long data)
{
	int count = preempt_count();

#ifdef CONFIG_LOCKDEP
	/*
	 * It is permissible to free the timer from inside the
	 * function that is called from it, this we need to take into
	 * account for lockdep too.  To avoid bogus "held lock freed"
	 * warnings as well as problems when looking into
	 * timer->lockdep_map, make a copy and use that here.
	 */
	struct lockdep_map lockdep_map;

	lockdep_copy_map(&lockdep_map, &timer->lockdep_map);
#endif
	/*
	 * Couple the lock chain with the lock chain at
	 * del_timer_sync() by acquiring the lock_map around the fn()
	 * call here and in del_timer_sync().
	 */
	lock_map_acquire(&lockdep_map);

	trace_timer_expire_entry(timer);
	fn(data);
	trace_timer_expire_exit(timer);

	lock_map_release(&lockdep_map);

	if (count != preempt_count()) {
		WARN_ONCE(1, "timer: %pF preempt leak: %08x -> %08x\n",
			  fn, count, preempt_count());
		/*
		 * Restore the preempt count. That gives us a decent
		 * chance to survive and extract information. If the
		 * callback kept a lock held, bad luck, but not worse
		 * than the BUG() we had.
		 */
		preempt_count_set(count);
	}
}

#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)

/**
 * __run_timers - run all expired timers (if any) on this CPU.
 * @base: the timer vector to be processed.
 *
 * This function cascades all vectors and executes all expired timer
 * vectors.
 */
static inline void __run_timers(struct tvec_base *base)
{
	struct timer_list *timer;

	spin_lock_irq(&base->lock);
	if (catchup_timer_jiffies(base)) {
		spin_unlock_irq(&base->lock);
		return;
	}
	while (time_after_eq(jiffies, base->timer_jiffies)) {
		struct list_head work_list;
		struct list_head *head = &work_list;
		int index = base->timer_jiffies & TVR_MASK;

		/*
		 * Cascade timers:
		 */
		if (!index &&
			(!cascade(base, &base->tv2, INDEX(0))) &&
				(!cascade(base, &base->tv3, INDEX(1))) &&
					!cascade(base, &base->tv4, INDEX(2)))
			cascade(base, &base->tv5, INDEX(3));
		++base->timer_jiffies;
		list_replace_init(base->tv1.vec + index, head);
		while (!list_empty(head)) {
			void (*fn)(unsigned long);
			unsigned long data;
			bool irqsafe;

			timer = list_first_entry(head, struct timer_list, entry);
			fn = timer->function;
			data = timer->data;
			irqsafe = tbase_get_irqsafe(timer->base);

			timer_stats_account_timer(timer);

			base->running_timer = timer;
			detach_expired_timer(timer, base);

			if (irqsafe) {
				spin_unlock(&base->lock);
				call_timer_fn(timer, fn, data);
				spin_lock(&base->lock);
			} else {
				spin_unlock_irq(&base->lock);
				call_timer_fn(timer, fn, data);
				spin_lock_irq(&base->lock);
			}
		}
	}
	base->running_timer = NULL;
	spin_unlock_irq(&base->lock);
}

#ifdef CONFIG_NO_HZ_COMMON
/*
 * Find out when the next timer event is due to happen. This
 * is used on S/390 to stop all activity when a CPU is idle.
 * This function needs to be called with interrupts disabled.
 */
static unsigned long __next_timer_interrupt(struct tvec_base *base)
{
	unsigned long timer_jiffies = base->timer_jiffies;
	unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA;
	int index, slot, array, found = 0;
	struct timer_list *nte;
	struct tvec *varray[4];

	/* Look for timer events in tv1. */
	index = slot = timer_jiffies & TVR_MASK;
	do {
		list_for_each_entry(nte, base->tv1.vec + slot, entry) {
			if (tbase_get_deferrable(nte->base))
				continue;

			found = 1;
			expires = nte->expires;
			/* Look at the cascade bucket(s)? */
			if (!index || slot < index)
				goto cascade;
			return expires;
		}
		slot = (slot + 1) & TVR_MASK;
	} while (slot != index);

cascade:
	/* Calculate the next cascade event */
	if (index)
		timer_jiffies += TVR_SIZE - index;
	timer_jiffies >>= TVR_BITS;

	/* Check tv2-tv5. */
	varray[0] = &base->tv2;
	varray[1] = &base->tv3;
	varray[2] = &base->tv4;
	varray[3] = &base->tv5;

	for (array = 0; array < 4; array++) {
		struct tvec *varp = varray[array];

		index = slot = timer_jiffies & TVN_MASK;
		do {
			list_for_each_entry(nte, varp->vec + slot, entry) {
				if (tbase_get_deferrable(nte->base))
					continue;

				found = 1;
				if (time_before(nte->expires, expires))
					expires = nte->expires;
			}
			/*
			 * Do we still search for the first timer or are
			 * we looking up the cascade buckets ?
			 */
			if (found) {
				/* Look at the cascade bucket(s)? */
				if (!index || slot < index)
					break;
				return expires;
			}
			slot = (slot + 1) & TVN_MASK;
		} while (slot != index);

		if (index)
			timer_jiffies += TVN_SIZE - index;
		timer_jiffies >>= TVN_BITS;
	}
	return expires;
}

/*
 * Check, if the next hrtimer event is before the next timer wheel
 * event:
 */
static unsigned long cmp_next_hrtimer_event(unsigned long now,
					    unsigned long expires)
{
	ktime_t hr_delta = hrtimer_get_next_event();
	struct timespec tsdelta;
	unsigned long delta;

	if (hr_delta.tv64 == KTIME_MAX)
		return expires;

	/*
	 * Expired timer available, let it expire in the next tick
	 */
	if (hr_delta.tv64 <= 0)
		return now + 1;

	tsdelta = ktime_to_timespec(hr_delta);
	delta = timespec_to_jiffies(&tsdelta);

	/*
	 * Limit the delta to the max value, which is checked in
	 * tick_nohz_stop_sched_tick():
	 */
	if (delta > NEXT_TIMER_MAX_DELTA)
		delta = NEXT_TIMER_MAX_DELTA;

	/*
	 * Take rounding errors into account and make sure that it
	 * expires in the next tick. Otherwise we go into an endless
	 * ping pong due to tick_nohz_stop_sched_tick() retriggering
	 * the timer softirq
	 */
	if (delta < 1)
		delta = 1;
	now += delta;
	if (time_before(now, expires))
		return now;
	return expires;
}

/**
 * get_next_timer_interrupt - return the jiffy of the next pending timer
 * @now: current time (in jiffies)
 */
unsigned long get_next_timer_interrupt(unsigned long now)
{
	struct tvec_base *base = __this_cpu_read(tvec_bases);
	unsigned long expires = now + NEXT_TIMER_MAX_DELTA;

	/*
	 * Pretend that there is no timer pending if the cpu is offline.
	 * Possible pending timers will be migrated later to an active cpu.
	 */
	if (cpu_is_offline(smp_processor_id()))
		return expires;

	spin_lock(&base->lock);
	if (base->active_timers) {
		if (time_before_eq(base->next_timer, base->timer_jiffies))
			base->next_timer = __next_timer_interrupt(base);
		expires = base->next_timer;
	}
	spin_unlock(&base->lock);

	if (time_before_eq(expires, now))
		return now;

	return cmp_next_hrtimer_event(now, expires);
}
#endif

/*
 * Called from the timer interrupt handler to charge one tick to the current
 * process.  user_tick is 1 if the tick is user time, 0 for system.
 */
void update_process_times(int user_tick)
{
	struct task_struct *p = current;
	int cpu = smp_processor_id();

	/* Note: this timer irq context must be accounted for as well. */
	account_process_tick(p, user_tick);
	run_local_timers();
	rcu_check_callbacks(cpu, user_tick);
#ifdef CONFIG_IRQ_WORK
	if (in_irq())
		irq_work_run();
#endif
	scheduler_tick();
	run_posix_cpu_timers(p);
}

/*
 * This function runs timers and the timer-tq in bottom half context.
 */
static void run_timer_softirq(struct softirq_action *h)
{
	struct tvec_base *base = __this_cpu_read(tvec_bases);

	hrtimer_run_pending();

	if (time_after_eq(jiffies, base->timer_jiffies))
		__run_timers(base);
}

/*
 * Called by the local, per-CPU timer interrupt on SMP.
 */
void run_local_timers(void)
{
	hrtimer_run_queues();
	raise_softirq(TIMER_SOFTIRQ);
}

#ifdef __ARCH_WANT_SYS_ALARM

/*
 * For backwards compatibility?  This can be done in libc so Alpha
 * and all newer ports shouldn't need it.
 */
SYSCALL_DEFINE1(alarm, unsigned int, seconds)
{
	return alarm_setitimer(seconds);
}

#endif

static void process_timeout(unsigned long __data)
{
	wake_up_process((struct task_struct *)__data);
}

/**
 * schedule_timeout - sleep until timeout
 * @timeout: timeout value in jiffies
 *
 * Make the current task sleep until @timeout jiffies have
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
 * pass before the routine returns. The routine will return 0
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task. In this case the remaining time
 * in jiffies will be returned, or 0 if the timer expired in time
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
 * the CPU away without a bound on the timeout. In this case the return
 * value will be %MAX_SCHEDULE_TIMEOUT.
 *
 * In all cases the return value is guaranteed to be non-negative.
 */
signed long __sched schedule_timeout(signed long timeout)
{
	struct timer_list timer;
	unsigned long expire;

	switch (timeout)
	{
	case MAX_SCHEDULE_TIMEOUT:
		/*
		 * These two special cases are useful to be comfortable
		 * in the caller. Nothing more. We could take
		 * MAX_SCHEDULE_TIMEOUT from one of the negative values
		 * but I'd like to return a valid offset (>=0) to allow
		 * the caller to do everything it wants with the retval.
		 */
		schedule();
		goto out;
	default:
		/*
		 * Another bit of PARANOID. Note that the retval will be
		 * 0 since no piece of kernel is supposed to do a check
		 * for a negative retval of schedule_timeout() (since it
		 * should never happen anyway). You just have the printk()
		 * that will tell you if something has gone wrong and where.
		 */
		if (timeout < 0) {
			printk(KERN_ERR "schedule_timeout: wrong timeout "
				"value %lx\n", timeout);
			dump_stack();
			current->state = TASK_RUNNING;
			goto out;
		}
	}

	expire = timeout + jiffies;

	setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
	__mod_timer(&timer, expire, false, TIMER_NOT_PINNED);
	schedule();
	del_singleshot_timer_sync(&timer);

	/* Remove the timer from the object tracker */
	destroy_timer_on_stack(&timer);

	timeout = expire - jiffies;

 out:
	return timeout < 0 ? 0 : timeout;
}
EXPORT_SYMBOL(schedule_timeout);

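/*
 * Illustrative usage (not from this file): waiting up to one second
 * for a condition while remaining wakeable by signals:
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(HZ);
 *
 * A return of 0 means the full second elapsed; a positive value is the
 * unslept remainder in jiffies ("remaining" is a hypothetical local).
 */
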
/*
 * We can use __set_current_state() here because schedule_timeout() calls
 * schedule() unconditionally.
 */
signed long __sched schedule_timeout_interruptible(signed long timeout)
{
	__set_current_state(TASK_INTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_interruptible);

signed long __sched schedule_timeout_killable(signed long timeout)
{
	__set_current_state(TASK_KILLABLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_killable);

signed long __sched schedule_timeout_uninterruptible(signed long timeout)
{
	__set_current_state(TASK_UNINTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_uninterruptible);

static int init_timers_cpu(int cpu)
{
	int j;
	struct tvec_base *base;
	static char tvec_base_done[NR_CPUS];

	if (!tvec_base_done[cpu]) {
		static char boot_done;

		if (boot_done) {
			/*
			 * The APs use this path later in boot
			 */
			base = kzalloc_node(sizeof(*base), GFP_KERNEL,
					    cpu_to_node(cpu));
			if (!base)
				return -ENOMEM;

			/* Make sure tvec_base has TIMER_FLAG_MASK bits free */
			if (WARN_ON(base != tbase_get_base(base))) {
				kfree(base);
				return -ENOMEM;
			}
			per_cpu(tvec_bases, cpu) = base;
		} else {
			/*
			 * This is for the boot CPU - we use compile-time
			 * static initialisation because per-cpu memory isn't
			 * ready yet and because the memory allocators are not
			 * initialised either.
			 */
			boot_done = 1;
			base = &boot_tvec_bases;
		}
		spin_lock_init(&base->lock);
		tvec_base_done[cpu] = 1;
	} else {
		base = per_cpu(tvec_bases, cpu);
	}

	for (j = 0; j < TVN_SIZE; j++) {
		INIT_LIST_HEAD(base->tv5.vec + j);
		INIT_LIST_HEAD(base->tv4.vec + j);
		INIT_LIST_HEAD(base->tv3.vec + j);
		INIT_LIST_HEAD(base->tv2.vec + j);
	}
	for (j = 0; j < TVR_SIZE; j++)
		INIT_LIST_HEAD(base->tv1.vec + j);

	base->timer_jiffies = jiffies;
	base->next_timer = base->timer_jiffies;
	base->active_timers = 0;
	base->all_timers = 0;
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static void migrate_timer_list(struct tvec_base *new_base, struct list_head *head)
{
	struct timer_list *timer;

	while (!list_empty(head)) {
		timer = list_first_entry(head, struct timer_list, entry);
		/* We ignore the accounting on the dying cpu */
		detach_timer(timer, false);
		timer_set_base(timer, new_base);
		internal_add_timer(new_base, timer);
	}
}

static void migrate_timers(int cpu)
{
	struct tvec_base *old_base;
	struct tvec_base *new_base;
	int i;

	BUG_ON(cpu_online(cpu));
	old_base = per_cpu(tvec_bases, cpu);
	new_base = get_cpu_var(tvec_bases);
	/*
	 * The caller is globally serialized and nobody else
	 * takes two locks at once, deadlock is not possible.
	 */
	spin_lock_irq(&new_base->lock);
	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

	BUG_ON(old_base->running_timer);

	for (i = 0; i < TVR_SIZE; i++)
		migrate_timer_list(new_base, old_base->tv1.vec + i);
	for (i = 0; i < TVN_SIZE; i++) {
		migrate_timer_list(new_base, old_base->tv2.vec + i);
		migrate_timer_list(new_base, old_base->tv3.vec + i);
		migrate_timer_list(new_base, old_base->tv4.vec + i);
		migrate_timer_list(new_base, old_base->tv5.vec + i);
	}

	spin_unlock(&old_base->lock);
	spin_unlock_irq(&new_base->lock);
	put_cpu_var(tvec_bases);
}
#endif /* CONFIG_HOTPLUG_CPU */

static int timer_cpu_notify(struct notifier_block *self,
				unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	int err;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		err = init_timers_cpu(cpu);
		if (err < 0)
			return notifier_from_errno(err);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		migrate_timers(cpu);
		break;
#endif
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block timers_nb = {
	.notifier_call	= timer_cpu_notify,
};

void __init init_timers(void)
{
	int err;

	/* ensure there are enough low bits for flags in timer->base pointer */
	BUILD_BUG_ON(__alignof__(struct tvec_base) & TIMER_FLAG_MASK);

	err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
			       (void *)(long)smp_processor_id());
	BUG_ON(err != NOTIFY_OK);

	init_timer_stats();
	register_cpu_notifier(&timers_nb);
	open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
}

/**
 * msleep - sleep safely even with waitqueue interruptions
 * @msecs: Time in milliseconds to sleep for
 */
void msleep(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout)
		timeout = schedule_timeout_uninterruptible(timeout);
}
EXPORT_SYMBOL(msleep);

/**
 * msleep_interruptible - sleep waiting for signals
 * @msecs: Time in milliseconds to sleep for
 */
unsigned long msleep_interruptible(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout && !signal_pending(current))
		timeout = schedule_timeout_interruptible(timeout);
	return jiffies_to_msecs(timeout);
}
EXPORT_SYMBOL(msleep_interruptible);

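/*
 * Illustrative note (not from this file): msleep() is built on the
 * jiffies wheel, so short sleeps can overshoot by a tick or more.
 * For delays in the low-millisecond range, the hrtimer-backed
 * usleep_range() below gives tighter bounds:
 *
 *	msleep(100);			// coarse, wheel-based
 *	usleep_range(1000, 2000);	// 1-2 ms, hrtimer-based
 */
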
static int __sched do_usleep_range(unsigned long min, unsigned long max)
{
	ktime_t kmin;
	unsigned long delta;

	kmin = ktime_set(0, min * NSEC_PER_USEC);
	delta = (max - min) * NSEC_PER_USEC;
	return schedule_hrtimeout_range(&kmin, delta, HRTIMER_MODE_REL);
}

/**
 * usleep_range - Drop in replacement for udelay where wakeup is flexible
 * @min: Minimum time in usecs to sleep
 * @max: Maximum time in usecs to sleep
 */
void usleep_range(unsigned long min, unsigned long max)
{
	__set_current_state(TASK_UNINTERRUPTIBLE);
	do_usleep_range(min, max);
}
EXPORT_SYMBOL(usleep_range);