/*
 *  linux/kernel/timer.c
 *
 *  Kernel internal timers
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
 *
 *  1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *              "A Kernel Model for Precision Timekeeping" by Dave Mills
 *  1998-12-24  Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
 *              serialize accesses to xtime/lost_ticks).
 *                              Copyright (C) 1998  Andrea Arcangeli
 *  1999-03-10  Improved NTP compatibility by Ulrich Windl
 *  2002-05-31  Move sys_sysinfo here and make its locking sane, Robert Love
 *  2000-10-05  Implemented scalable SMP per-CPU timer handling.
 *                              Copyright (C) 2000, 2001, 2002  Ingo Molnar
 *              Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 */

#include <linux/kernel_stat.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pid_namespace.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/tick.h>
#include <linux/kallsyms.h>
#include <linux/irq_work.h>
#include <linux/sched.h>
#include <linux/sched/sysctl.h>
#include <linux/slab.h>
#include <linux/compat.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>

#define CREATE_TRACE_POINTS
#include <trace/events/timer.h>

__visible u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

/*
 * per-CPU timer vector definitions:
 */
#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)
#define MAX_TVAL ((unsigned long)((1ULL << (TVR_BITS + 4*TVN_BITS)) - 1))

struct tvec {
        struct list_head vec[TVN_SIZE];
};

struct tvec_root {
        struct list_head vec[TVR_SIZE];
};

struct tvec_base {
        spinlock_t lock;
        struct timer_list *running_timer;
        unsigned long timer_jiffies;
        unsigned long next_timer;
        unsigned long active_timers;
        unsigned long all_timers;
        int cpu;
        struct tvec_root tv1;
        struct tvec tv2;
        struct tvec tv3;
        struct tvec tv4;
        struct tvec tv5;
} ____cacheline_aligned;
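
/*
 * Illustrative sketch (not part of the original file): how an expiry
 * maps onto the wheel levels above, assuming !CONFIG_BASE_SMALL
 * (TVR_BITS = 8, TVN_BITS = 6).  A timer due in "idx" jiffies lands in:
 *
 *      idx < 2^8       -> tv1 (256 one-jiffy buckets)
 *      idx < 2^14      -> tv2 (64 buckets, 256 jiffies wide)
 *      idx < 2^20      -> tv3 (64 buckets, 2^14 jiffies wide)
 *      idx < 2^26      -> tv4 (64 buckets, 2^20 jiffies wide)
 *      otherwise       -> tv5, clamped to MAX_TVAL (2^32 - 1)
 *
 * E.g. with HZ=250 a timer ten minutes out (150000 jiffies) satisfies
 * 2^14 <= 150000 < 2^20, so it is queued in tv3 and cascades down
 * through tv2 and tv1 as time advances.
 */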

/*
 * __TIMER_INITIALIZER() needs to set ->base to a valid pointer (because we've
 * made NULL special, hint: lock_timer_base()) and we cannot get a compile time
 * pointer to per-cpu entries because we don't know where we'll map the section,
 * even for the boot cpu.
 *
 * And so we use boot_tvec_bases for boot CPU and per-cpu __tvec_bases for the
 * rest of them.
 */
struct tvec_base boot_tvec_bases;
EXPORT_SYMBOL(boot_tvec_bases);

static DEFINE_PER_CPU(struct tvec_base *, tvec_bases) = &boot_tvec_bases;

/* Functions below help us manage 'deferrable' flag */
static inline unsigned int tbase_get_deferrable(struct tvec_base *base)
{
        return ((unsigned int)(unsigned long)base & TIMER_DEFERRABLE);
}

static inline unsigned int tbase_get_irqsafe(struct tvec_base *base)
{
        return ((unsigned int)(unsigned long)base & TIMER_IRQSAFE);
}

static inline struct tvec_base *tbase_get_base(struct tvec_base *base)
{
        return ((struct tvec_base *)((unsigned long)base & ~TIMER_FLAG_MASK));
}

static inline void
timer_set_base(struct timer_list *timer, struct tvec_base *new_base)
{
        unsigned long flags = (unsigned long)timer->base & TIMER_FLAG_MASK;

        timer->base = (struct tvec_base *)((unsigned long)(new_base) | flags);
}
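
/*
 * Illustrative note (not part of the original file): the helpers above
 * work because struct tvec_base is ____cacheline_aligned, so the low
 * bits of a real base pointer are always zero and can carry flag bits:
 *
 *      timer->base = (struct tvec_base *)
 *                      ((unsigned long)real_base | TIMER_DEFERRABLE);
 *
 * tbase_get_deferrable(timer->base) then reads the flag back, and
 * tbase_get_base(timer->base) masks it off to recover real_base.
 */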

static unsigned long round_jiffies_common(unsigned long j, int cpu,
                bool force_up)
{
        int rem;
        unsigned long original = j;

        /*
         * We don't want all cpus firing their timers at once hitting the
         * same lock or cachelines, so we skew each extra cpu with an extra
         * 3 jiffies. This 3 jiffies came originally from the mm/ code which
         * already did this.
         * The skew is done by adding 3*cpunr, then round, then subtract this
         * extra offset again.
         */
        j += cpu * 3;

        rem = j % HZ;

        /*
         * If the target jiffy is just after a whole second (which can happen
         * due to delays of the timer irq, long irq off times etc etc) then
         * we should round down to the whole second, not up. Use 1/4th second
         * as cutoff for this rounding as an extreme upper bound for this.
         * But never round down if @force_up is set.
         */
        if (rem < HZ/4 && !force_up) /* round down */
                j = j - rem;
        else /* round up */
                j = j - rem + HZ;

        /* now that we have rounded, subtract the extra skew again */
        j -= cpu * 3;

        /*
         * Make sure j is still in the future. Otherwise return the
         * unmodified value.
         */
        return time_is_after_jiffies(j) ? j : original;
}
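
/*
 * Worked example (illustrative, assuming HZ=100): for cpu 2 and
 * j = 1018, round_jiffies_common() first applies the skew, giving
 * 1024; rem = 1024 % 100 = 24 is below HZ/4 = 25, so it rounds down
 * to 1000, and removing the skew again yields 994.  Each CPU thus
 * lands on its own slightly offset "whole second", by design.
 */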

/**
 * __round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies(unsigned long j, int cpu)
{
        return round_jiffies_common(j, cpu, false);
}
EXPORT_SYMBOL_GPL(__round_jiffies);

/**
 * __round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies_relative(unsigned long j, int cpu)
{
        unsigned long j0 = jiffies;

        /* Use j0 because jiffies might change while we run */
        return round_jiffies_common(j + j0, cpu, false) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_relative);

/**
 * round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies(unsigned long j)
{
        return round_jiffies_common(j, raw_smp_processor_id(), false);
}
EXPORT_SYMBOL_GPL(round_jiffies);

/**
 * round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies_relative(unsigned long j)
{
        return __round_jiffies_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_relative);
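
/*
 * Usage sketch (illustrative, not part of the original file): a
 * housekeeping timer that only needs coarse, once-per-interval
 * accuracy can batch its wakeups with other such timers:
 *
 *      mod_timer(&my_timer, round_jiffies(jiffies + 5 * HZ));
 *
 * (my_timer is a hypothetical, already initialized timer_list.)
 */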

/**
 * __round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies() except that it will never
 * round down.  This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up(unsigned long j, int cpu)
{
        return round_jiffies_common(j, cpu, true);
}
EXPORT_SYMBOL_GPL(__round_jiffies_up);

/**
 * __round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies_relative() except that it will never
 * round down.  This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
{
        unsigned long j0 = jiffies;

        /* Use j0 because jiffies might change while we run */
        return round_jiffies_common(j + j0, cpu, true) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);

/**
 * round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * This is the same as round_jiffies() except that it will never
 * round down.  This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up(unsigned long j)
{
        return round_jiffies_common(j, raw_smp_processor_id(), true);
}
EXPORT_SYMBOL_GPL(round_jiffies_up);

/**
 * round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * This is the same as round_jiffies_relative() except that it will never
 * round down.  This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up_relative(unsigned long j)
{
        return __round_jiffies_up_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_up_relative);

/**
 * set_timer_slack - set the allowed slack for a timer
 * @timer: the timer to be modified
 * @slack_hz: the amount of time (in jiffies) allowed for rounding
 *
 * Set the amount of slack, in jiffies, that a timer is allowed.
 * By setting this value, the timer subsystem will schedule the actual
 * timer somewhere between the time mod_timer() asks for, and that time
 * plus the slack.
 *
 * By setting the slack to -1, a percentage of the delay is used
 * instead.
 */
void set_timer_slack(struct timer_list *timer, int slack_hz)
{
        timer->slack = slack_hz;
}
EXPORT_SYMBOL_GPL(set_timer_slack);
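
/*
 * Usage sketch (illustrative): a timer that fires about once a second
 * and tolerates 100ms of lateness lets the subsystem coalesce it with
 * its neighbours:
 *
 *      set_timer_slack(&my_timer, HZ / 10);
 *      mod_timer(&my_timer, jiffies + HZ);
 *
 * (my_timer is hypothetical; the slack is applied by apply_slack()
 * on the mod_timer() path below.)
 */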

/*
 * If the list is empty, catch up ->timer_jiffies to the current time.
 * The caller must hold the tvec_base lock.  Returns true if the list
 * was empty and therefore ->timer_jiffies was updated.
 */
static bool catchup_timer_jiffies(struct tvec_base *base)
{
        if (!base->all_timers) {
                base->timer_jiffies = jiffies;
                return true;
        }
        return false;
}

static void
__internal_add_timer(struct tvec_base *base, struct timer_list *timer)
{
        unsigned long expires = timer->expires;
        unsigned long idx = expires - base->timer_jiffies;
        struct list_head *vec;

        if (idx < TVR_SIZE) {
                int i = expires & TVR_MASK;
                vec = base->tv1.vec + i;
        } else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
                int i = (expires >> TVR_BITS) & TVN_MASK;
                vec = base->tv2.vec + i;
        } else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
                int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
                vec = base->tv3.vec + i;
        } else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
                int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
                vec = base->tv4.vec + i;
        } else if ((signed long) idx < 0) {
                /*
                 * Can happen if you add a timer with expires == jiffies,
                 * or you set a timer to go off in the past
                 */
                vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
        } else {
                int i;
                /* If the timeout is larger than MAX_TVAL (on 64-bit
                 * architectures or with CONFIG_BASE_SMALL=1) then we
                 * use the maximum timeout.
                 */
                if (idx > MAX_TVAL) {
                        idx = MAX_TVAL;
                        expires = idx + base->timer_jiffies;
                }
                i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
                vec = base->tv5.vec + i;
        }
        /*
         * Timers are FIFO:
         */
        list_add_tail(&timer->entry, vec);
}
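
/*
 * Worked example (illustrative, !CONFIG_BASE_SMALL): with
 * base->timer_jiffies = 1000 and timer->expires = 1300, idx = 300 is
 * >= TVR_SIZE (256) but < 1 << 14, so the timer is queued in tv2 at
 * slot (1300 >> 8) & 63 = 5.  When the wheel reaches jiffy 1280 the
 * cascade in __run_timers() re-adds it; idx is then 20, which places
 * it in a tv1 bucket for exact expiry at 1300.
 */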

static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
{
        (void)catchup_timer_jiffies(base);
        __internal_add_timer(base, timer);
        /*
         * Update base->active_timers and base->next_timer
         */
        if (!tbase_get_deferrable(timer->base)) {
                if (!base->active_timers++ ||
                    time_before(timer->expires, base->next_timer))
                        base->next_timer = timer->expires;
        }
        base->all_timers++;

        /*
         * Check whether the other CPU is in dynticks mode and needs
         * to be triggered to reevaluate the timer wheel.
         * We are protected against the other CPU fiddling
         * with the timer by holding the timer base lock. This also
         * makes sure that a CPU on the way to stop its tick can not
         * evaluate the timer wheel.
         *
         * Spare the IPI for deferrable timers on idle targets though.
         * The next busy ticks will take care of it. Except full dynticks
         * require special care against races with idle_cpu(), let's deal
         * with that later.
         */
        if (!tbase_get_deferrable(timer->base) || tick_nohz_full_cpu(base->cpu))
                wake_up_nohz_cpu(base->cpu);
}

#ifdef CONFIG_TIMER_STATS
void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
{
        if (timer->start_site)
                return;

        timer->start_site = addr;
        memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
        timer->start_pid = current->pid;
}

static void timer_stats_account_timer(struct timer_list *timer)
{
        unsigned int flag = 0;

        if (likely(!timer->start_site))
                return;
        if (unlikely(tbase_get_deferrable(timer->base)))
                flag |= TIMER_STATS_FLAG_DEFERRABLE;

        timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
                                 timer->function, timer->start_comm, flag);
}

#else
static void timer_stats_account_timer(struct timer_list *timer) {}
#endif

#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static struct debug_obj_descr timer_debug_descr;

static void *timer_debug_hint(void *addr)
{
        return ((struct timer_list *) addr)->function;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int timer_fixup_init(void *addr, enum debug_obj_state state)
{
        struct timer_list *timer = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                del_timer_sync(timer);
                debug_object_init(timer, &timer_debug_descr);
                return 1;
        default:
                return 0;
        }
}

/* Stub timer callback for improperly used timers. */
static void stub_timer(unsigned long data)
{
        WARN_ON(1);
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int timer_fixup_activate(void *addr, enum debug_obj_state state)
{
        struct timer_list *timer = addr;

        switch (state) {

        case ODEBUG_STATE_NOTAVAILABLE:
                /*
                 * This is not really a fixup. The timer was
                 * statically initialized. We just make sure that it
                 * is tracked in the object tracker.
                 */
                if (timer->entry.next == NULL &&
                    timer->entry.prev == TIMER_ENTRY_STATIC) {
                        debug_object_init(timer, &timer_debug_descr);
                        debug_object_activate(timer, &timer_debug_descr);
                        return 0;
                } else {
                        setup_timer(timer, stub_timer, 0);
                        return 1;
                }
                return 0;

        case ODEBUG_STATE_ACTIVE:
                WARN_ON(1);

        default:
                return 0;
        }
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int timer_fixup_free(void *addr, enum debug_obj_state state)
{
        struct timer_list *timer = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                del_timer_sync(timer);
                debug_object_free(timer, &timer_debug_descr);
                return 1;
        default:
                return 0;
        }
}

/*
 * fixup_assert_init is called when:
 * - an untracked/uninit-ed object is found
 */
static int timer_fixup_assert_init(void *addr, enum debug_obj_state state)
{
        struct timer_list *timer = addr;

        switch (state) {
        case ODEBUG_STATE_NOTAVAILABLE:
                if (timer->entry.prev == TIMER_ENTRY_STATIC) {
                        /*
                         * This is not really a fixup. The timer was
                         * statically initialized. We just make sure that it
                         * is tracked in the object tracker.
                         */
                        debug_object_init(timer, &timer_debug_descr);
                        return 0;
                } else {
                        setup_timer(timer, stub_timer, 0);
                        return 1;
                }
        default:
                return 0;
        }
}

static struct debug_obj_descr timer_debug_descr = {
        .name                   = "timer_list",
        .debug_hint             = timer_debug_hint,
        .fixup_init             = timer_fixup_init,
        .fixup_activate         = timer_fixup_activate,
        .fixup_free             = timer_fixup_free,
        .fixup_assert_init      = timer_fixup_assert_init,
};

static inline void debug_timer_init(struct timer_list *timer)
{
        debug_object_init(timer, &timer_debug_descr);
}

static inline void debug_timer_activate(struct timer_list *timer)
{
        debug_object_activate(timer, &timer_debug_descr);
}

static inline void debug_timer_deactivate(struct timer_list *timer)
{
        debug_object_deactivate(timer, &timer_debug_descr);
}

static inline void debug_timer_free(struct timer_list *timer)
{
        debug_object_free(timer, &timer_debug_descr);
}

static inline void debug_timer_assert_init(struct timer_list *timer)
{
        debug_object_assert_init(timer, &timer_debug_descr);
}

static void do_init_timer(struct timer_list *timer, unsigned int flags,
                          const char *name, struct lock_class_key *key);

void init_timer_on_stack_key(struct timer_list *timer, unsigned int flags,
                             const char *name, struct lock_class_key *key)
{
        debug_object_init_on_stack(timer, &timer_debug_descr);
        do_init_timer(timer, flags, name, key);
}
EXPORT_SYMBOL_GPL(init_timer_on_stack_key);

void destroy_timer_on_stack(struct timer_list *timer)
{
        debug_object_free(timer, &timer_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_timer_on_stack);

#else
static inline void debug_timer_init(struct timer_list *timer) { }
static inline void debug_timer_activate(struct timer_list *timer) { }
static inline void debug_timer_deactivate(struct timer_list *timer) { }
static inline void debug_timer_assert_init(struct timer_list *timer) { }
#endif

static inline void debug_init(struct timer_list *timer)
{
        debug_timer_init(timer);
        trace_timer_init(timer);
}

static inline void
debug_activate(struct timer_list *timer, unsigned long expires)
{
        debug_timer_activate(timer);
        trace_timer_start(timer, expires);
}

static inline void debug_deactivate(struct timer_list *timer)
{
        debug_timer_deactivate(timer);
        trace_timer_cancel(timer);
}

static inline void debug_assert_init(struct timer_list *timer)
{
        debug_timer_assert_init(timer);
}

static void do_init_timer(struct timer_list *timer, unsigned int flags,
                          const char *name, struct lock_class_key *key)
{
        struct tvec_base *base = raw_cpu_read(tvec_bases);

        timer->entry.next = NULL;
        timer->base = (void *)((unsigned long)base | flags);
        timer->slack = -1;
#ifdef CONFIG_TIMER_STATS
        timer->start_site = NULL;
        timer->start_pid = -1;
        memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
        lockdep_init_map(&timer->lockdep_map, name, key, 0);
}

/**
 * init_timer_key - initialize a timer
 * @timer: the timer to be initialized
 * @flags: timer flags
 * @name: name of the timer
 * @key: lockdep class key of the fake lock used for tracking timer
 *       sync lock dependencies
 *
 * init_timer_key() must be done to a timer prior to calling *any* of the
 * other timer functions.
 */
void init_timer_key(struct timer_list *timer, unsigned int flags,
                    const char *name, struct lock_class_key *key)
{
        debug_init(timer);
        do_init_timer(timer, flags, name, key);
}
EXPORT_SYMBOL(init_timer_key);
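
/*
 * Usage sketch (illustrative, not part of the original file): most
 * callers reach init_timer_key() through the init_timer()/setup_timer()
 * wrappers rather than calling it directly:
 *
 *      static struct timer_list my_timer;
 *      static void my_timer_fn(unsigned long data);
 *
 *      setup_timer(&my_timer, my_timer_fn, 0);
 *      mod_timer(&my_timer, jiffies + HZ);
 *
 * (my_timer and my_timer_fn are hypothetical names.)
 */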

static inline void detach_timer(struct timer_list *timer, bool clear_pending)
{
        struct list_head *entry = &timer->entry;

        debug_deactivate(timer);

        __list_del(entry->prev, entry->next);
        if (clear_pending)
                entry->next = NULL;
        entry->prev = LIST_POISON2;
}

static inline void
detach_expired_timer(struct timer_list *timer, struct tvec_base *base)
{
        detach_timer(timer, true);
        if (!tbase_get_deferrable(timer->base))
                base->active_timers--;
        base->all_timers--;
        (void)catchup_timer_jiffies(base);
}

static int detach_if_pending(struct timer_list *timer, struct tvec_base *base,
                             bool clear_pending)
{
        if (!timer_pending(timer))
                return 0;

        detach_timer(timer, clear_pending);
        if (!tbase_get_deferrable(timer->base)) {
                base->active_timers--;
                if (timer->expires == base->next_timer)
                        base->next_timer = base->timer_jiffies;
        }
        base->all_timers--;
        (void)catchup_timer_jiffies(base);
        return 1;
}

/*
 * We are using hashed locking: holding per_cpu(tvec_bases).lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on ->tvX lists.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
static struct tvec_base *lock_timer_base(struct timer_list *timer,
                                         unsigned long *flags)
        __acquires(timer->base->lock)
{
        struct tvec_base *base;

        for (;;) {
                struct tvec_base *prelock_base = timer->base;
                base = tbase_get_base(prelock_base);
                if (likely(base != NULL)) {
                        spin_lock_irqsave(&base->lock, *flags);
                        if (likely(prelock_base == timer->base))
                                return base;
                        /* The timer has migrated to another CPU */
                        spin_unlock_irqrestore(&base->lock, *flags);
                }
                cpu_relax();
        }
}

static inline int
__mod_timer(struct timer_list *timer, unsigned long expires,
            bool pending_only, int pinned)
{
        struct tvec_base *base, *new_base;
        unsigned long flags;
        int ret = 0, cpu;

        timer_stats_timer_set_start_info(timer);
        BUG_ON(!timer->function);

        base = lock_timer_base(timer, &flags);

        ret = detach_if_pending(timer, base, false);
        if (!ret && pending_only)
                goto out_unlock;

        debug_activate(timer, expires);

        cpu = get_nohz_timer_target(pinned);
        new_base = per_cpu(tvec_bases, cpu);

        if (base != new_base) {
                /*
                 * We are trying to schedule the timer on the local CPU.
                 * However we can't change timer's base while it is running,
                 * otherwise del_timer_sync() can't detect that the timer's
                 * handler has not yet finished. This also guarantees that
                 * the timer is serialized wrt itself.
                 */
                if (likely(base->running_timer != timer)) {
                        /* See the comment in lock_timer_base() */
                        timer_set_base(timer, NULL);
                        spin_unlock(&base->lock);
                        base = new_base;
                        spin_lock(&base->lock);
                        timer_set_base(timer, base);
                }
        }

        timer->expires = expires;
        internal_add_timer(base, timer);

out_unlock:
        spin_unlock_irqrestore(&base->lock, flags);

        return ret;
}

/**
 * mod_timer_pending - modify a pending timer's timeout
 * @timer: the pending timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pending() is the same for pending timers as mod_timer(),
 * but will not re-activate and modify already deleted timers.
 *
 * It is useful for unserialized use of timers.
 */
int mod_timer_pending(struct timer_list *timer, unsigned long expires)
{
        return __mod_timer(timer, expires, true, TIMER_NOT_PINNED);
}
EXPORT_SYMBOL(mod_timer_pending);

/*
 * Decide where to put the timer while taking the slack into account
 *
 * Algorithm:
 * 1) calculate the maximum (absolute) time
 * 2) calculate the highest bit where the expires and new max are different
 * 3) use this bit to make a mask
 * 4) use the bitmask to round down the maximum time, so that all last
 *    bits are zeros
 */
static inline
unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
{
        unsigned long expires_limit, mask;
        int bit;

        if (timer->slack >= 0) {
                expires_limit = expires + timer->slack;
        } else {
                long delta = expires - jiffies;

                if (delta < 256)
                        return expires;

                expires_limit = expires + delta / 256;
        }
        mask = expires ^ expires_limit;
        if (mask == 0)
                return expires;

        bit = find_last_bit(&mask, BITS_PER_LONG);

        mask = (1UL << bit) - 1;

        expires_limit = expires_limit & ~(mask);

        return expires_limit;
}
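
/*
 * Worked example (illustrative): with jiffies = 1000, expires = 2023
 * and the default slack of -1, delta = 1023 so expires_limit becomes
 * 2023 + 1023/256 = 2026.  mask = 2023 ^ 2026 = 13, whose highest set
 * bit is bit 3, so the low three bits of expires_limit are cleared and
 * apply_slack() returns 2024, somewhere in [expires, expires_limit].
 * Larger deltas clear more low bits, widening the coalescing window.
 */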

/**
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer() is a more efficient way to update the expires field of an
 * active timer (if the timer is inactive it will be activated)
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
        expires = apply_slack(timer, expires);

        /*
         * This is a common optimization triggered by the
         * networking code - if the timer is re-modified
         * to be the same thing then just return:
         */
        if (timer_pending(timer) && timer->expires == expires)
                return 1;

        return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);
}
EXPORT_SYMBOL(mod_timer);
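
/*
 * Usage sketch (illustrative): a self-rearming watchdog, where the
 * callback itself calls mod_timer() to schedule the next run (all
 * names below are hypothetical):
 *
 *      static void my_watchdog_fn(unsigned long data)
 *      {
 *              struct my_dev *dev = (struct my_dev *)data;
 *
 *              if (!my_dev_healthy(dev))
 *                      my_dev_recover(dev);
 *              mod_timer(&dev->watchdog, jiffies + 2 * HZ);
 *      }
 */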

/**
 * mod_timer_pinned - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pinned() is a way to update the expires field of an
 * active timer (if the timer is inactive it will be activated)
 * and to ensure that the timer is scheduled on the current CPU.
 *
 * Note that this does not prevent the timer from being migrated
 * when the current CPU goes offline.  If this is a problem for
 * you, use CPU-hotplug notifiers to handle it correctly, for
 * example, cancelling the timer when the corresponding CPU goes
 * offline.
 *
 * mod_timer_pinned(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 */
int mod_timer_pinned(struct timer_list *timer, unsigned long expires)
{
        if (timer->expires == expires && timer_pending(timer))
                return 1;

        return __mod_timer(timer, expires, false, TIMER_PINNED);
}
EXPORT_SYMBOL(mod_timer_pinned);

/**
 * add_timer - start a timer
 * @timer: the timer to be added
 *
 * The kernel will do a ->function(->data) callback from the
 * timer interrupt at the ->expires point in the future. The
 * current time is 'jiffies'.
 *
 * The timer's ->expires, ->function (and if the handler uses it, ->data)
 * fields must be set prior to calling this function.
 *
 * Timers with an ->expires field in the past will be executed in the next
 * timer tick.
 */
void add_timer(struct timer_list *timer)
{
        BUG_ON(timer_pending(timer));
        mod_timer(timer, timer->expires);
}
EXPORT_SYMBOL(add_timer);

/**
 * add_timer_on - start a timer on a particular CPU
 * @timer: the timer to be added
 * @cpu: the CPU to start it on
 *
 * This is not very scalable on SMP. Double adds are not possible.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
        struct tvec_base *base = per_cpu(tvec_bases, cpu);
        unsigned long flags;

        timer_stats_timer_set_start_info(timer);
        BUG_ON(timer_pending(timer) || !timer->function);
        spin_lock_irqsave(&base->lock, flags);
        timer_set_base(timer, base);
        debug_activate(timer, timer->expires);
        internal_add_timer(base, timer);
        spin_unlock_irqrestore(&base->lock, flags);
}
EXPORT_SYMBOL_GPL(add_timer_on);

/**
 * del_timer - deactivate a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and inactive
 * timers.
 *
 * The function returns whether it has deactivated a pending timer or not.
 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
 * active timer returns 1.)
 */
int del_timer(struct timer_list *timer)
{
        struct tvec_base *base;
        unsigned long flags;
        int ret = 0;

        debug_assert_init(timer);

        timer_stats_timer_clear_start_info(timer);
        if (timer_pending(timer)) {
                base = lock_timer_base(timer, &flags);
                ret = detach_if_pending(timer, base, true);
                spin_unlock_irqrestore(&base->lock, flags);
        }

        return ret;
}
EXPORT_SYMBOL(del_timer);

/**
 * try_to_del_timer_sync - Try to deactivate a timer
 * @timer: the timer to be deactivated
 *
 * This function tries to deactivate a timer. Upon successful (ret >= 0)
 * exit the timer is not queued and the handler is not running on any CPU.
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
        struct tvec_base *base;
        unsigned long flags;
        int ret = -1;

        debug_assert_init(timer);

        base = lock_timer_base(timer, &flags);

        if (base->running_timer != timer) {
                timer_stats_timer_clear_start_info(timer);
                ret = detach_if_pending(timer, base, true);
        }
        spin_unlock_irqrestore(&base->lock, flags);

        return ret;
}
EXPORT_SYMBOL(try_to_del_timer_sync);

#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct tvec_base, __tvec_bases);

/**
 * del_timer_sync - deactivate a timer and wait for the handler to finish.
 * @timer: the timer to be deactivated
 *
 * This function only differs from del_timer() on SMP: besides deactivating
 * the timer it also makes sure the handler has finished executing on other
 * CPUs.
 *
 * Synchronization rules: Callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts unless the timer is an irqsafe one. The caller must
 * not hold locks which would prevent completion of the timer's
 * handler. The timer's handler must not call add_timer_on(). Upon exit the
 * timer is not queued and the handler is not running on any CPU.
 *
 * Note: For !irqsafe timers, you must not hold locks that are held in
 * interrupt context while calling this function. Even if the lock has
 * nothing to do with the timer in question.  Here's why:
 *
 *    CPU0                             CPU1
 *    ----                             ----
 *                                     <SOFTIRQ>
 *                                       call_timer_fn();
 *                                       base->running_timer = mytimer;
 *    spin_lock_irq(somelock);
 *                                     <IRQ>
 *                                        spin_lock(somelock);
 *    del_timer_sync(mytimer);
 *    while (base->running_timer == mytimer);
 *
 * Now del_timer_sync() will never return and never release somelock.
 * The interrupt on the other CPU is waiting to grab somelock but
 * it has interrupted the softirq that CPU0 is waiting to finish.
 *
 * The function returns whether it has deactivated a pending timer or not.
 */
int del_timer_sync(struct timer_list *timer)
{
#ifdef CONFIG_LOCKDEP
        unsigned long flags;

        /*
         * If lockdep gives a backtrace here, please reference
         * the synchronization rules above.
         */
        local_irq_save(flags);
        lock_map_acquire(&timer->lockdep_map);
        lock_map_release(&timer->lockdep_map);
        local_irq_restore(flags);
#endif
        /*
         * don't use it in hardirq context, because it
         * could lead to deadlock.
         */
        WARN_ON(in_irq() && !tbase_get_irqsafe(timer->base));
        for (;;) {
                int ret = try_to_del_timer_sync(timer);
                if (ret >= 0)
                        return ret;
                cpu_relax();
        }
}
EXPORT_SYMBOL(del_timer_sync);
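
/*
 * Usage sketch (illustrative): typical teardown order for a driver
 * whose timer can rearm itself - first stop the rearming, then wait
 * for a possibly running handler, then free (hypothetical names):
 *
 *      dev->shutting_down = true;
 *      del_timer_sync(&dev->timer);
 *      kfree(dev);
 *
 * The handler must check dev->shutting_down before calling mod_timer()
 * again, or del_timer_sync() alone cannot guarantee quiescence.
 */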
#endif

static int cascade(struct tvec_base *base, struct tvec *tv, int index)
{
        /* cascade all the timers from tv up one level */
        struct timer_list *timer, *tmp;
        struct list_head tv_list;

        list_replace_init(tv->vec + index, &tv_list);

        /*
         * We are removing _all_ timers from the list, so we
         * don't have to detach them individually.
         */
        list_for_each_entry_safe(timer, tmp, &tv_list, entry) {
                BUG_ON(tbase_get_base(timer->base) != base);
                /* No accounting, while moving them */
                __internal_add_timer(base, timer);
        }

        return index;
}

static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
                          unsigned long data)
{
        int count = preempt_count();

#ifdef CONFIG_LOCKDEP
        /*
         * It is permissible to free the timer from inside the
         * function that is called from it, this we need to take into
         * account for lockdep too.  To avoid bogus "held lock freed"
         * warnings as well as problems when looking into
         * timer->lockdep_map, make a copy and use that here.
         */
        struct lockdep_map lockdep_map;

        lockdep_copy_map(&lockdep_map, &timer->lockdep_map);
#endif
        /*
         * Couple the lock chain with the lock chain at
         * del_timer_sync() by acquiring the lock_map around the fn()
         * call here and in del_timer_sync().
         */
        lock_map_acquire(&lockdep_map);

        trace_timer_expire_entry(timer);
        fn(data);
        trace_timer_expire_exit(timer);

        lock_map_release(&lockdep_map);

        if (count != preempt_count()) {
                WARN_ONCE(1, "timer: %pF preempt leak: %08x -> %08x\n",
                          fn, count, preempt_count());
                /*
                 * Restore the preempt count. That gives us a decent
                 * chance to survive and extract information. If the
                 * callback kept a lock held, bad luck, but not worse
                 * than the BUG() we had.
                 */
                preempt_count_set(count);
        }
}

#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)

/**
 * __run_timers - run all expired timers (if any) on this CPU.
 * @base: the timer vector to be processed.
 *
 * This function cascades all vectors and executes all expired timer
 * vectors.
 */
static inline void __run_timers(struct tvec_base *base)
{
        struct timer_list *timer;

        spin_lock_irq(&base->lock);
        if (catchup_timer_jiffies(base)) {
                spin_unlock_irq(&base->lock);
                return;
        }
        while (time_after_eq(jiffies, base->timer_jiffies)) {
                struct list_head work_list;
                struct list_head *head = &work_list;
                int index = base->timer_jiffies & TVR_MASK;

                /*
                 * Cascade timers:
                 */
                if (!index &&
                        (!cascade(base, &base->tv2, INDEX(0))) &&
                                (!cascade(base, &base->tv3, INDEX(1))) &&
                                        !cascade(base, &base->tv4, INDEX(2)))
                        cascade(base, &base->tv5, INDEX(3));
                ++base->timer_jiffies;
                list_replace_init(base->tv1.vec + index, head);
                while (!list_empty(head)) {
                        void (*fn)(unsigned long);
                        unsigned long data;
                        bool irqsafe;

                        timer = list_first_entry(head, struct timer_list, entry);
                        fn = timer->function;
                        data = timer->data;
                        irqsafe = tbase_get_irqsafe(timer->base);

                        timer_stats_account_timer(timer);

                        base->running_timer = timer;
                        detach_expired_timer(timer, base);

                        if (irqsafe) {
                                spin_unlock(&base->lock);
                                call_timer_fn(timer, fn, data);
                                spin_lock(&base->lock);
                        } else {
                                spin_unlock_irq(&base->lock);
                                call_timer_fn(timer, fn, data);
                                spin_lock_irq(&base->lock);
                        }
                }
        }
        base->running_timer = NULL;
        spin_unlock_irq(&base->lock);
}
#ifdef CONFIG_NO_HZ_COMMON
/*
 * Find out when the next timer event is due to happen. This
 * is used on S/390 to stop all activity when a CPU is idle.
 * This function needs to be called with interrupts disabled.
 */
static unsigned long __next_timer_interrupt(struct tvec_base *base)
{
        unsigned long timer_jiffies = base->timer_jiffies;
        unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA;
        int index, slot, array, found = 0;
        struct timer_list *nte;
        struct tvec *varray[4];

        /* Look for timer events in tv1. */
        index = slot = timer_jiffies & TVR_MASK;
        do {
                list_for_each_entry(nte, base->tv1.vec + slot, entry) {
                        if (tbase_get_deferrable(nte->base))
                                continue;

                        found = 1;
                        expires = nte->expires;
                        /* Look at the cascade bucket(s)? */
                        if (!index || slot < index)
                                goto cascade;
                        return expires;
                }
                slot = (slot + 1) & TVR_MASK;
        } while (slot != index);

cascade:
        /* Calculate the next cascade event */
        if (index)
                timer_jiffies += TVR_SIZE - index;
        timer_jiffies >>= TVR_BITS;

        /* Check tv2-tv5. */
        varray[0] = &base->tv2;
        varray[1] = &base->tv3;
        varray[2] = &base->tv4;
        varray[3] = &base->tv5;

        for (array = 0; array < 4; array++) {
                struct tvec *varp = varray[array];

                index = slot = timer_jiffies & TVN_MASK;
                do {
                        list_for_each_entry(nte, varp->vec + slot, entry) {
                                if (tbase_get_deferrable(nte->base))
                                        continue;

                                found = 1;
                                if (time_before(nte->expires, expires))
                                        expires = nte->expires;
                        }
                        /*
                         * Do we still search for the first timer or are
                         * we looking up the cascade buckets ?
                         */
                        if (found) {
                                /* Look at the cascade bucket(s)? */
                                if (!index || slot < index)
                                        break;
                                return expires;
                        }
                        slot = (slot + 1) & TVN_MASK;
                } while (slot != index);

                if (index)
                        timer_jiffies += TVN_SIZE - index;
                timer_jiffies >>= TVN_BITS;
        }
        return expires;
}

/*
 * Check if the next hrtimer event is before the next timer wheel
 * event:
 */
static unsigned long cmp_next_hrtimer_event(unsigned long now,
                                            unsigned long expires)
{
        ktime_t hr_delta = hrtimer_get_next_event();
        struct timespec tsdelta;
        unsigned long delta;

        if (hr_delta.tv64 == KTIME_MAX)
                return expires;

        /*
         * Expired timer available, let it expire in the next tick
         */
        if (hr_delta.tv64 <= 0)
                return now + 1;

        tsdelta = ktime_to_timespec(hr_delta);
        delta = timespec_to_jiffies(&tsdelta);

        /*
         * Limit the delta to the max value, which is checked in
         * tick_nohz_stop_sched_tick():
         */
        if (delta > NEXT_TIMER_MAX_DELTA)
                delta = NEXT_TIMER_MAX_DELTA;

        /*
         * Take rounding errors into account and make sure that it
         * expires in the next tick. Otherwise we go into an endless
         * ping pong due to tick_nohz_stop_sched_tick() retriggering
         * the timer softirq
         */
        if (delta < 1)
                delta = 1;
        now += delta;
        if (time_before(now, expires))
                return now;
        return expires;
}

/**
 * get_next_timer_interrupt - return the jiffy of the next pending timer
 * @now: current time (in jiffies)
 */
unsigned long get_next_timer_interrupt(unsigned long now)
{
        struct tvec_base *base = __this_cpu_read(tvec_bases);
        unsigned long expires = now + NEXT_TIMER_MAX_DELTA;

        /*
         * Pretend that there is no timer pending if the cpu is offline.
         * Possible pending timers will be migrated later to an active cpu.
         */
        if (cpu_is_offline(smp_processor_id()))
                return expires;

        spin_lock(&base->lock);
        if (base->active_timers) {
                if (time_before_eq(base->next_timer, base->timer_jiffies))
                        base->next_timer = __next_timer_interrupt(base);
                expires = base->next_timer;
        }
        spin_unlock(&base->lock);

        if (time_before_eq(expires, now))
                return now;

        return cmp_next_hrtimer_event(now, expires);
}
#endif
1da177e4 | 1385 | /* |
5b4db0c2 | 1386 | * Called from the timer interrupt handler to charge one tick to the current |
1da177e4 LT |
1387 | * process. user_tick is 1 if the tick is user time, 0 for system. |
1388 | */ | |
1389 | void update_process_times(int user_tick) | |
1390 | { | |
1391 | struct task_struct *p = current; | |
1da177e4 LT |
1392 | |
1393 | /* Note: this timer irq context must be accounted for as well. */ | |
fa13a5a1 | 1394 | account_process_tick(p, user_tick); |
1da177e4 | 1395 | run_local_timers(); |
c3377c2d | 1396 | rcu_check_callbacks(user_tick); |
e360adbe PZ |
1397 | #ifdef CONFIG_IRQ_WORK |
1398 | if (in_irq()) | |
76a33061 | 1399 | irq_work_tick(); |
e360adbe | 1400 | #endif |
1da177e4 | 1401 | scheduler_tick(); |
6819457d | 1402 | run_posix_cpu_timers(p); |
1da177e4 LT |
1403 | } |
1404 | ||
1da177e4 LT |
1405 | /* |
1406 | * This function runs expired timers in bottom half (softirq) context. | |
1407 | */ | |
1408 | static void run_timer_softirq(struct softirq_action *h) | |
1409 | { | |
7496351a | 1410 | struct tvec_base *base = __this_cpu_read(tvec_bases); |
1da177e4 | 1411 | |
d3d74453 | 1412 | hrtimer_run_pending(); |
82f67cd9 | 1413 | |
1da177e4 LT |
1414 | if (time_after_eq(jiffies, base->timer_jiffies)) |
1415 | __run_timers(base); | |
1416 | } | |
1417 | ||
1418 | /* | |
1419 | * Called by the local, per-CPU timer interrupt on SMP. | |
1420 | */ | |
1421 | void run_local_timers(void) | |
1422 | { | |
d3d74453 | 1423 | hrtimer_run_queues(); |
1da177e4 LT |
1424 | raise_softirq(TIMER_SOFTIRQ); |
1425 | } | |
1426 | ||
1da177e4 LT |
1427 | #ifdef __ARCH_WANT_SYS_ALARM |
1428 | ||
1429 | /* | |
1430 | * For backwards compatibility? This can be done in libc so Alpha | |
1431 | * and all newer ports shouldn't need it. | |
1432 | */ | |
58fd3aa2 | 1433 | SYSCALL_DEFINE1(alarm, unsigned int, seconds) |
1da177e4 | 1434 | { |
c08b8a49 | 1435 | return alarm_setitimer(seconds); |
1da177e4 LT |
1436 | } |
1437 | ||
1438 | #endif | |
1439 | ||
1da177e4 LT |
1440 | static void process_timeout(unsigned long __data) |
1441 | { | |
36c8b586 | 1442 | wake_up_process((struct task_struct *)__data); |
1da177e4 LT |
1443 | } |
1444 | ||
1445 | /** | |
1446 | * schedule_timeout - sleep until timeout | |
1447 | * @timeout: timeout value in jiffies | |
1448 | * | |
1449 | * Make the current task sleep until @timeout jiffies have | |
1450 | * elapsed. The routine will return immediately unless | |
1451 | * the current task state has been set (see set_current_state()). | |
1452 | * | |
1453 | * You can set the task state as follows: | |
1454 | * | |
1455 | * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to | |
1456 | * pass before the routine returns. The routine will return 0. | |
1457 | * | |
1458 | * %TASK_INTERRUPTIBLE - the routine may return early if a signal is | |
1459 | * delivered to the current task. In this case the remaining time | |
1460 | * in jiffies will be returned, or 0 if the timer expired in time. | |
1461 | * | |
1462 | * The current task state is guaranteed to be TASK_RUNNING when this | |
1463 | * routine returns. | |
1464 | * | |
1465 | * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule | |
1466 | * the CPU away without a bound on the timeout. In this case the return | |
1467 | * value will be %MAX_SCHEDULE_TIMEOUT. | |
1468 | * | |
1469 | * In all cases the return value is guaranteed to be non-negative. | |
1470 | */ | |
7ad5b3a5 | 1471 | signed long __sched schedule_timeout(signed long timeout) |
1da177e4 LT |
1472 | { |
1473 | struct timer_list timer; | |
1474 | unsigned long expire; | |
1475 | ||
1476 | switch (timeout) | |
1477 | { | |
1478 | case MAX_SCHEDULE_TIMEOUT: | |
1479 | /* | |
1480 | * These two special cases are merely a convenience for | |
1481 | * the caller. Nothing more. We could take | |
1482 | * MAX_SCHEDULE_TIMEOUT from one of the negative values, | |
1483 | * but I'd like to return a valid offset (>=0) to allow | |
1484 | * the caller to do whatever it wants with the retval. | |
1485 | */ | |
1486 | schedule(); | |
1487 | goto out; | |
1488 | default: | |
1489 | /* | |
1490 | * Another bit of paranoia. Note that the retval will be | |
1491 | * 0 since no piece of the kernel is supposed to check | |
1492 | * for a negative retval of schedule_timeout() (since it | |
1493 | * should never happen anyway). You just have the printk() | |
1494 | * that will tell you if something has gone wrong and where. | |
1495 | */ | |
5b149bcc | 1496 | if (timeout < 0) { |
1da177e4 | 1497 | printk(KERN_ERR "schedule_timeout: wrong timeout " |
5b149bcc AM |
1498 | "value %lx\n", timeout); |
1499 | dump_stack(); | |
1da177e4 LT |
1500 | current->state = TASK_RUNNING; |
1501 | goto out; | |
1502 | } | |
1503 | } | |
1504 | ||
1505 | expire = timeout + jiffies; | |
1506 | ||
c6f3a97f | 1507 | setup_timer_on_stack(&timer, process_timeout, (unsigned long)current); |
597d0275 | 1508 | __mod_timer(&timer, expire, false, TIMER_NOT_PINNED); |
1da177e4 LT |
1509 | schedule(); |
1510 | del_singleshot_timer_sync(&timer); | |
1511 | ||
c6f3a97f TG |
1512 | /* Remove the timer from the object tracker */ |
1513 | destroy_timer_on_stack(&timer); | |
1514 | ||
1da177e4 LT |
1515 | timeout = expire - jiffies; |
1516 | ||
1517 | out: | |
1518 | return timeout < 0 ? 0 : timeout; | |
1519 | } | |
1da177e4 LT |
1520 | EXPORT_SYMBOL(schedule_timeout); |
1521 | ||
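/*
 * Editor's sketch (not part of the original source): the canonical
 * calling pattern documented above. The task state must be set
 * before calling schedule_timeout(), otherwise it returns
 * immediately. example_wait_one_second() is an invented name.
 */
static signed long example_wait_one_second(void)
{
	set_current_state(TASK_INTERRUPTIBLE);
	/* Sleeps ~1s; returns the remaining jiffies if a signal arrives. */
	return schedule_timeout(HZ);
}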
8a1c1757 AM |
1522 | /* |
1523 | * We can use __set_current_state() here because schedule_timeout() calls | |
1524 | * schedule() unconditionally. | |
1525 | */ | |
64ed93a2 NA |
1526 | signed long __sched schedule_timeout_interruptible(signed long timeout) |
1527 | { | |
a5a0d52c AM |
1528 | __set_current_state(TASK_INTERRUPTIBLE); |
1529 | return schedule_timeout(timeout); | |
64ed93a2 NA |
1530 | } |
1531 | EXPORT_SYMBOL(schedule_timeout_interruptible); | |
1532 | ||
294d5cc2 MW |
1533 | signed long __sched schedule_timeout_killable(signed long timeout) |
1534 | { | |
1535 | __set_current_state(TASK_KILLABLE); | |
1536 | return schedule_timeout(timeout); | |
1537 | } | |
1538 | EXPORT_SYMBOL(schedule_timeout_killable); | |
1539 | ||
64ed93a2 NA |
1540 | signed long __sched schedule_timeout_uninterruptible(signed long timeout) |
1541 | { | |
a5a0d52c AM |
1542 | __set_current_state(TASK_UNINTERRUPTIBLE); |
1543 | return schedule_timeout(timeout); | |
64ed93a2 NA |
1544 | } |
1545 | EXPORT_SYMBOL(schedule_timeout_uninterruptible); | |
1546 | ||
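/*
 * Editor's sketch (not part of the original source): the wrappers
 * above simply fold the state setting into a single call.
 * schedule_timeout_killable() sleeps like the uninterruptible
 * variant but still reacts to fatal signals, which is usually what
 * long driver waits want. example_wait_or_abort() is invented.
 */
static int example_wait_or_abort(void)
{
	/* Nonzero means we were woken before the full 5s elapsed. */
	if (schedule_timeout_killable(5 * HZ))
		return -EINTR;
	return 0;
}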
1da177e4 | 1547 | #ifdef CONFIG_HOTPLUG_CPU |
a6fa8e5a | 1548 | static void migrate_timer_list(struct tvec_base *new_base, struct list_head *head) |
1da177e4 LT |
1549 | { |
1550 | struct timer_list *timer; | |
1551 | ||
1552 | while (!list_empty(head)) { | |
b5e61818 | 1553 | timer = list_first_entry(head, struct timer_list, entry); |
99d5f3aa | 1554 | /* We ignore the accounting on the dying cpu */ |
ec44bc7a | 1555 | detach_timer(timer, false); |
6e453a67 | 1556 | timer_set_base(timer, new_base); |
1da177e4 | 1557 | internal_add_timer(new_base, timer); |
1da177e4 | 1558 | } |
1da177e4 LT |
1559 | } |
1560 | ||
0db0628d | 1561 | static void migrate_timers(int cpu) |
1da177e4 | 1562 | { |
a6fa8e5a PM |
1563 | struct tvec_base *old_base; |
1564 | struct tvec_base *new_base; | |
1da177e4 LT |
1565 | int i; |
1566 | ||
1567 | BUG_ON(cpu_online(cpu)); | |
a4a6198b JB |
1568 | old_base = per_cpu(tvec_bases, cpu); |
1569 | new_base = get_cpu_var(tvec_bases); | |
d82f0b0f ON |
1570 | /* |
1571 | * The caller is globally serialized and nobody else | |
1572 | * takes two locks at once, so deadlock is not possible. | |
1573 | */ | |
1574 | spin_lock_irq(&new_base->lock); | |
0d180406 | 1575 | spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING); |
3691c519 ON |
1576 | |
1577 | BUG_ON(old_base->running_timer); | |
1da177e4 | 1578 | |
1da177e4 | 1579 | for (i = 0; i < TVR_SIZE; i++) |
55c888d6 ON |
1580 | migrate_timer_list(new_base, old_base->tv1.vec + i); |
1581 | for (i = 0; i < TVN_SIZE; i++) { | |
1582 | migrate_timer_list(new_base, old_base->tv2.vec + i); | |
1583 | migrate_timer_list(new_base, old_base->tv3.vec + i); | |
1584 | migrate_timer_list(new_base, old_base->tv4.vec + i); | |
1585 | migrate_timer_list(new_base, old_base->tv5.vec + i); | |
1586 | } | |
1587 | ||
8def9060 VK |
1588 | old_base->active_timers = 0; |
1589 | old_base->all_timers = 0; | |
1590 | ||
0d180406 | 1591 | spin_unlock(&old_base->lock); |
d82f0b0f | 1592 | spin_unlock_irq(&new_base->lock); |
1da177e4 | 1593 | put_cpu_var(tvec_bases); |
1da177e4 | 1594 | } |
1da177e4 | 1595 | |
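/*
 * Editor's note (not part of the original source): old_base->lock and
 * new_base->lock are instances of the same lock class, so lockdep
 * would normally flag taking both as a potential deadlock. The
 * spin_lock_nested(..., SINGLE_DEPTH_NESTING) above declares this one
 * extra nesting level as intentional; it is safe because CPU hotplug
 * serializes all callers, so no two paths ever take a pair of
 * tvec_base locks in opposite order.
 */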
0db0628d | 1596 | static int timer_cpu_notify(struct notifier_block *self, |
1da177e4 LT |
1597 | unsigned long action, void *hcpu) |
1598 | { | |
8def9060 | 1599 | switch (action) { |
1da177e4 | 1600 | case CPU_DEAD: |
8bb78442 | 1601 | case CPU_DEAD_FROZEN: |
8def9060 | 1602 | migrate_timers((long)hcpu); |
1da177e4 | 1603 | break; |
1da177e4 LT |
1604 | default: |
1605 | break; | |
1606 | } | |
3650b57f | 1607 | |
1da177e4 LT |
1608 | return NOTIFY_OK; |
1609 | } | |
1610 | ||
3650b57f PZ |
1611 | static inline void timer_register_cpu_notifier(void) |
1612 | { | |
1613 | cpu_notifier(timer_cpu_notify, 0); | |
1614 | } | |
1615 | #else | |
1616 | static inline void timer_register_cpu_notifier(void) { } | |
1617 | #endif /* CONFIG_HOTPLUG_CPU */ | |
1da177e4 | 1618 | |
8def9060 VK |
1619 | static void __init init_timer_cpu(struct tvec_base *base, int cpu) |
1620 | { | |
1621 | int j; | |
1da177e4 | 1622 | |
3650b57f PZ |
1623 | BUG_ON(base != tbase_get_base(base)); |
1624 | ||
8def9060 VK |
1625 | base->cpu = cpu; |
1626 | per_cpu(tvec_bases, cpu) = base; | |
1627 | spin_lock_init(&base->lock); | |
1628 | ||
1629 | for (j = 0; j < TVN_SIZE; j++) { | |
1630 | INIT_LIST_HEAD(base->tv5.vec + j); | |
1631 | INIT_LIST_HEAD(base->tv4.vec + j); | |
1632 | INIT_LIST_HEAD(base->tv3.vec + j); | |
1633 | INIT_LIST_HEAD(base->tv2.vec + j); | |
1634 | } | |
1635 | for (j = 0; j < TVR_SIZE; j++) | |
1636 | INIT_LIST_HEAD(base->tv1.vec + j); | |
1637 | ||
1638 | base->timer_jiffies = jiffies; | |
1639 | base->next_timer = base->timer_jiffies; | |
1640 | } | |
1641 | ||
1642 | static void __init init_timer_cpus(void) | |
1da177e4 | 1643 | { |
8def9060 VK |
1644 | struct tvec_base *base; |
1645 | int local_cpu = smp_processor_id(); | |
1646 | int cpu; | |
1647 | ||
1648 | for_each_possible_cpu(cpu) { | |
1649 | if (cpu == local_cpu) | |
1650 | base = &boot_tvec_bases; | |
3650b57f | 1651 | #ifdef CONFIG_SMP |
8def9060 VK |
1652 | else |
1653 | base = per_cpu_ptr(&__tvec_bases, cpu); | |
3650b57f | 1654 | #endif |
8def9060 VK |
1655 | |
1656 | init_timer_cpu(base, cpu); | |
1657 | } | |
1658 | } | |
e52b1db3 | 1659 | |
8def9060 VK |
1660 | void __init init_timers(void) |
1661 | { | |
e52b1db3 TH |
1662 | /* ensure there are enough low bits for flags in timer->base pointer */ |
1663 | BUILD_BUG_ON(__alignof__(struct tvec_base) & TIMER_FLAG_MASK); | |
07dccf33 | 1664 | |
8def9060 | 1665 | init_timer_cpus(); |
c24a4a36 | 1666 | init_timer_stats(); |
3650b57f | 1667 | timer_register_cpu_notifier(); |
962cf36c | 1668 | open_softirq(TIMER_SOFTIRQ, run_timer_softirq); |
1da177e4 LT |
1669 | } |
1670 | ||
1da177e4 LT |
1671 | /** |
1672 | * msleep - sleep safely even with waitqueue interruptions | |
1673 | * @msecs: Time in milliseconds to sleep for | |
1674 | */ | |
1675 | void msleep(unsigned int msecs) | |
1676 | { | |
1677 | unsigned long timeout = msecs_to_jiffies(msecs) + 1; | |
1678 | ||
75bcc8c5 NA |
1679 | while (timeout) |
1680 | timeout = schedule_timeout_uninterruptible(timeout); | |
1da177e4 LT |
1681 | } |
1682 | ||
1683 | EXPORT_SYMBOL(msleep); | |
1684 | ||
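/*
 * Editor's worked example (not part of the original source): the
 * "+ 1" above rounds the sleep up by one jiffy so msleep() never
 * sleeps for less than @msecs. With HZ=100 (10ms per jiffy),
 * msleep(15) yields msecs_to_jiffies(15) = 2 jiffies, plus 1 = 3,
 * i.e. between 20ms and 30ms of real sleep depending on where in
 * the current tick the call lands.
 */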
1685 | /** | |
96ec3efd | 1686 | * msleep_interruptible - sleep waiting for signals |
1da177e4 LT |
1687 | * @msecs: Time in milliseconds to sleep for |
1688 | */ | |
1689 | unsigned long msleep_interruptible(unsigned int msecs) | |
1690 | { | |
1691 | unsigned long timeout = msecs_to_jiffies(msecs) + 1; | |
1692 | ||
75bcc8c5 NA |
1693 | while (timeout && !signal_pending(current)) |
1694 | timeout = schedule_timeout_interruptible(timeout); | |
1da177e4 LT |
1695 | return jiffies_to_msecs(timeout); |
1696 | } | |
1697 | ||
1698 | EXPORT_SYMBOL(msleep_interruptible); | |
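/*
 * Editor's sketch (not part of the original source): unlike msleep(),
 * the interruptible variant reports how much of the sleep remained
 * when a signal arrived, so callers can resume the wait later.
 * example_interruptible_nap() is an invented name.
 */
static void example_interruptible_nap(void)
{
	unsigned long left = msleep_interruptible(2000);

	if (left)
		pr_info("woken by a signal with %lu ms left\n", left);
}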
5e7f5a17 PP |
1699 | |
1700 | static int __sched do_usleep_range(unsigned long min, unsigned long max) | |
1701 | { | |
1702 | ktime_t kmin; | |
1703 | unsigned long delta; | |
1704 | ||
1705 | kmin = ktime_set(0, min * NSEC_PER_USEC); | |
1706 | delta = (max - min) * NSEC_PER_USEC; | |
1707 | return schedule_hrtimeout_range(&kmin, delta, HRTIMER_MODE_REL); | |
1708 | } | |
1709 | ||
1710 | /** | |
1711 | * usleep_range - Drop-in replacement for udelay where wakeup is flexible | |
1712 | * @min: Minimum time in usecs to sleep | |
1713 | * @max: Maximum time in usecs to sleep | |
1714 | */ | |
1715 | void usleep_range(unsigned long min, unsigned long max) | |
1716 | { | |
1717 | __set_current_state(TASK_UNINTERRUPTIBLE); | |
1718 | do_usleep_range(min, max); | |
1719 | } | |
1720 | EXPORT_SYMBOL(usleep_range); |
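/*
 * Editor's sketch (not part of the original source): usleep_range()
 * may only be used in non-atomic (sleepable) context, unlike
 * udelay(). Passing a range rather than one exact expiry lets the
 * scheduler coalesce the wakeup with other pending timers.
 * example_hw_settle() and the timings are invented for illustration.
 */
static void example_hw_settle(void)
{
	/* Hypothetical device needs at least 100us to settle; up to 200us is fine. */
	usleep_range(100, 200);
}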