// SPDX-License-Identifier: GPL-2.0
/*
 * Kernel internal timers
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * 1997-01-28 Modified by Finn Arne Gangstad to make timers scale better.
 *
 * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
 *            "A Kernel Model for Precision Timekeeping" by Dave Mills
 * 1998-12-24 Fixed an xtime SMP race (we need the xtime_lock rw spinlock to
 *            serialize accesses to xtime/lost_ticks).
 *            Copyright (C) 1998 Andrea Arcangeli
 * 1999-03-10 Improved NTP compatibility by Ulrich Windl
 * 2002-05-31 Move sys_sysinfo here and make its locking sane, Robert Love
 * 2000-10-05 Implemented scalable SMP per-CPU timer handling.
 *            Copyright (C) 2000, 2001, 2002 Ingo Molnar
 *            Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 */

#include <linux/kernel_stat.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pid_namespace.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/tick.h>
#include <linux/kallsyms.h>
#include <linux/irq_work.h>
#include <linux/sched/signal.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/nohz.h>
#include <linux/sched/debug.h>
#include <linux/slab.h>
#include <linux/compat.h>
#include <linux/random.h>

#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>

#include "tick-internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/timer.h>

__visible u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

/*
 * The timer wheel has LVL_DEPTH array levels. Each level provides an array of
 * LVL_SIZE buckets. Each level is driven by its own clock and therefore each
 * level has a different granularity.
 *
 * The level granularity is:		LVL_CLK_DIV ^ lvl
 * The level clock frequency is:	HZ / (LVL_CLK_DIV ^ level)
 *
 * The array level of a newly armed timer depends on the relative expiry
 * time. The farther the expiry time is away the higher the array level and
 * therefore the granularity becomes.
 *
 * Contrary to the original timer wheel implementation, which aims for 'exact'
 * expiry of the timers, this implementation removes the need for recascading
 * the timers into the lower array levels. The previous 'classic' timer wheel
 * implementation of the kernel already violated the 'exact' expiry by adding
 * slack to the expiry time to provide batched expiration. The granularity
 * levels provide implicit batching.
 *
 * This is an optimization of the original timer wheel implementation for the
 * majority of the timer wheel use cases: timeouts. The vast majority of
 * timeout timers (networking, disk I/O ...) are canceled before expiry. If
 * the timeout expires it indicates that normal operation is disturbed, so it
 * does not matter much whether the timeout comes with a slight delay.
 *
 * The only exception to this are networking timers with a small expiry
 * time. They rely on the granularity. Those fit into the first wheel level,
 * which has HZ granularity.
 *
 * We don't have cascading anymore. Timers with an expiry time above the
 * capacity of the last wheel level are force-expired at the maximum timeout
 * value of the last wheel level. From data sampling we know that the maximum
 * value observed is 5 days (network connection tracking), so this should not
 * be an issue.
 *
 * The currently chosen array constants are a good compromise between array
 * size and granularity.
 *
 * This results in the following granularity and range levels:
 *
 * HZ 1000 steps
 * Level Offset  Granularity            Range
 *  0      0         1 ms                0 ms -         63 ms
 *  1     64         8 ms               64 ms -        511 ms
 *  2    128        64 ms              512 ms -       4095 ms (512ms - ~4s)
 *  3    192       512 ms             4096 ms -      32767 ms (~4s - ~32s)
 *  4    256      4096 ms (~4s)      32768 ms -     262143 ms (~32s - ~4m)
 *  5    320     32768 ms (~32s)    262144 ms -    2097151 ms (~4m - ~34m)
 *  6    384    262144 ms (~4m)    2097152 ms -   16777215 ms (~34m - ~4h)
 *  7    448   2097152 ms (~34m)  16777216 ms -  134217727 ms (~4h - ~1d)
 *  8    512  16777216 ms (~4h)  134217728 ms - 1073741822 ms (~1d - ~12d)
 *
 * HZ  300
 * Level Offset  Granularity            Range
 *  0      0         3 ms                0 ms -        210 ms
 *  1     64        26 ms              213 ms -       1703 ms (213ms - ~1s)
 *  2    128       213 ms             1706 ms -      13650 ms (~1s - ~13s)
 *  3    192      1706 ms (~1s)      13653 ms -     109223 ms (~13s - ~1m)
 *  4    256     13653 ms (~13s)    109226 ms -     873810 ms (~1m - ~14m)
 *  5    320    109226 ms (~1m)     873813 ms -    6990503 ms (~14m - ~1h)
 *  6    384    873813 ms (~14m)   6990506 ms -   55924050 ms (~1h - ~15h)
 *  7    448   6990506 ms (~1h)   55924053 ms -  447392423 ms (~15h - ~5d)
 *  8    512  55924053 ms (~15h) 447392426 ms - 3579139406 ms (~5d - ~41d)
 *
 * HZ  250
 * Level Offset  Granularity            Range
 *  0      0         4 ms                0 ms -        255 ms
 *  1     64        32 ms              256 ms -       2047 ms (256ms - ~2s)
 *  2    128       256 ms             2048 ms -      16383 ms (~2s - ~16s)
 *  3    192      2048 ms (~2s)      16384 ms -     131071 ms (~16s - ~2m)
 *  4    256     16384 ms (~16s)    131072 ms -    1048575 ms (~2m - ~17m)
 *  5    320    131072 ms (~2m)    1048576 ms -    8388607 ms (~17m - ~2h)
 *  6    384   1048576 ms (~17m)   8388608 ms -   67108863 ms (~2h - ~18h)
 *  7    448   8388608 ms (~2h)   67108864 ms -  536870911 ms (~18h - ~6d)
 *  8    512  67108864 ms (~18h) 536870912 ms - 4294967288 ms (~6d - ~49d)
 *
 * HZ  100
 * Level Offset  Granularity            Range
 *  0      0        10 ms                0 ms -        630 ms
 *  1     64        80 ms              640 ms -       5110 ms (640ms - ~5s)
 *  2    128       640 ms             5120 ms -      40950 ms (~5s - ~40s)
 *  3    192      5120 ms (~5s)      40960 ms -     327670 ms (~40s - ~5m)
 *  4    256     40960 ms (~40s)    327680 ms -    2621430 ms (~5m - ~43m)
 *  5    320    327680 ms (~5m)    2621440 ms -   20971510 ms (~43m - ~5h)
 *  6    384   2621440 ms (~43m)  20971520 ms -  167772150 ms (~5h - ~1d)
 *  7    448  20971520 ms (~5h)  167772160 ms - 1342177270 ms (~1d - ~15d)
 */

/* Clock divisor for the next level */
#define LVL_CLK_SHIFT	3
#define LVL_CLK_DIV	(1UL << LVL_CLK_SHIFT)
#define LVL_CLK_MASK	(LVL_CLK_DIV - 1)
#define LVL_SHIFT(n)	((n) * LVL_CLK_SHIFT)
#define LVL_GRAN(n)	(1UL << LVL_SHIFT(n))

/*
 * The time start value for each level to select the bucket at enqueue
 * time. We start from the last possible delta of the previous level
 * so that we can later add an extra LVL_GRAN(n) to n (see calc_index()).
 */
#define LVL_START(n)	((LVL_SIZE - 1) << (((n) - 1) * LVL_CLK_SHIFT))

/* Size of each clock level */
#define LVL_BITS	6
#define LVL_SIZE	(1UL << LVL_BITS)
#define LVL_MASK	(LVL_SIZE - 1)
#define LVL_OFFS(n)	((n) * LVL_SIZE)

/* Level depth */
#if HZ > 100
# define LVL_DEPTH	9
#else
# define LVL_DEPTH	8
#endif

/* The cutoff (max. capacity of the wheel) */
#define WHEEL_TIMEOUT_CUTOFF	(LVL_START(LVL_DEPTH))
#define WHEEL_TIMEOUT_MAX	(WHEEL_TIMEOUT_CUTOFF - LVL_GRAN(LVL_DEPTH - 1))
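
/*
 * Plugging in the constants at HZ=1000: WHEEL_TIMEOUT_CUTOFF =
 * LVL_START(9) = 63 << 24 = 1056964608 jiffies (~12 days), and
 * WHEEL_TIMEOUT_MAX sits one level-8 granularity step (LVL_GRAN(8) =
 * 1 << 24 jiffies, ~4.7 hours) below that cutoff. Longer timeouts are
 * clamped to WHEEL_TIMEOUT_MAX by calc_wheel_index() below.
 */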

/*
 * The resulting wheel size. If NOHZ is configured we allocate two
 * wheels so we have separate storage for the deferrable timers.
 */
#define WHEEL_SIZE	(LVL_SIZE * LVL_DEPTH)

#ifdef CONFIG_NO_HZ_COMMON
# define NR_BASES	2
# define BASE_STD	0
# define BASE_DEF	1
#else
# define NR_BASES	1
# define BASE_STD	0
# define BASE_DEF	0
#endif

struct timer_base {
	raw_spinlock_t		lock;
	struct timer_list	*running_timer;
#ifdef CONFIG_PREEMPT_RT
	spinlock_t		expiry_lock;
	atomic_t		timer_waiters;
#endif
	unsigned long		clk;
	unsigned long		next_expiry;
	unsigned int		cpu;
	bool			next_expiry_recalc;
	bool			is_idle;
	DECLARE_BITMAP(pending_map, WHEEL_SIZE);
	struct hlist_head	vectors[WHEEL_SIZE];
} ____cacheline_aligned;

static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]);

#ifdef CONFIG_NO_HZ_COMMON

static DEFINE_STATIC_KEY_FALSE(timers_nohz_active);
static DEFINE_MUTEX(timer_keys_mutex);

static void timer_update_keys(struct work_struct *work);
static DECLARE_WORK(timer_update_work, timer_update_keys);

#ifdef CONFIG_SMP
unsigned int sysctl_timer_migration = 1;

DEFINE_STATIC_KEY_FALSE(timers_migration_enabled);

static void timers_update_migration(void)
{
	if (sysctl_timer_migration && tick_nohz_active)
		static_branch_enable(&timers_migration_enabled);
	else
		static_branch_disable(&timers_migration_enabled);
}
#else
static inline void timers_update_migration(void) { }
#endif /* !CONFIG_SMP */

static void timer_update_keys(struct work_struct *work)
{
	mutex_lock(&timer_keys_mutex);
	timers_update_migration();
	static_branch_enable(&timers_nohz_active);
	mutex_unlock(&timer_keys_mutex);
}

void timers_update_nohz(void)
{
	schedule_work(&timer_update_work);
}

int timer_migration_handler(struct ctl_table *table, int write,
			    void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;

	mutex_lock(&timer_keys_mutex);
	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (!ret && write)
		timers_update_migration();
	mutex_unlock(&timer_keys_mutex);
	return ret;
}
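
/*
 * The knob handled above is exposed as /proc/sys/kernel/timer_migration,
 * so for example:
 *
 *	echo 0 > /proc/sys/kernel/timer_migration
 *
 * keeps non-pinned timers on the CPU that armed them instead of letting
 * get_target_base() migrate them to a busy CPU while NOHZ is active.
 */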

static inline bool is_timers_nohz_active(void)
{
	return static_branch_unlikely(&timers_nohz_active);
}
#else
static inline bool is_timers_nohz_active(void) { return false; }
#endif /* NO_HZ_COMMON */

static unsigned long round_jiffies_common(unsigned long j, int cpu,
					  bool force_up)
{
	int rem;
	unsigned long original = j;

	/*
	 * We don't want all cpus firing their timers at once hitting the
	 * same lock or cachelines, so we skew each extra cpu with an extra
	 * 3 jiffies. This 3 jiffies came originally from the mm/ code which
	 * already did this.
	 * The skew is done by adding 3*cpunr, then rounding, then subtracting
	 * this extra offset again.
	 */
	j += cpu * 3;

	rem = j % HZ;

	/*
	 * If the target jiffy is just after a whole second (which can happen
	 * due to delays of the timer irq, long irq off times etc.) then
	 * we should round down to the whole second, not up. Use 1/4th second
	 * as cutoff for this rounding as an extreme upper bound for this.
	 * But never round down if @force_up is set.
	 */
	if (rem < HZ/4 && !force_up) /* round down */
		j = j - rem;
	else /* round up */
		j = j - rem + HZ;

	/* now that we have rounded, subtract the extra skew again */
	j -= cpu * 3;

	/*
	 * Make sure j is still in the future. Otherwise return the
	 * unmodified value.
	 */
	return time_is_after_jiffies(j) ? j : original;
}
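
/*
 * Worked example: with HZ=1000, jiffies=4000, j=5030 and cpu=2, the skew
 * makes j 5036; rem = 36 < HZ/4, so we round down to 5000, and removing
 * the skew yields 4994, which is still in the future and gets returned.
 * Each CPU thus lands on its own fixed offset from whole seconds
 * (-3*cpu jiffies), so CPUs wake in the same batch of ticks without all
 * hitting exactly the same jiffy.
 */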

/**
 * __round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, false);
}
EXPORT_SYMBOL_GPL(__round_jiffies);

/**
 * __round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, false) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_relative);

/**
 * round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), false);
}
EXPORT_SYMBOL_GPL(round_jiffies);

/**
 * round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies_relative(unsigned long j)
{
	return __round_jiffies_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_relative);

/**
 * __round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, true);
}
EXPORT_SYMBOL_GPL(__round_jiffies_up);

/**
 * __round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies_relative() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, true) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);

/**
 * round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * This is the same as round_jiffies() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), true);
}
EXPORT_SYMBOL_GPL(round_jiffies_up);

/**
 * round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * This is the same as round_jiffies_relative() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up_relative(unsigned long j)
{
	return __round_jiffies_up_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_up_relative);

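/*
 * A typical use is a slow periodic housekeeping timer that only needs
 * roughly second-granular firing; a sketch (mydev->poll_timer is a
 * made-up example field):
 *
 *	mod_timer(&mydev->poll_timer, round_jiffies(jiffies + 5 * HZ));
 *
 * All such timers system-wide then expire near the same ticks, so an
 * otherwise idle CPU wakes up once instead of several times.
 */
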
static inline unsigned int timer_get_idx(struct timer_list *timer)
{
	return (timer->flags & TIMER_ARRAYMASK) >> TIMER_ARRAYSHIFT;
}

static inline void timer_set_idx(struct timer_list *timer, unsigned int idx)
{
	timer->flags = (timer->flags & ~TIMER_ARRAYMASK) |
			idx << TIMER_ARRAYSHIFT;
}

/*
 * Helper function to calculate the array index for a given expiry
 * time.
 */
static inline unsigned calc_index(unsigned long expires, unsigned lvl,
				  unsigned long *bucket_expiry)
{
	/*
	 * The timer wheel has to guarantee that a timer does not fire
	 * early. Early expiry can happen due to:
	 * - Timer is armed at the edge of a tick
	 * - Truncation of the expiry time in the outer wheel levels
	 *
	 * Round up with level granularity to prevent this.
	 */
	expires = (expires + LVL_GRAN(lvl)) >> LVL_SHIFT(lvl);
	*bucket_expiry = expires << LVL_SHIFT(lvl);
	return LVL_OFFS(lvl) + (expires & LVL_MASK);
}

static int calc_wheel_index(unsigned long expires, unsigned long clk,
			    unsigned long *bucket_expiry)
{
	unsigned long delta = expires - clk;
	unsigned int idx;

	if (delta < LVL_START(1)) {
		idx = calc_index(expires, 0, bucket_expiry);
	} else if (delta < LVL_START(2)) {
		idx = calc_index(expires, 1, bucket_expiry);
	} else if (delta < LVL_START(3)) {
		idx = calc_index(expires, 2, bucket_expiry);
	} else if (delta < LVL_START(4)) {
		idx = calc_index(expires, 3, bucket_expiry);
	} else if (delta < LVL_START(5)) {
		idx = calc_index(expires, 4, bucket_expiry);
	} else if (delta < LVL_START(6)) {
		idx = calc_index(expires, 5, bucket_expiry);
	} else if (delta < LVL_START(7)) {
		idx = calc_index(expires, 6, bucket_expiry);
	} else if (LVL_DEPTH > 8 && delta < LVL_START(8)) {
		idx = calc_index(expires, 7, bucket_expiry);
	} else if ((long) delta < 0) {
		idx = clk & LVL_MASK;
		*bucket_expiry = clk;
	} else {
		/*
		 * Force obscenely large timeouts to expire at the
		 * capacity limit of the wheel.
		 */
		if (delta >= WHEEL_TIMEOUT_CUTOFF)
			expires = clk + WHEEL_TIMEOUT_MAX;

		idx = calc_index(expires, LVL_DEPTH - 1, bucket_expiry);
	}
	return idx;
}
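
/*
 * Worked example at HZ=1000: a timer armed with clk = 1000 and
 * expires = 1200 has delta = 200, which is >= LVL_START(1) = 63 and
 * < LVL_START(2) = 504, so it lands in level 1. calc_index() rounds up
 * with the level granularity: (1200 + LVL_GRAN(1)) >> LVL_SHIFT(1) = 151,
 * giving *bucket_expiry = 151 << 3 = 1208 and idx = LVL_OFFS(1) +
 * (151 & LVL_MASK) = 64 + 23 = 87. The timer fires at jiffy 1208, i.e.
 * up to one level granularity late but never early.
 */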

static void
trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer)
{
	if (!is_timers_nohz_active())
		return;

	/*
	 * TODO: This wants some optimizing similar to the code below, but we
	 * will do that when we switch from push to pull for deferrable timers.
	 */
	if (timer->flags & TIMER_DEFERRABLE) {
		if (tick_nohz_full_cpu(base->cpu))
			wake_up_nohz_cpu(base->cpu);
		return;
	}

	/*
	 * We might have to IPI the remote CPU if the base is idle and the
	 * timer is not deferrable. If the other CPU is on the way to idle
	 * then it can't set base->is_idle as we hold the base lock:
	 */
	if (base->is_idle)
		wake_up_nohz_cpu(base->cpu);
}

/*
 * Enqueue the timer into the hash bucket, mark it pending in
 * the bitmap, store the index in the timer flags then wake up
 * the target CPU if needed.
 */
static void enqueue_timer(struct timer_base *base, struct timer_list *timer,
			  unsigned int idx, unsigned long bucket_expiry)
{
	hlist_add_head(&timer->entry, base->vectors + idx);
	__set_bit(idx, base->pending_map);
	timer_set_idx(timer, idx);

	trace_timer_start(timer, timer->expires, timer->flags);

	/*
	 * Check whether this is the new first expiring timer. The
	 * effective expiry time of the timer is required here
	 * (bucket_expiry) instead of timer->expires.
	 */
	if (time_before(bucket_expiry, base->next_expiry)) {
		/*
		 * Set the next expiry time and kick the CPU so it
		 * can reevaluate the wheel:
		 */
		base->next_expiry = bucket_expiry;
		base->next_expiry_recalc = false;
		trigger_dyntick_cpu(base, timer);
	}
}

static void internal_add_timer(struct timer_base *base, struct timer_list *timer)
{
	unsigned long bucket_expiry;
	unsigned int idx;

	idx = calc_wheel_index(timer->expires, base->clk, &bucket_expiry);
	enqueue_timer(base, timer, idx, bucket_expiry);
}

#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static const struct debug_obj_descr timer_debug_descr;

static void *timer_debug_hint(void *addr)
{
	return ((struct timer_list *) addr)->function;
}

static bool timer_is_static_object(void *addr)
{
	struct timer_list *timer = addr;

	return (timer->entry.pprev == NULL &&
		timer->entry.next == TIMER_ENTRY_STATIC);
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool timer_fixup_init(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_init(timer, &timer_debug_descr);
		return true;
	default:
		return false;
	}
}

/* Stub timer callback for improperly used timers. */
static void stub_timer(struct timer_list *unused)
{
	WARN_ON(1);
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool timer_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		timer_setup(timer, stub_timer, 0);
		return true;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);
		fallthrough;
	default:
		return false;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool timer_fixup_free(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_free(timer, &timer_debug_descr);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_assert_init is called when:
 * - an untracked/uninit-ed object is found
 */
static bool timer_fixup_assert_init(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		timer_setup(timer, stub_timer, 0);
		return true;
	default:
		return false;
	}
}

static const struct debug_obj_descr timer_debug_descr = {
	.name			= "timer_list",
	.debug_hint		= timer_debug_hint,
	.is_static_object	= timer_is_static_object,
	.fixup_init		= timer_fixup_init,
	.fixup_activate		= timer_fixup_activate,
	.fixup_free		= timer_fixup_free,
	.fixup_assert_init	= timer_fixup_assert_init,
};

static inline void debug_timer_init(struct timer_list *timer)
{
	debug_object_init(timer, &timer_debug_descr);
}

static inline void debug_timer_activate(struct timer_list *timer)
{
	debug_object_activate(timer, &timer_debug_descr);
}

static inline void debug_timer_deactivate(struct timer_list *timer)
{
	debug_object_deactivate(timer, &timer_debug_descr);
}

static inline void debug_timer_assert_init(struct timer_list *timer)
{
	debug_object_assert_init(timer, &timer_debug_descr);
}

static void do_init_timer(struct timer_list *timer,
			  void (*func)(struct timer_list *),
			  unsigned int flags,
			  const char *name, struct lock_class_key *key);

void init_timer_on_stack_key(struct timer_list *timer,
			     void (*func)(struct timer_list *),
			     unsigned int flags,
			     const char *name, struct lock_class_key *key)
{
	debug_object_init_on_stack(timer, &timer_debug_descr);
	do_init_timer(timer, func, flags, name, key);
}
EXPORT_SYMBOL_GPL(init_timer_on_stack_key);

void destroy_timer_on_stack(struct timer_list *timer)
{
	debug_object_free(timer, &timer_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_timer_on_stack);

#else
static inline void debug_timer_init(struct timer_list *timer) { }
static inline void debug_timer_activate(struct timer_list *timer) { }
static inline void debug_timer_deactivate(struct timer_list *timer) { }
static inline void debug_timer_assert_init(struct timer_list *timer) { }
#endif

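/*
 * On-stack timers must pair initialization (typically via the
 * timer_setup_on_stack() wrapper around init_timer_on_stack_key())
 * with destroy_timer_on_stack() before the stack frame is left. A
 * sketch of a synchronous wait (my_wake_fn is a made-up callback):
 *
 *	struct timer_list t;
 *
 *	timer_setup_on_stack(&t, my_wake_fn, 0);
 *	t.expires = jiffies + HZ;
 *	add_timer(&t);
 *	...
 *	del_timer_sync(&t);
 *	destroy_timer_on_stack(&t);
 *
 * Otherwise debugobjects would keep tracking a timer whose storage is
 * gone.
 */
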
static inline void debug_init(struct timer_list *timer)
{
	debug_timer_init(timer);
	trace_timer_init(timer);
}

static inline void debug_deactivate(struct timer_list *timer)
{
	debug_timer_deactivate(timer);
	trace_timer_cancel(timer);
}

static inline void debug_assert_init(struct timer_list *timer)
{
	debug_timer_assert_init(timer);
}

static void do_init_timer(struct timer_list *timer,
			  void (*func)(struct timer_list *),
			  unsigned int flags,
			  const char *name, struct lock_class_key *key)
{
	timer->entry.pprev = NULL;
	timer->function = func;
	if (WARN_ON_ONCE(flags & ~TIMER_INIT_FLAGS))
		flags &= TIMER_INIT_FLAGS;
	timer->flags = flags | raw_smp_processor_id();
	lockdep_init_map(&timer->lockdep_map, name, key, 0);
}

/**
 * init_timer_key - initialize a timer
 * @timer: the timer to be initialized
 * @func: timer callback function
 * @flags: timer flags
 * @name: name of the timer
 * @key: lockdep class key of the fake lock used for tracking timer
 *       sync lock dependencies
 *
 * init_timer_key() must be done to a timer prior to calling *any* of the
 * other timer functions.
 */
void init_timer_key(struct timer_list *timer,
		    void (*func)(struct timer_list *), unsigned int flags,
		    const char *name, struct lock_class_key *key)
{
	debug_init(timer);
	do_init_timer(timer, func, flags, name, key);
}
EXPORT_SYMBOL(init_timer_key);

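/*
 * Most code does not call init_timer_key() directly: the timer_setup()
 * macro supplies @name and @key, and from_timer() maps the callback
 * argument back to the enclosing object. A minimal sketch (struct
 * my_dev and my_dev_timeout() are made-up names):
 *
 *	struct my_dev {
 *		struct timer_list timer;
 *	};
 *
 *	static void my_dev_timeout(struct timer_list *t)
 *	{
 *		struct my_dev *dev = from_timer(dev, t, timer);
 *
 *		// handle the timeout, mod_timer() here to re-arm
 *	}
 *
 *	timer_setup(&dev->timer, my_dev_timeout, 0);
 */
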
static inline void detach_timer(struct timer_list *timer, bool clear_pending)
{
	struct hlist_node *entry = &timer->entry;

	debug_deactivate(timer);

	__hlist_del(entry);
	if (clear_pending)
		entry->pprev = NULL;
	entry->next = LIST_POISON2;
}

static int detach_if_pending(struct timer_list *timer, struct timer_base *base,
			     bool clear_pending)
{
	unsigned idx = timer_get_idx(timer);

	if (!timer_pending(timer))
		return 0;

	if (hlist_is_singular_node(&timer->entry, base->vectors + idx)) {
		__clear_bit(idx, base->pending_map);
		base->next_expiry_recalc = true;
	}

	detach_timer(timer, clear_pending);
	return 1;
}

static inline struct timer_base *get_timer_cpu_base(u32 tflags, u32 cpu)
{
	struct timer_base *base = per_cpu_ptr(&timer_bases[BASE_STD], cpu);

	/*
	 * If the timer is deferrable and NO_HZ_COMMON is set then we need
	 * to use the deferrable base.
	 */
	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE))
		base = per_cpu_ptr(&timer_bases[BASE_DEF], cpu);
	return base;
}

static inline struct timer_base *get_timer_this_cpu_base(u32 tflags)
{
	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);

	/*
	 * If the timer is deferrable and NO_HZ_COMMON is set then we need
	 * to use the deferrable base.
	 */
	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE))
		base = this_cpu_ptr(&timer_bases[BASE_DEF]);
	return base;
}

static inline struct timer_base *get_timer_base(u32 tflags)
{
	return get_timer_cpu_base(tflags, tflags & TIMER_CPUMASK);
}

static inline struct timer_base *
get_target_base(struct timer_base *base, unsigned tflags)
{
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
	if (static_branch_likely(&timers_migration_enabled) &&
	    !(tflags & TIMER_PINNED))
		return get_timer_cpu_base(tflags, get_nohz_timer_target());
#endif
	return get_timer_this_cpu_base(tflags);
}

static inline void forward_timer_base(struct timer_base *base)
{
	unsigned long jnow = READ_ONCE(jiffies);

	/*
	 * No need to forward if we are close enough below jiffies.
	 * Also while executing timers, base->clk is 1 offset ahead
	 * of jiffies to avoid endless requeuing to current jiffies.
	 */
	if ((long)(jnow - base->clk) < 1)
		return;

	/*
	 * If the next expiry value is > jiffies, then we fast forward to
	 * jiffies otherwise we forward to the next expiry value.
	 */
	if (time_after(base->next_expiry, jnow)) {
		base->clk = jnow;
	} else {
		if (WARN_ON_ONCE(time_before(base->next_expiry, base->clk)))
			return;
		base->clk = base->next_expiry;
	}
}

/*
 * We are using hashed locking: Holding per_cpu(timer_bases[x]).lock means
 * that all timers which are tied to this base are locked, and the base itself
 * is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found in the base->vectors array.
 *
 * When a timer is migrating then the TIMER_MIGRATING flag is set and we need
 * to wait until the migration is done.
 */
static struct timer_base *lock_timer_base(struct timer_list *timer,
					  unsigned long *flags)
	__acquires(timer->base->lock)
{
	for (;;) {
		struct timer_base *base;
		u32 tf;

		/*
		 * We need to use READ_ONCE() here, otherwise the compiler
		 * might re-read @tf between the check for TIMER_MIGRATING
		 * and spin_lock().
		 */
		tf = READ_ONCE(timer->flags);

		if (!(tf & TIMER_MIGRATING)) {
			base = get_timer_base(tf);
			raw_spin_lock_irqsave(&base->lock, *flags);
			if (timer->flags == tf)
				return base;
			raw_spin_unlock_irqrestore(&base->lock, *flags);
		}
		cpu_relax();
	}
}

#define MOD_TIMER_PENDING_ONLY		0x01
#define MOD_TIMER_REDUCE		0x02
#define MOD_TIMER_NOTPENDING		0x04

static inline int
__mod_timer(struct timer_list *timer, unsigned long expires, unsigned int options)
{
	unsigned long clk = 0, flags, bucket_expiry;
	struct timer_base *base, *new_base;
	unsigned int idx = UINT_MAX;
	int ret = 0;

	BUG_ON(!timer->function);

	/*
	 * This is a common optimization triggered by the networking code - if
	 * the timer is re-modified to have the same timeout or ends up in the
	 * same array bucket then just return:
	 */
	if (!(options & MOD_TIMER_NOTPENDING) && timer_pending(timer)) {
		/*
		 * The downside of this optimization is that it can result in
		 * larger granularity than you would get from adding a new
		 * timer with this expiry.
		 */
		long diff = timer->expires - expires;

		if (!diff)
			return 1;
		if (options & MOD_TIMER_REDUCE && diff <= 0)
			return 1;

		/*
		 * We lock timer base and calculate the bucket index right
		 * here. If the timer ends up in the same bucket, then we
		 * just update the expiry time and avoid the whole
		 * dequeue/enqueue dance.
		 */
		base = lock_timer_base(timer, &flags);
		forward_timer_base(base);

		if (timer_pending(timer) && (options & MOD_TIMER_REDUCE) &&
		    time_before_eq(timer->expires, expires)) {
			ret = 1;
			goto out_unlock;
		}

		clk = base->clk;
		idx = calc_wheel_index(expires, clk, &bucket_expiry);

		/*
		 * Retrieve and compare the array index of the pending
		 * timer. If it matches set the expiry to the new value so a
		 * subsequent call will exit in the expires check above.
		 */
		if (idx == timer_get_idx(timer)) {
			if (!(options & MOD_TIMER_REDUCE))
				timer->expires = expires;
			else if (time_after(timer->expires, expires))
				timer->expires = expires;
			ret = 1;
			goto out_unlock;
		}
	} else {
		base = lock_timer_base(timer, &flags);
		forward_timer_base(base);
	}

	ret = detach_if_pending(timer, base, false);
	if (!ret && (options & MOD_TIMER_PENDING_ONLY))
		goto out_unlock;

	new_base = get_target_base(base, timer->flags);

	if (base != new_base) {
		/*
		 * We are trying to schedule the timer on the new base.
		 * However we can't change timer's base while it is running,
		 * otherwise del_timer_sync() can't detect that the timer's
		 * handler has not yet finished. This also guarantees that the
		 * timer is serialized wrt itself.
		 */
		if (likely(base->running_timer != timer)) {
			/* See the comment in lock_timer_base() */
			timer->flags |= TIMER_MIGRATING;

			raw_spin_unlock(&base->lock);
			base = new_base;
			raw_spin_lock(&base->lock);
			WRITE_ONCE(timer->flags,
				   (timer->flags & ~TIMER_BASEMASK) | base->cpu);
			forward_timer_base(base);
		}
	}

	debug_timer_activate(timer);

	timer->expires = expires;
	/*
	 * If 'idx' was calculated above and the base time did not advance
	 * between calculating 'idx' and possibly switching the base, only
	 * enqueue_timer() is required. Otherwise we need to (re)calculate
	 * the wheel index via internal_add_timer().
	 */
	if (idx != UINT_MAX && clk == base->clk)
		enqueue_timer(base, timer, idx, bucket_expiry);
	else
		internal_add_timer(base, timer);

out_unlock:
	raw_spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}

/**
 * mod_timer_pending - modify a pending timer's timeout
 * @timer: the pending timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pending() is the same for pending timers as mod_timer(),
 * but will not re-activate and modify already deleted timers.
 *
 * It is useful for unserialized use of timers.
 */
int mod_timer_pending(struct timer_list *timer, unsigned long expires)
{
	return __mod_timer(timer, expires, MOD_TIMER_PENDING_ONLY);
}
EXPORT_SYMBOL(mod_timer_pending);

/**
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer() is a more efficient way to update the expires field of an
 * active timer (if the timer is inactive it will be activated)
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
	return __mod_timer(timer, expires, 0);
}
EXPORT_SYMBOL(mod_timer);

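/*
 * The common (re)arming pattern is therefore simply:
 *
 *	mod_timer(&dev->watchdog_timer, jiffies + msecs_to_jiffies(100));
 *
 * (dev->watchdog_timer being a made-up example field). This is safe
 * whether or not the timer is currently pending, and repeated re-arming
 * with a similar timeout hits the bucket-match fast path in
 * __mod_timer() above.
 */
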
/**
 * timer_reduce - Modify a timer's timeout if it would reduce the timeout
 * @timer: The timer to be modified
 * @expires: New timeout in jiffies
 *
 * timer_reduce() is very similar to mod_timer(), except that it will only
 * modify a pending timer if that would reduce the expiration time (it will
 * start a timer that isn't pending).
 */
int timer_reduce(struct timer_list *timer, unsigned long expires)
{
	return __mod_timer(timer, expires, MOD_TIMER_REDUCE);
}
EXPORT_SYMBOL(timer_reduce);

/**
 * add_timer - start a timer
 * @timer: the timer to be added
 *
 * The kernel will do a ->function(@timer) callback from the
 * timer interrupt at the ->expires point in the future. The
 * current time is 'jiffies'.
 *
 * The timer's ->expires, ->function fields must be set prior to calling
 * this function.
 *
 * Timers with an ->expires field in the past will be executed in the next
 * timer tick.
 */
void add_timer(struct timer_list *timer)
{
	BUG_ON(timer_pending(timer));
	__mod_timer(timer, timer->expires, MOD_TIMER_NOTPENDING);
}
EXPORT_SYMBOL(add_timer);

/**
 * add_timer_on - start a timer on a particular CPU
 * @timer: the timer to be added
 * @cpu: the CPU to start it on
 *
 * This is not very scalable on SMP. Double adds are not possible.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
	struct timer_base *new_base, *base;
	unsigned long flags;

	BUG_ON(timer_pending(timer) || !timer->function);

	new_base = get_timer_cpu_base(timer->flags, cpu);

	/*
	 * If @timer was on a different CPU, it should be migrated with the
	 * old base locked to prevent other operations proceeding with the
	 * wrong base locked. See lock_timer_base().
	 */
	base = lock_timer_base(timer, &flags);
	if (base != new_base) {
		timer->flags |= TIMER_MIGRATING;

		raw_spin_unlock(&base->lock);
		base = new_base;
		raw_spin_lock(&base->lock);
		WRITE_ONCE(timer->flags,
			   (timer->flags & ~TIMER_BASEMASK) | cpu);
	}
	forward_timer_base(base);

	debug_timer_activate(timer);
	internal_add_timer(base, timer);
	raw_spin_unlock_irqrestore(&base->lock, flags);
}
EXPORT_SYMBOL_GPL(add_timer_on);

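/*
 * A sketch of per-CPU usage (wd_timer and wd_fire() are made-up names):
 *
 *	static DEFINE_PER_CPU(struct timer_list, wd_timer);
 *
 *	timer_setup(this_cpu_ptr(&wd_timer), wd_fire, TIMER_PINNED);
 *	this_cpu_ptr(&wd_timer)->expires = jiffies + HZ;
 *	add_timer_on(this_cpu_ptr(&wd_timer), smp_processor_id());
 *
 * TIMER_PINNED additionally keeps later mod_timer() calls from
 * migrating the timer away when timer migration is enabled (see
 * get_target_base() above).
 */
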
/**
 * del_timer - deactivate a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and inactive
 * timers.
 *
 * The function returns whether it has deactivated a pending timer or not.
 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
 * active timer returns 1.)
 */
int del_timer(struct timer_list *timer)
{
	struct timer_base *base;
	unsigned long flags;
	int ret = 0;

	debug_assert_init(timer);

	if (timer_pending(timer)) {
		base = lock_timer_base(timer, &flags);
		ret = detach_if_pending(timer, base, true);
		raw_spin_unlock_irqrestore(&base->lock, flags);
	}

	return ret;
}
EXPORT_SYMBOL(del_timer);

/**
 * try_to_del_timer_sync - Try to deactivate a timer
 * @timer: timer to delete
 *
 * This function tries to deactivate a timer. Upon successful (ret >= 0)
 * exit the timer is not queued and the handler is not running on any CPU.
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
	struct timer_base *base;
	unsigned long flags;
	int ret = -1;

	debug_assert_init(timer);

	base = lock_timer_base(timer, &flags);

	if (base->running_timer != timer)
		ret = detach_if_pending(timer, base, true);

	raw_spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}
EXPORT_SYMBOL(try_to_del_timer_sync);

#ifdef CONFIG_PREEMPT_RT
static __init void timer_base_init_expiry_lock(struct timer_base *base)
{
	spin_lock_init(&base->expiry_lock);
}

static inline void timer_base_lock_expiry(struct timer_base *base)
{
	spin_lock(&base->expiry_lock);
}

static inline void timer_base_unlock_expiry(struct timer_base *base)
{
	spin_unlock(&base->expiry_lock);
}

/*
 * The counterpart to del_timer_wait_running().
 *
 * If there is a waiter for base->expiry_lock, then it was waiting for the
 * timer callback to finish. Drop expiry_lock and reacquire it. That allows
 * the waiter to acquire the lock and make progress.
 */
static void timer_sync_wait_running(struct timer_base *base)
{
	if (atomic_read(&base->timer_waiters)) {
		spin_unlock(&base->expiry_lock);
		spin_lock(&base->expiry_lock);
	}
}

/*
 * This function is called on PREEMPT_RT kernels when the fast path
 * deletion of a timer failed because the timer callback function was
 * running.
 *
 * This prevents priority inversion, if the softirq thread on a remote CPU
 * got preempted, and it prevents a live lock when the task which tries to
 * delete a timer preempted the softirq thread running the timer callback
 * function.
 */
static void del_timer_wait_running(struct timer_list *timer)
{
	u32 tf;

	tf = READ_ONCE(timer->flags);
	if (!(tf & (TIMER_MIGRATING | TIMER_IRQSAFE))) {
		struct timer_base *base = get_timer_base(tf);

		/*
		 * Mark the base as contended and grab the expiry lock,
		 * which is held by the softirq across the timer
		 * callback. Drop the lock immediately so the softirq can
		 * expire the next timer. In theory the timer could already
		 * be running again, but that's more than unlikely and just
		 * causes another wait loop.
		 */
		atomic_inc(&base->timer_waiters);
		spin_lock_bh(&base->expiry_lock);
		atomic_dec(&base->timer_waiters);
		spin_unlock_bh(&base->expiry_lock);
	}
}
#else
static inline void timer_base_init_expiry_lock(struct timer_base *base) { }
static inline void timer_base_lock_expiry(struct timer_base *base) { }
static inline void timer_base_unlock_expiry(struct timer_base *base) { }
static inline void timer_sync_wait_running(struct timer_base *base) { }
static inline void del_timer_wait_running(struct timer_list *timer) { }
#endif

1311 | #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT) | |
2aae4a10 | 1312 | /** |
1da177e4 LT |
1313 | * del_timer_sync - deactivate a timer and wait for the handler to finish. |
1314 | * @timer: the timer to be deactivated | |
1315 | * | |
1316 | * This function only differs from del_timer() on SMP: besides deactivating | |
1317 | * the timer it also makes sure the handler has finished executing on other | |
1318 | * CPUs. | |
1319 | * | |
72fd4a35 | 1320 | * Synchronization rules: Callers must prevent restarting of the timer, |
1da177e4 | 1321 | * otherwise this function is meaningless. It must not be called from |
c5f66e99 TH |
1322 | * interrupt contexts unless the timer is an irqsafe one. The caller must |
1323 | * not hold locks which would prevent completion of the timer's | |
1324 | * handler. The timer's handler must not call add_timer_on(). Upon exit the | |
1325 | * timer is not queued and the handler is not running on any CPU. | |
1da177e4 | 1326 | * |
c5f66e99 TH |
1327 | * Note: For !irqsafe timers, you must not hold locks that are held in |
1328 | * interrupt context while calling this function. Even if the lock has | |
bf9c96be | 1329 | * nothing to do with the timer in question. Here's why:: |
48228f7b SR |
1330 | * |
1331 | * CPU0 CPU1 | |
1332 | * ---- ---- | |
bf9c96be MCC |
1333 | * <SOFTIRQ> |
1334 | * call_timer_fn(); | |
1335 | * base->running_timer = mytimer; | |
1336 | * spin_lock_irq(somelock); | |
48228f7b SR |
1337 | * <IRQ> |
1338 | * spin_lock(somelock); | |
bf9c96be MCC |
1339 | * del_timer_sync(mytimer); |
1340 | * while (base->running_timer == mytimer); | |
48228f7b SR |
1341 | * |
1342 | * Now del_timer_sync() will never return and never release somelock. | |
1343 | * The interrupt on the other CPU is waiting to grab somelock but | |
1344 | * it has interrupted the softirq that CPU0 is waiting to finish. | |
1345 | * | |
1da177e4 | 1346 | * The function returns whether it has deactivated a pending timer or not. |
1da177e4 LT |
1347 | */ |
1348 | int del_timer_sync(struct timer_list *timer) | |
1349 | { | |
030dcdd1 AMG |
1350 | int ret; |
1351 | ||
6f2b9b9a | 1352 | #ifdef CONFIG_LOCKDEP |
f266a511 PZ |
1353 | unsigned long flags; |
1354 | ||
48228f7b SR |
1355 | /* |
1356 | * If lockdep gives a backtrace here, please reference | |
1357 | * the synchronization rules above. | |
1358 | */ | |
7ff20792 | 1359 | local_irq_save(flags); |
6f2b9b9a JB |
1360 | lock_map_acquire(&timer->lockdep_map); |
1361 | lock_map_release(&timer->lockdep_map); | |
7ff20792 | 1362 | local_irq_restore(flags); |
6f2b9b9a | 1363 | #endif |
466bd303 YZ |
1364 | /* |
1365 | * Don't use del_timer_sync() from hardirq context (unless the | |
1366 | * timer is irqsafe), because it could lead to deadlock. | |
1367 | */ | |
0eeda71b | 1368 | WARN_ON(in_irq() && !(timer->flags & TIMER_IRQSAFE)); |
030dcdd1 | 1369 | |
c725dafc SAS |
1370 | /* |
1371 | * Must be able to sleep on PREEMPT_RT because of the slowpath in | |
1372 | * del_timer_wait_running(). | |
1373 | */ | |
1374 | if (IS_ENABLED(CONFIG_PREEMPT_RT) && !(timer->flags & TIMER_IRQSAFE)) | |
1375 | lockdep_assert_preemption_enabled(); | |
1376 | ||
030dcdd1 AMG |
1377 | do { |
1378 | ret = try_to_del_timer_sync(timer); | |
1379 | ||
1380 | if (unlikely(ret < 0)) { | |
1381 | del_timer_wait_running(timer); | |
1382 | cpu_relax(); | |
1383 | } | |
1384 | } while (ret < 0); | |
1385 | ||
1386 | return ret; | |
1da177e4 | 1387 | } |
55c888d6 | 1388 | EXPORT_SYMBOL(del_timer_sync); |
1da177e4 LT |
1389 | #endif |
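/*
 * Editor's usage sketch (assumed driver code, not part of this file),
 * showing the synchronization rules above in practice: first prevent
 * the timer from being re-armed, then wait for a running handler.
 * foo and foo_teardown() are hypothetical:
 *
 *	struct foo {
 *		struct timer_list timer;
 *		bool shutting_down;
 *	};
 *
 *	static void foo_teardown(struct foo *f)
 *	{
 *		f->shutting_down = true;    <- handler checks this before re-arming
 *		del_timer_sync(&f->timer);  <- afterwards: not queued, not running
 *	}
 *
 * On PREEMPT_RT this must run in a context that may sleep (see above).
 */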
1390 | ||
f28d3d53 AMG |
1391 | static void call_timer_fn(struct timer_list *timer, |
1392 | void (*fn)(struct timer_list *), | |
1393 | unsigned long baseclk) | |
576da126 | 1394 | { |
4a2b4b22 | 1395 | int count = preempt_count(); |
576da126 TG |
1396 | |
1397 | #ifdef CONFIG_LOCKDEP | |
1398 | /* | |
1399 | * It is permissible to free the timer from inside the | |
1400 | * function that is called from it; we need to take this into | |
1401 | * account for lockdep too. To avoid bogus "held lock freed" | |
1402 | * warnings as well as problems when looking into | |
1403 | * timer->lockdep_map, make a copy and use that here. | |
1404 | */ | |
4d82a1de PZ |
1405 | struct lockdep_map lockdep_map; |
1406 | ||
1407 | lockdep_copy_map(&lockdep_map, &timer->lockdep_map); | |
576da126 TG |
1408 | #endif |
1409 | /* | |
1410 | * Couple the lock chain with the lock chain at | |
1411 | * del_timer_sync() by acquiring the lock_map around the fn() | |
1412 | * call here and in del_timer_sync(). | |
1413 | */ | |
1414 | lock_map_acquire(&lockdep_map); | |
1415 | ||
f28d3d53 | 1416 | trace_timer_expire_entry(timer, baseclk); |
354b46b1 | 1417 | fn(timer); |
576da126 TG |
1418 | trace_timer_expire_exit(timer); |
1419 | ||
1420 | lock_map_release(&lockdep_map); | |
1421 | ||
4a2b4b22 | 1422 | if (count != preempt_count()) { |
d75f773c | 1423 | WARN_ONCE(1, "timer: %pS preempt leak: %08x -> %08x\n", |
4a2b4b22 | 1424 | fn, count, preempt_count()); |
802702e0 TG |
1425 | /* |
1426 | * Restore the preempt count. That gives us a decent | |
1427 | * chance to survive and extract information. If the | |
1428 | * callback kept a lock held, bad luck, but not worse | |
1429 | * than the BUG() we had. | |
1430 | */ | |
4a2b4b22 | 1431 | preempt_count_set(count); |
576da126 TG |
1432 | } |
1433 | } | |
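/*
 * Editor's sketch of why the lockdep map is copied above (assumed
 * caller code, not part of this file): a callback may free the object
 * embedding its own timer, so timer->lockdep_map can be gone by the
 * time fn(timer) returns:
 *
 *	static void foo_timeout(struct timer_list *t)
 *	{
 *		struct foo *f = from_timer(f, t, timer);
 *
 *		kfree(f);    <- frees the timer_list and its lockdep_map
 *	}
 */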
1434 | ||
500462a9 | 1435 | static void expire_timers(struct timer_base *base, struct hlist_head *head) |
1da177e4 | 1436 | { |
f28d3d53 AMG |
1437 | /* |
1438 | * This value is required only for tracing. base->clk was | |
1439 | * incremented directly before expire_timers was called, but expiry | |
1440 | * is relative to the old base->clk value. | |
1441 | */ | |
1442 | unsigned long baseclk = base->clk - 1; | |
1443 | ||
500462a9 TG |
1444 | while (!hlist_empty(head)) { |
1445 | struct timer_list *timer; | |
354b46b1 | 1446 | void (*fn)(struct timer_list *); |
1da177e4 | 1447 | |
500462a9 | 1448 | timer = hlist_entry(head->first, struct timer_list, entry); |
3bb475a3 | 1449 | |
500462a9 TG |
1450 | base->running_timer = timer; |
1451 | detach_timer(timer, true); | |
3bb475a3 | 1452 | |
500462a9 | 1453 | fn = timer->function; |
500462a9 TG |
1454 | |
1455 | if (timer->flags & TIMER_IRQSAFE) { | |
2287d866 | 1456 | raw_spin_unlock(&base->lock); |
f28d3d53 | 1457 | call_timer_fn(timer, fn, baseclk); |
030dcdd1 | 1458 | base->running_timer = NULL; |
2287d866 | 1459 | raw_spin_lock(&base->lock); |
500462a9 | 1460 | } else { |
2287d866 | 1461 | raw_spin_unlock_irq(&base->lock); |
f28d3d53 | 1462 | call_timer_fn(timer, fn, baseclk); |
030dcdd1 AMG |
1463 | base->running_timer = NULL; |
1464 | timer_sync_wait_running(base); | |
2287d866 | 1465 | raw_spin_lock_irq(&base->lock); |
3bb475a3 | 1466 | } |
500462a9 TG |
1467 | } |
1468 | } | |
3bb475a3 | 1469 | |
d4f7dae8 FW |
1470 | static int collect_expired_timers(struct timer_base *base, |
1471 | struct hlist_head *heads) | |
500462a9 | 1472 | { |
d4f7dae8 | 1473 | unsigned long clk = base->clk = base->next_expiry; |
500462a9 TG |
1474 | struct hlist_head *vec; |
1475 | int i, levels = 0; | |
1476 | unsigned int idx; | |
626ab0e6 | 1477 | |
500462a9 TG |
1478 | for (i = 0; i < LVL_DEPTH; i++) { |
1479 | idx = (clk & LVL_MASK) + i * LVL_SIZE; | |
1480 | ||
1481 | if (__test_and_clear_bit(idx, base->pending_map)) { | |
1482 | vec = base->vectors + idx; | |
1483 | hlist_move_list(vec, heads++); | |
1484 | levels++; | |
1da177e4 | 1485 | } |
500462a9 TG |
1486 | /* Is it time to look at the next level? */ |
1487 | if (clk & LVL_CLK_MASK) | |
1488 | break; | |
1489 | /* Shift clock for the next level granularity */ | |
1490 | clk >>= LVL_CLK_SHIFT; | |
1da177e4 | 1491 | } |
500462a9 | 1492 | return levels; |
1da177e4 LT |
1493 | } |
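/*
 * Editor's worked example for the loop above, assuming the usual
 * LVL_SIZE = 64 (LVL_MASK = 0x3f) and LVL_CLK_SHIFT = 3: for
 * clk = 0x140, level 0 bucket (0x140 & 0x3f) = 0 is checked; the low
 * LVL_CLK_MASK bits of clk are zero, so the clock is shifted to 0x28
 * and level 1 bucket (0x28 & 0x3f) + 64 = 104 is checked as well.
 * Each level is tested before the low-bits check, and the scan stops
 * once the shifted clock has non-zero low bits, since the levels
 * above cannot have a bucket expiring at this jiffy.
 */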
1494 | ||
1da177e4 | 1495 | /* |
23696838 AMG |
1496 | * Find the next pending bucket of a level. Search from level start (@offset) |
1497 | * + @clk upwards and if nothing there, search from start of the level | |
1498 | * (@offset) up to @offset + @clk. | |
1da177e4 | 1499 | */ |
500462a9 TG |
1500 | static int next_pending_bucket(struct timer_base *base, unsigned offset, |
1501 | unsigned clk) | |
1502 | { | |
1503 | unsigned pos, start = offset + clk; | |
1504 | unsigned end = offset + LVL_SIZE; | |
1505 | ||
1506 | pos = find_next_bit(base->pending_map, end, start); | |
1507 | if (pos < end) | |
1508 | return pos - start; | |
1509 | ||
1510 | pos = find_next_bit(base->pending_map, start, offset); | |
1511 | return pos < start ? pos + LVL_SIZE - start : -1; | |
1512 | } | |
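/*
 * Editor's worked example, assuming LVL_SIZE = 64: with @clk = 60 and
 * a pending bit at @offset + 62, the first find_next_bit() over
 * [@offset + 60, @offset + 64) hits it and 62 - 60 = 2 is returned.
 * If instead only @offset + 3 is pending, the first search fails and
 * the wrapped search over [@offset, @offset + 60) finds it, returning
 * 3 + 64 - 60 = 7 buckets into the future at this level's granularity.
 */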
1513 | ||
1514 | /* | |
23696838 AMG |
1515 | * Search for the first expiring timer in the various clock levels. Caller must | |
1516 | * hold base->lock. | |
1da177e4 | 1517 | */ |
494af3ed | 1518 | static unsigned long __next_timer_interrupt(struct timer_base *base) |
1da177e4 | 1519 | { |
500462a9 TG |
1520 | unsigned long clk, next, adj; |
1521 | unsigned lvl, offset = 0; | |
1522 | ||
500462a9 TG |
1523 | next = base->clk + NEXT_TIMER_MAX_DELTA; |
1524 | clk = base->clk; | |
1525 | for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { | |
1526 | int pos = next_pending_bucket(base, offset, clk & LVL_MASK); | |
001ec1b3 | 1527 | unsigned long lvl_clk = clk & LVL_CLK_MASK; |
500462a9 TG |
1528 | |
1529 | if (pos >= 0) { | |
1530 | unsigned long tmp = clk + (unsigned long) pos; | |
1531 | ||
1532 | tmp <<= LVL_SHIFT(lvl); | |
1533 | if (time_before(tmp, next)) | |
1534 | next = tmp; | |
001ec1b3 FW |
1535 | |
1536 | /* | |
1537 | * If the next expiration happens before we reach | |
1538 | * the next level, no need to check further. | |
1539 | */ | |
1540 | if (pos <= ((LVL_CLK_DIV - lvl_clk) & LVL_CLK_MASK)) | |
1541 | break; | |
1da177e4 | 1542 | } |
500462a9 TG |
1543 | /* |
1544 | * Clock for the next level. If the current level clock lower | |
1545 | * bits are zero, we look at the next level as is. If not we | |
1546 | * need to advance it by one because that's going to be the | |
1547 | * next expiring bucket in that level. base->clk is the next | |
1548 | * expiring jiffy. So in case of: | |
1549 | * | |
1550 | * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0 | |
1551 | * 0 0 0 0 0 0 | |
1552 | * | |
1553 | * we have to look at all levels @index 0. With | |
1554 | * | |
1555 | * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0 | |
1556 | * 0 0 0 0 0 2 | |
1557 | * | |
1558 | * LVL0 has the next expiring bucket @index 2. The upper | |
1559 | * levels have the next expiring bucket @index 1. | |
1560 | * | |
1561 | * In case that the propagation wraps the next level the same | |
1562 | * rules apply: | |
1563 | * | |
1564 | * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0 | |
1565 | * 0 0 0 0 F 2 | |
1566 | * | |
1567 | * So after looking at LVL0 we get: | |
1568 | * | |
1569 | * LVL5 LVL4 LVL3 LVL2 LVL1 | |
1570 | * 0 0 0 1 0 | |
1571 | * | |
1572 | * So no propagation from LVL1 to LVL2 because that happened | |
1573 | * with the add already, but then we need to propagate further | |
1574 | * from LVL2 to LVL3. | |
1575 | * | |
1576 | * So the simple check whether the lower bits of the current | |
1577 | * level are 0 or not is sufficient for all cases. | |
1578 | */ | |
001ec1b3 | 1579 | adj = lvl_clk ? 1 : 0; |
500462a9 TG |
1580 | clk >>= LVL_CLK_SHIFT; |
1581 | clk += adj; | |
1da177e4 | 1582 | } |
31cd0e11 FW |
1583 | |
1584 | base->next_expiry_recalc = false; | |
1585 | ||
500462a9 | 1586 | return next; |
1cfd6849 | 1587 | } |
69239749 | 1588 | |
dc2a0f1f | 1589 | #ifdef CONFIG_NO_HZ_COMMON |
1cfd6849 TG |
1590 | /* |
1591 | * Check whether the next hrtimer event is before the next timer wheel | |
1592 | * event: | |
1593 | */ | |
c1ad348b | 1594 | static u64 cmp_next_hrtimer_event(u64 basem, u64 expires) |
1cfd6849 | 1595 | { |
c1ad348b | 1596 | u64 nextevt = hrtimer_get_next_event(); |
0662b713 | 1597 | |
9501b6cf | 1598 | /* |
c1ad348b TG |
1599 | * If high resolution timers are enabled |
1600 | * hrtimer_get_next_event() returns KTIME_MAX. | |
9501b6cf | 1601 | */ |
c1ad348b TG |
1602 | if (expires <= nextevt) |
1603 | return expires; | |
eaad084b TG |
1604 | |
1605 | /* | |
c1ad348b TG |
1606 | * If the next timer is already expired, return the tick base |
1607 | * time so the tick is fired immediately. | |
eaad084b | 1608 | */ |
c1ad348b TG |
1609 | if (nextevt <= basem) |
1610 | return basem; | |
eaad084b | 1611 | |
9501b6cf | 1612 | /* |
c1ad348b TG |
1613 | * Round up to the next jiffy. High resolution timers are | |
1614 | * off, so the hrtimers are expired in the tick and we need to | |
1615 | * make sure that this tick really expires the timer to avoid | |
1616 | * a ping-pong of the nohz stop code. | |
1617 | * | |
1618 | * Use DIV_ROUND_UP_ULL to prevent gcc calling __divdi3 | |
9501b6cf | 1619 | */ |
c1ad348b | 1620 | return DIV_ROUND_UP_ULL(nextevt, TICK_NSEC) * TICK_NSEC; |
1da177e4 | 1621 | } |
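/*
 * Editor's worked example for the rounding above: with HZ = 250
 * (TICK_NSEC = 4,000,000) and nextevt = 9,000,001 ns,
 * DIV_ROUND_UP_ULL(9000001, 4000000) = 3, so 12,000,000 ns is
 * returned - the first tick boundary at or after the hrtimer event,
 * which guarantees that tick actually expires it.
 */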
1cfd6849 TG |
1622 | |
1623 | /** | |
c1ad348b TG |
1624 | * get_next_timer_interrupt - return the time (clock mono) of the next timer |
1625 | * @basej: base time jiffies | |
1626 | * @basem: base time clock monotonic | |
1627 | * | |
1628 | * Returns the tick aligned clock monotonic time of the next pending | |
1629 | * timer or KTIME_MAX if no timer is pending. | |
1cfd6849 | 1630 | */ |
c1ad348b | 1631 | u64 get_next_timer_interrupt(unsigned long basej, u64 basem) |
1cfd6849 | 1632 | { |
500462a9 | 1633 | struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); |
c1ad348b TG |
1634 | u64 expires = KTIME_MAX; |
1635 | unsigned long nextevt; | |
46c8f0b0 | 1636 | bool is_max_delta; |
1cfd6849 | 1637 | |
dbd87b5a HC |
1638 | /* |
1639 | * Pretend that there is no timer pending if the cpu is offline. | |
1640 | * Possible pending timers will be migrated later to an active cpu. | |
1641 | */ | |
1642 | if (cpu_is_offline(smp_processor_id())) | |
e40468a5 TG |
1643 | return expires; |
1644 | ||
2287d866 | 1645 | raw_spin_lock(&base->lock); |
31cd0e11 FW |
1646 | if (base->next_expiry_recalc) |
1647 | base->next_expiry = __next_timer_interrupt(base); | |
1648 | nextevt = base->next_expiry; | |
46c8f0b0 | 1649 | is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA); |
31cd0e11 | 1650 | |
a683f390 | 1651 | /* |
041ad7bc TG |
1652 | * We have a fresh next event. Check whether we can forward the |
1653 | * base. We can only do that when @basej is past base->clk, | |
1654 | * otherwise we might rewind base->clk. | |
a683f390 | 1655 | */ |
041ad7bc TG |
1656 | if (time_after(basej, base->clk)) { |
1657 | if (time_after(nextevt, basej)) | |
1658 | base->clk = basej; | |
1659 | else if (time_after(nextevt, base->clk)) | |
1660 | base->clk = nextevt; | |
1661 | } | |
23696838 | 1662 | |
a683f390 | 1663 | if (time_before_eq(nextevt, basej)) { |
500462a9 | 1664 | expires = basem; |
a683f390 TG |
1665 | base->is_idle = false; |
1666 | } else { | |
46c8f0b0 | 1667 | if (!is_max_delta) |
34f41c03 | 1668 | expires = basem + (u64)(nextevt - basej) * TICK_NSEC; |
a683f390 | 1669 | /* |
2fe59f50 NP |
1670 | * If we expect to sleep more than a tick, mark the base idle. |
1671 | * Also the tick is stopped so any added timer must forward | |
1672 | * the base clk itself to keep granularity small. This idle | |
1673 | * logic is only maintained for the BASE_STD base, deferrable | |
1674 | * timers may still see large granularity skew (by design). | |
a683f390 | 1675 | */ |
1f8a4212 | 1676 | if ((expires - basem) > TICK_NSEC) |
a683f390 | 1677 | base->is_idle = true; |
e40468a5 | 1678 | } |
2287d866 | 1679 | raw_spin_unlock(&base->lock); |
1cfd6849 | 1680 | |
c1ad348b | 1681 | return cmp_next_hrtimer_event(basem, expires); |
1cfd6849 | 1682 | } |
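/*
 * Editor's worked example for the path above: with HZ = 1000
 * (TICK_NSEC = 1,000,000), basej = 1000, basem = 10,000,000 ns and
 * the next wheel event at nextevt = 1016, the pre-hrtimer result is
 * expires = basem + (1016 - 1000) * TICK_NSEC = 26,000,000 ns, and
 * base->is_idle is set because the sleep exceeds one tick.
 */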
23696838 | 1683 | |
a683f390 TG |
1684 | /** |
1685 | * timer_clear_idle - Clear the idle state of the timer base | |
1686 | * | |
1687 | * Called with interrupts disabled | |
1688 | */ | |
1689 | void timer_clear_idle(void) | |
1690 | { | |
1691 | struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); | |
1692 | ||
1693 | /* | |
1694 | * We do this unlocked. The worst outcome is a remote enqueue sending | |
1695 | * a pointless IPI, but taking the lock would just make the window for | |
1696 | * sending the IPI a few instructions smaller for the cost of taking | |
1697 | * the lock in the exit from idle path. | |
1698 | */ | |
1699 | base->is_idle = false; | |
1700 | } | |
1da177e4 LT |
1701 | #endif |
1702 | ||
73420fea AMG |
1703 | /** |
1704 | * __run_timers - run all expired timers (if any) on this CPU. | |
1705 | * @base: the timer vector to be processed. | |
1706 | */ | |
1707 | static inline void __run_timers(struct timer_base *base) | |
1708 | { | |
1709 | struct hlist_head heads[LVL_DEPTH]; | |
1710 | int levels; | |
1711 | ||
d4f7dae8 | 1712 | if (time_before(jiffies, base->next_expiry)) |
73420fea AMG |
1713 | return; |
1714 | ||
030dcdd1 | 1715 | timer_base_lock_expiry(base); |
2287d866 | 1716 | raw_spin_lock_irq(&base->lock); |
73420fea | 1717 | |
d4f7dae8 FW |
1718 | while (time_after_eq(jiffies, base->clk) && |
1719 | time_after_eq(jiffies, base->next_expiry)) { | |
73420fea | 1720 | levels = collect_expired_timers(base, heads); |
31cd0e11 FW |
1721 | /* |
1722 | * The only possible reason for not finding any expired | |
1723 | * timer at this clk is that all matching timers have been | |
1724 | * dequeued. | |
1725 | */ | |
1726 | WARN_ON_ONCE(!levels && !base->next_expiry_recalc); | |
73420fea | 1727 | base->clk++; |
dc2a0f1f | 1728 | base->next_expiry = __next_timer_interrupt(base); |
73420fea AMG |
1729 | |
1730 | while (levels--) | |
1731 | expire_timers(base, heads + levels); | |
1732 | } | |
2287d866 | 1733 | raw_spin_unlock_irq(&base->lock); |
030dcdd1 | 1734 | timer_base_unlock_expiry(base); |
73420fea AMG |
1735 | } |
1736 | ||
1da177e4 LT |
1737 | /* |
1738 | * This function runs expired timers in bottom half (softirq) context. | |
1739 | */ | |
0766f788 | 1740 | static __latent_entropy void run_timer_softirq(struct softirq_action *h) |
1da177e4 | 1741 | { |
500462a9 | 1742 | struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); |
1da177e4 | 1743 | |
500462a9 | 1744 | __run_timers(base); |
ced6d5c1 | 1745 | if (IS_ENABLED(CONFIG_NO_HZ_COMMON)) |
500462a9 | 1746 | __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF])); |
1da177e4 LT |
1747 | } |
1748 | ||
1749 | /* | |
1750 | * Called by the local, per-CPU timer interrupt on SMP. | |
1751 | */ | |
cc947f2b | 1752 | static void run_local_timers(void) |
1da177e4 | 1753 | { |
4e85876a TG |
1754 | struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); |
1755 | ||
d3d74453 | 1756 | hrtimer_run_queues(); |
4e85876a | 1757 | /* Raise the softirq only if required. */ |
d4f7dae8 | 1758 | if (time_before(jiffies, base->next_expiry)) { |
ed4bbf79 | 1759 | if (!IS_ENABLED(CONFIG_NO_HZ_COMMON)) |
4e85876a TG |
1760 | return; |
1761 | /* CPU is awake, so check the deferrable base. */ | |
1762 | base++; | |
d4f7dae8 | 1763 | if (time_before(jiffies, base->next_expiry)) |
4e85876a TG |
1764 | return; |
1765 | } | |
1da177e4 LT |
1766 | raise_softirq(TIMER_SOFTIRQ); |
1767 | } | |
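/*
 * Editor's note on the base++ above: it relies on BASE_DEF directly
 * following BASE_STD in the per-CPU timer_bases[] array, so
 * incrementing the pointer moves from the standard base to the
 * deferrable one.
 */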
1768 | ||
cc947f2b TG |
1769 | /* |
1770 | * Called from the timer interrupt handler to charge one tick to the current | |
1771 | * process. user_tick is 1 if the tick is user time, 0 for system. | |
1772 | */ | |
1773 | void update_process_times(int user_tick) | |
1774 | { | |
1775 | struct task_struct *p = current; | |
1776 | ||
1777 | PRANDOM_ADD_NOISE(jiffies, user_tick, p, 0); | |
1778 | ||
1779 | /* Note: this timer irq context must be accounted for as well. */ | |
1780 | account_process_tick(p, user_tick); | |
1781 | run_local_timers(); | |
1782 | rcu_sched_clock_irq(user_tick); | |
1783 | #ifdef CONFIG_IRQ_WORK | |
1784 | if (in_irq()) | |
1785 | irq_work_tick(); | |
1786 | #endif | |
1787 | scheduler_tick(); | |
1788 | if (IS_ENABLED(CONFIG_POSIX_TIMERS)) | |
1789 | run_posix_cpu_timers(); | |
1790 | } | |
1791 | ||
58e1177b KC |
1792 | /* |
1793 | * Since schedule_timeout()'s timer is defined on the stack, it must store | |
1794 | * the target task on the stack as well. | |
1795 | */ | |
1796 | struct process_timer { | |
1797 | struct timer_list timer; | |
1798 | struct task_struct *task; | |
1799 | }; | |
1800 | ||
1801 | static void process_timeout(struct timer_list *t) | |
1da177e4 | 1802 | { |
58e1177b KC |
1803 | struct process_timer *timeout = from_timer(timeout, t, timer); |
1804 | ||
1805 | wake_up_process(timeout->task); | |
1da177e4 LT |
1806 | } |
1807 | ||
1808 | /** | |
1809 | * schedule_timeout - sleep until timeout | |
1810 | * @timeout: timeout value in jiffies | |
1811 | * | |
6e317c32 AP |
1812 | * Make the current task sleep until @timeout jiffies have elapsed. |
1813 | * The function behavior depends on the current task state | |
1814 | * (see also set_current_state() description): | |
1da177e4 | 1815 | * |
6e317c32 AP |
1816 | * %TASK_RUNNING - the scheduler is called, but the task does not sleep |
1817 | * at all. That happens because sched_submit_work() does nothing for | |
1818 | * tasks in %TASK_RUNNING state. | |
1da177e4 LT |
1819 | * |
1820 | * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to | |
4b7e9cf9 | 1821 | * pass before the routine returns unless the current task is explicitly |
6e317c32 | 1822 | * woken up, (e.g. by wake_up_process()). |
1da177e4 LT |
1823 | * |
1824 | * %TASK_INTERRUPTIBLE - the routine may return early if a signal is | |
4b7e9cf9 DA |
1825 | * delivered to the current task or the current task is explicitly woken |
1826 | * up. | |
1da177e4 | 1827 | * |
6e317c32 | 1828 | * The current task state is guaranteed to be %TASK_RUNNING when this |
1da177e4 LT |
1829 | * routine returns. |
1830 | * | |
1831 | * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule | |
1832 | * the CPU away without a bound on the timeout. In this case the return | |
1833 | * value will be %MAX_SCHEDULE_TIMEOUT. | |
1834 | * | |
4b7e9cf9 | 1835 | * Returns 0 when the timer has expired otherwise the remaining time in |
6e317c32 | 1836 | * jiffies will be returned. In all cases the return value is guaranteed |
4b7e9cf9 | 1837 | * to be non-negative. |
1da177e4 | 1838 | */ |
7ad5b3a5 | 1839 | signed long __sched schedule_timeout(signed long timeout) |
1da177e4 | 1840 | { |
58e1177b | 1841 | struct process_timer timer; |
1da177e4 LT |
1842 | unsigned long expire; |
1843 | ||
1844 | switch (timeout) | |
1845 | { | |
1846 | case MAX_SCHEDULE_TIMEOUT: | |
1847 | /* | |
1848 | * These two special cases are here for the caller's | |
1849 | * convenience. Nothing more. We could take | |
1850 | * MAX_SCHEDULE_TIMEOUT from one of the negative values, | |
1851 | * but I'd like to return a valid offset (>= 0) to allow | |
1852 | * the caller to do everything it wants with the retval. | |
1853 | */ | |
1854 | schedule(); | |
1855 | goto out; | |
1856 | default: | |
1857 | /* | |
1858 | * Another bit of paranoia. Note that the retval will be | |
1859 | * 0 since no piece of the kernel is supposed to check | |
1860 | * for a negative retval of schedule_timeout() (since it | |
1861 | * should never happen anyway). You just have the printk() | |
1862 | * that will tell you if something has gone wrong and where. | |
1863 | */ | |
5b149bcc | 1864 | if (timeout < 0) { |
1da177e4 | 1865 | printk(KERN_ERR "schedule_timeout: wrong timeout " |
5b149bcc AM |
1866 | "value %lx\n", timeout); |
1867 | dump_stack(); | |
1da177e4 LT |
1868 | current->state = TASK_RUNNING; |
1869 | goto out; | |
1870 | } | |
1871 | } | |
1872 | ||
1873 | expire = timeout + jiffies; | |
1874 | ||
58e1177b KC |
1875 | timer.task = current; |
1876 | timer_setup_on_stack(&timer.timer, process_timeout, 0); | |
90c01894 | 1877 | __mod_timer(&timer.timer, expire, MOD_TIMER_NOTPENDING); |
1da177e4 | 1878 | schedule(); |
58e1177b | 1879 | del_singleshot_timer_sync(&timer.timer); |
1da177e4 | 1880 | |
c6f3a97f | 1881 | /* Remove the timer from the object tracker */ |
58e1177b | 1882 | destroy_timer_on_stack(&timer.timer); |
c6f3a97f | 1883 | |
1da177e4 LT |
1884 | timeout = expire - jiffies; |
1885 | ||
1886 | out: | |
1887 | return timeout < 0 ? 0 : timeout; | |
1888 | } | |
1da177e4 LT |
1889 | EXPORT_SYMBOL(schedule_timeout); |
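/*
 * Editor's usage sketch (assumed caller code, not part of this file):
 * sleeping for up to one second unless another thread wakes us
 * earlier with wake_up_process(), per the kerneldoc above:
 *
 *	signed long remaining;
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	remaining = schedule_timeout(HZ);
 *
 * remaining == 0 means the full second elapsed; a positive value is
 * the time left in jiffies after an explicit early wakeup.
 */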
1890 | ||
8a1c1757 AM |
1891 | /* |
1892 | * We can use __set_current_state() here because schedule_timeout() calls | |
1893 | * schedule() unconditionally. | |
1894 | */ | |
64ed93a2 NA |
1895 | signed long __sched schedule_timeout_interruptible(signed long timeout) |
1896 | { | |
a5a0d52c AM |
1897 | __set_current_state(TASK_INTERRUPTIBLE); |
1898 | return schedule_timeout(timeout); | |
64ed93a2 NA |
1899 | } |
1900 | EXPORT_SYMBOL(schedule_timeout_interruptible); | |
1901 | ||
294d5cc2 MW |
1902 | signed long __sched schedule_timeout_killable(signed long timeout) |
1903 | { | |
1904 | __set_current_state(TASK_KILLABLE); | |
1905 | return schedule_timeout(timeout); | |
1906 | } | |
1907 | EXPORT_SYMBOL(schedule_timeout_killable); | |
1908 | ||
64ed93a2 NA |
1909 | signed long __sched schedule_timeout_uninterruptible(signed long timeout) |
1910 | { | |
a5a0d52c AM |
1911 | __set_current_state(TASK_UNINTERRUPTIBLE); |
1912 | return schedule_timeout(timeout); | |
64ed93a2 NA |
1913 | } |
1914 | EXPORT_SYMBOL(schedule_timeout_uninterruptible); | |
1915 | ||
69b27baf AM |
1916 | /* |
1917 | * Like schedule_timeout_uninterruptible(), except this task will not contribute | |
1918 | * to load average. | |
1919 | */ | |
1920 | signed long __sched schedule_timeout_idle(signed long timeout) | |
1921 | { | |
1922 | __set_current_state(TASK_IDLE); | |
1923 | return schedule_timeout(timeout); | |
1924 | } | |
1925 | EXPORT_SYMBOL(schedule_timeout_idle); | |
1926 | ||
1da177e4 | 1927 | #ifdef CONFIG_HOTPLUG_CPU |
494af3ed | 1928 | static void migrate_timer_list(struct timer_base *new_base, struct hlist_head *head) |
1da177e4 LT |
1929 | { |
1930 | struct timer_list *timer; | |
0eeda71b | 1931 | int cpu = new_base->cpu; |
1da177e4 | 1932 | |
1dabbcec TG |
1933 | while (!hlist_empty(head)) { |
1934 | timer = hlist_entry(head->first, struct timer_list, entry); | |
ec44bc7a | 1935 | detach_timer(timer, false); |
0eeda71b | 1936 | timer->flags = (timer->flags & ~TIMER_BASEMASK) | cpu; |
1da177e4 | 1937 | internal_add_timer(new_base, timer); |
1da177e4 | 1938 | } |
1da177e4 LT |
1939 | } |
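/*
 * Editor's note on the flags update above: only the bits covered by
 * TIMER_BASEMASK are replaced, so property flags such as
 * TIMER_DEFERRABLE or TIMER_IRQSAFE survive while the timer is
 * re-homed to @new_base's CPU.
 */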
1940 | ||
26456f87 TG |
1941 | int timers_prepare_cpu(unsigned int cpu) |
1942 | { | |
1943 | struct timer_base *base; | |
1944 | int b; | |
1945 | ||
1946 | for (b = 0; b < NR_BASES; b++) { | |
1947 | base = per_cpu_ptr(&timer_bases[b], cpu); | |
1948 | base->clk = jiffies; | |
1949 | base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA; | |
1950 | base->is_idle = false; | |
26456f87 TG |
1951 | } |
1952 | return 0; | |
1953 | } | |
1954 | ||
24f73b99 | 1955 | int timers_dead_cpu(unsigned int cpu) |
1da177e4 | 1956 | { |
494af3ed TG |
1957 | struct timer_base *old_base; |
1958 | struct timer_base *new_base; | |
500462a9 | 1959 | int b, i; |
1da177e4 LT |
1960 | |
1961 | BUG_ON(cpu_online(cpu)); | |
55c888d6 | 1962 | |
500462a9 TG |
1963 | for (b = 0; b < NR_BASES; b++) { |
1964 | old_base = per_cpu_ptr(&timer_bases[b], cpu); | |
1965 | new_base = get_cpu_ptr(&timer_bases[b]); | |
1966 | /* | |
1967 | * The caller is globally serialized and nobody else | |
1968 | * takes two locks at once, deadlock is not possible. | |
1969 | */ | |
2287d866 SAS |
1970 | raw_spin_lock_irq(&new_base->lock); |
1971 | raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING); | |
500462a9 | 1972 | |
c52232a4 LC |
1973 | /* |
1974 | * The current CPUs base clock might be stale. Update it | |
1975 | * before moving the timers over. | |
1976 | */ | |
1977 | forward_timer_base(new_base); | |
1978 | ||
500462a9 TG |
1979 | BUG_ON(old_base->running_timer); |
1980 | ||
1981 | for (i = 0; i < WHEEL_SIZE; i++) | |
1982 | migrate_timer_list(new_base, old_base->vectors + i); | |
8def9060 | 1983 | |
2287d866 SAS |
1984 | raw_spin_unlock(&old_base->lock); |
1985 | raw_spin_unlock_irq(&new_base->lock); | |
500462a9 TG |
1986 | put_cpu_ptr(&timer_bases); |
1987 | } | |
24f73b99 | 1988 | return 0; |
1da177e4 | 1989 | } |
1da177e4 | 1990 | |
3650b57f | 1991 | #endif /* CONFIG_HOTPLUG_CPU */ |
1da177e4 | 1992 | |
0eeda71b | 1993 | static void __init init_timer_cpu(int cpu) |
8def9060 | 1994 | { |
500462a9 TG |
1995 | struct timer_base *base; |
1996 | int i; | |
8def9060 | 1997 | |
500462a9 TG |
1998 | for (i = 0; i < NR_BASES; i++) { |
1999 | base = per_cpu_ptr(&timer_bases[i], cpu); | |
2000 | base->cpu = cpu; | |
2287d866 | 2001 | raw_spin_lock_init(&base->lock); |
500462a9 | 2002 | base->clk = jiffies; |
dc2a0f1f | 2003 | base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA; |
030dcdd1 | 2004 | timer_base_init_expiry_lock(base); |
500462a9 | 2005 | } |
8def9060 VK |
2006 | } |
2007 | ||
2008 | static void __init init_timer_cpus(void) | |
1da177e4 | 2009 | { |
8def9060 VK |
2010 | int cpu; |
2011 | ||
0eeda71b TG |
2012 | for_each_possible_cpu(cpu) |
2013 | init_timer_cpu(cpu); | |
8def9060 | 2014 | } |
e52b1db3 | 2015 | |
8def9060 VK |
2016 | void __init init_timers(void) |
2017 | { | |
8def9060 | 2018 | init_timer_cpus(); |
1fb497dd | 2019 | posix_cputimers_init_work(); |
962cf36c | 2020 | open_softirq(TIMER_SOFTIRQ, run_timer_softirq); |
1da177e4 LT |
2021 | } |
2022 | ||
1da177e4 LT |
2023 | /** |
2024 | * msleep - sleep safely even with waitqueue interruptions | |
2025 | * @msecs: Time in milliseconds to sleep for | |
2026 | */ | |
2027 | void msleep(unsigned int msecs) | |
2028 | { | |
2029 | unsigned long timeout = msecs_to_jiffies(msecs) + 1; | |
2030 | ||
75bcc8c5 NA |
2031 | while (timeout) |
2032 | timeout = schedule_timeout_uninterruptible(timeout); | |
1da177e4 LT |
2033 | } |
2034 | ||
2035 | EXPORT_SYMBOL(msleep); | |
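/*
 * Editor's note on the "+ 1" above (worked example): with HZ = 100 a
 * jiffy is 10 ms and msecs_to_jiffies(5) = 1 tick, but a 1-tick
 * timeout may expire at the very next tick, almost immediately. The
 * extra tick guarantees that at least the requested time passes, at
 * the cost of possibly sleeping a tick or so longer than asked.
 */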
2036 | ||
2037 | /** | |
96ec3efd | 2038 | * msleep_interruptible - sleep waiting for signals |
1da177e4 LT |
2039 | * @msecs: Time in milliseconds to sleep for |
2040 | */ | |
2041 | unsigned long msleep_interruptible(unsigned int msecs) | |
2042 | { | |
2043 | unsigned long timeout = msecs_to_jiffies(msecs) + 1; | |
2044 | ||
75bcc8c5 NA |
2045 | while (timeout && !signal_pending(current)) |
2046 | timeout = schedule_timeout_interruptible(timeout); | |
1da177e4 LT |
2047 | return jiffies_to_msecs(timeout); |
2048 | } | |
2049 | ||
2050 | EXPORT_SYMBOL(msleep_interruptible); | |
5e7f5a17 | 2051 | |
5e7f5a17 | 2052 | /** |
b5227d03 | 2053 | * usleep_range - Sleep for an approximate time |
5e7f5a17 PP |
2054 | * @min: Minimum time in usecs to sleep |
2055 | * @max: Maximum time in usecs to sleep | |
b5227d03 BH |
2056 | * |
2057 | * In non-atomic context where the exact wakeup time is flexible, use | |
2058 | * usleep_range() instead of udelay(). The sleep improves responsiveness | |
2059 | * by avoiding the CPU-hogging busy-wait of udelay(), and the range reduces | |
2060 | * power usage by allowing hrtimers to take advantage of an already- | |
2061 | * scheduled interrupt instead of scheduling a new one just for this sleep. | |
5e7f5a17 | 2062 | */ |
2ad5d327 | 2063 | void __sched usleep_range(unsigned long min, unsigned long max) |
5e7f5a17 | 2064 | { |
6c5e9059 DA |
2065 | ktime_t exp = ktime_add_us(ktime_get(), min); |
2066 | u64 delta = (u64)(max - min) * NSEC_PER_USEC; | |
2067 | ||
2068 | for (;;) { | |
2069 | __set_current_state(TASK_UNINTERRUPTIBLE); | |
2070 | /* Do not return before the requested sleep time has elapsed */ | |
2071 | if (!schedule_hrtimeout_range(&exp, delta, HRTIMER_MODE_ABS)) | |
2072 | break; | |
2073 | } | |
5e7f5a17 PP |
2074 | } |
2075 | EXPORT_SYMBOL(usleep_range); |
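/*
 * Editor's usage sketch (assumed driver code): polling a device
 * status register in the flexible, non-atomic case described above,
 * instead of burning CPU time in udelay(). dev, FOO_STATUS and
 * FOO_READY are hypothetical:
 *
 *	while (!(readl(dev->regs + FOO_STATUS) & FOO_READY))
 *		usleep_range(50, 200);
 *
 * The wide range gives the hrtimer subsystem room to coalesce this
 * wakeup with other already-scheduled events.
 */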