/*
 *  linux/kernel/timer.c
 *
 *  Kernel internal timers, basic process system calls
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
 *
 *  1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *              "A Kernel Model for Precision Timekeeping" by Dave Mills
 *  1998-12-24  Fixed an xtime SMP race (we need the xtime_lock rw spinlock to
 *              serialize accesses to xtime/lost_ticks).
 *                              Copyright (C) 1998  Andrea Arcangeli
 *  1999-03-10  Improved NTP compatibility by Ulrich Windl
 *  2002-05-31  Move sys_sysinfo here and make its locking sane, Robert Love
 *  2000-10-05  Implemented scalable SMP per-CPU timer handling.
 *                              Copyright (C) 2000, 2001, 2002  Ingo Molnar
 *              Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 */

#include <linux/kernel_stat.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pid_namespace.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/tick.h>
#include <linux/kallsyms.h>
#include <linux/irq_work.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>

#define CREATE_TRACE_POINTS
#include <trace/events/timer.h>

u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

/*
 * per-CPU timer vector definitions:
 */
#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)
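
/*
 * Illustrative sizing (assuming !CONFIG_BASE_SMALL, i.e. TVR_BITS = 8 and
 * TVN_BITS = 6): tv1 spans 2^8 = 256 jiffies and each further level
 * multiplies the reach by 2^6, so tv1..tv5 together cover
 * 2^(8 + 4*6) = 2^32 jiffies.
 */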

struct tvec {
	struct list_head vec[TVN_SIZE];
};

struct tvec_root {
	struct list_head vec[TVR_SIZE];
};

struct tvec_base {
	spinlock_t lock;
	struct timer_list *running_timer;
	unsigned long timer_jiffies;
	unsigned long next_timer;
	unsigned long active_timers;
	struct tvec_root tv1;
	struct tvec tv2;
	struct tvec tv3;
	struct tvec tv4;
	struct tvec tv5;
} ____cacheline_aligned;

struct tvec_base boot_tvec_bases;
EXPORT_SYMBOL(boot_tvec_bases);
static DEFINE_PER_CPU(struct tvec_base *, tvec_bases) = &boot_tvec_bases;

/* Functions below help us manage 'deferrable' flag */
static inline unsigned int tbase_get_deferrable(struct tvec_base *base)
{
	return ((unsigned int)(unsigned long)base & TIMER_DEFERRABLE);
}

static inline struct tvec_base *tbase_get_base(struct tvec_base *base)
{
	return ((struct tvec_base *)((unsigned long)base & ~TIMER_FLAG_MASK));
}

static inline void
timer_set_base(struct timer_list *timer, struct tvec_base *new_base)
{
	unsigned long flags = (unsigned long)timer->base & TIMER_FLAG_MASK;

	timer->base = (struct tvec_base *)((unsigned long)(new_base) | flags);
}
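
/*
 * Illustrative example (hypothetical pointer value): struct tvec_base is
 * ____cacheline_aligned, so the low bits of a genuine base pointer are
 * always zero.  The helpers above exploit that to carry flag bits inside
 * timer->base itself: a stored value of
 * (0xffff880001d10000 | TIMER_DEFERRABLE) decodes to the base pointer
 * 0xffff880001d10000 with the deferrable flag set.
 */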

static unsigned long round_jiffies_common(unsigned long j, int cpu,
		bool force_up)
{
	int rem;
	unsigned long original = j;

	/*
	 * We don't want all cpus firing their timers at once hitting the
	 * same lock or cachelines, so we skew each extra cpu with an extra
	 * 3 jiffies. This 3 jiffies came originally from the mm/ code which
	 * already did this.
	 * The skew is done by adding 3*cpunr, then rounding, then subtracting
	 * this extra offset again.
	 */
	j += cpu * 3;

	rem = j % HZ;

	/*
	 * If the target jiffy is just after a whole second (which can happen
	 * due to delays of the timer irq, long irq off times etc etc) then
	 * we should round down to the whole second, not up. Use 1/4th second
	 * as cutoff for this rounding as an extreme upper bound for this.
	 * But never round down if @force_up is set.
	 */
	if (rem < HZ/4 && !force_up) /* round down */
		j = j - rem;
	else /* round up */
		j = j - rem + HZ;

	/* now that we have rounded, subtract the extra skew again */
	j -= cpu * 3;

	if (j <= jiffies) /* rounding ate our timeout entirely; */
		return original;
	return j;
}

/**
 * __round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, false);
}
EXPORT_SYMBOL_GPL(__round_jiffies);
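
/*
 * Worked example (assuming HZ == 1000 and cpu == 0): for j = 5990,
 * rem = 990 >= HZ/4, so round_jiffies_common() rounds up and
 * __round_jiffies(5990, 0) returns 6000; for j = 6100, rem = 100 < HZ/4,
 * so it rounds down to 6000.  Nearby timers thus coalesce on the same
 * whole-second boundary.
 */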

/**
 * __round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, false) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_relative);

/**
 * round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), false);
}
EXPORT_SYMBOL_GPL(round_jiffies);

/**
 * round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies_relative(unsigned long j)
{
	return __round_jiffies_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_relative);

/**
 * __round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies() except that it will never
 * round down.  This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, true);
}
EXPORT_SYMBOL_GPL(__round_jiffies_up);

/**
 * __round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies_relative() except that it will never
 * round down.  This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, true) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);

/**
 * round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * This is the same as round_jiffies() except that it will never
 * round down.  This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), true);
}
EXPORT_SYMBOL_GPL(round_jiffies_up);

/**
 * round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * This is the same as round_jiffies_relative() except that it will never
 * round down.  This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up_relative(unsigned long j)
{
	return __round_jiffies_up_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_up_relative);

/**
 * set_timer_slack - set the allowed slack for a timer
 * @timer: the timer to be modified
 * @slack_hz: the amount of time (in jiffies) allowed for rounding
 *
 * Set the amount of time, in jiffies, that a certain timer has
 * in terms of slack. By setting this value, the timer subsystem
 * will schedule the actual timer somewhere between
 * the time mod_timer() asks for, and that time plus the slack.
 *
 * By setting the slack to -1, a percentage of the delay is used
 * instead.
 */
void set_timer_slack(struct timer_list *timer, int slack_hz)
{
	timer->slack = slack_hz;
}
EXPORT_SYMBOL_GPL(set_timer_slack);
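
/*
 * Usage sketch (hypothetical driver names, for illustration only): a poll
 * that may fire up to 10 jiffies late could be set up as
 *
 *	setup_timer(&mydev->poll_timer, mydev_poll, (unsigned long)mydev);
 *	set_timer_slack(&mydev->poll_timer, 10);
 *	mod_timer(&mydev->poll_timer, jiffies + HZ);
 *
 * letting apply_slack() below coalesce it with neighbouring timers.
 */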

static void
__internal_add_timer(struct tvec_base *base, struct timer_list *timer)
{
	unsigned long expires = timer->expires;
	unsigned long idx = expires - base->timer_jiffies;
	struct list_head *vec;

	if (idx < TVR_SIZE) {
		int i = expires & TVR_MASK;
		vec = base->tv1.vec + i;
	} else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
		int i = (expires >> TVR_BITS) & TVN_MASK;
		vec = base->tv2.vec + i;
	} else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
		vec = base->tv3.vec + i;
	} else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
		vec = base->tv4.vec + i;
	} else if ((signed long) idx < 0) {
		/*
		 * Can happen if you add a timer with expires == jiffies,
		 * or you set a timer to go off in the past
		 */
		vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
	} else {
		int i;
		/* If the timeout is larger than 0xffffffff on 64-bit
		 * architectures then we use the maximum timeout:
		 */
		if (idx > 0xffffffffUL) {
			idx = 0xffffffffUL;
			expires = idx + base->timer_jiffies;
		}
		i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
		vec = base->tv5.vec + i;
	}
	/*
	 * Timers are FIFO:
	 */
	list_add_tail(&timer->entry, vec);
}
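
/*
 * Worked example (assuming !CONFIG_BASE_SMALL, so TVR_BITS = 8 and
 * TVN_BITS = 6): with base->timer_jiffies = 1000 and timer->expires = 1100,
 * idx = 100 < TVR_SIZE (256), so the timer lands in tv1 at index
 * 1100 & 255 = 76.  With expires = 2000, idx = 1000 < 1 << 14, so it goes
 * into tv2 at index (2000 >> 8) & 63 = 7.
 */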

static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
{
	__internal_add_timer(base, timer);
	/*
	 * Update base->active_timers and base->next_timer
	 */
	if (!tbase_get_deferrable(timer->base)) {
		if (time_before(timer->expires, base->next_timer))
			base->next_timer = timer->expires;
		base->active_timers++;
	}
}

#ifdef CONFIG_TIMER_STATS
void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
{
	if (timer->start_site)
		return;

	timer->start_site = addr;
	memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
	timer->start_pid = current->pid;
}

static void timer_stats_account_timer(struct timer_list *timer)
{
	unsigned int flag = 0;

	if (likely(!timer->start_site))
		return;
	if (unlikely(tbase_get_deferrable(timer->base)))
		flag |= TIMER_STATS_FLAG_DEFERRABLE;

	timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
				 timer->function, timer->start_comm, flag);
}

#else
static void timer_stats_account_timer(struct timer_list *timer) {}
#endif

#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static struct debug_obj_descr timer_debug_descr;

static void *timer_debug_hint(void *addr)
{
	return ((struct timer_list *) addr)->function;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int timer_fixup_init(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_init(timer, &timer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/* Stub timer callback for improperly used timers. */
static void stub_timer(unsigned long data)
{
	WARN_ON(1);
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int timer_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. The timer was
		 * statically initialized. We just make sure that it
		 * is tracked in the object tracker.
		 */
		if (timer->entry.next == NULL &&
		    timer->entry.prev == TIMER_ENTRY_STATIC) {
			debug_object_init(timer, &timer_debug_descr);
			debug_object_activate(timer, &timer_debug_descr);
			return 0;
		} else {
			setup_timer(timer, stub_timer, 0);
			return 1;
		}
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int timer_fixup_free(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_free(timer, &timer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_assert_init is called when:
 * - an untracked/uninit-ed object is found
 */
static int timer_fixup_assert_init(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		if (timer->entry.prev == TIMER_ENTRY_STATIC) {
			/*
			 * This is not really a fixup. The timer was
			 * statically initialized. We just make sure that it
			 * is tracked in the object tracker.
			 */
			debug_object_init(timer, &timer_debug_descr);
			return 0;
		} else {
			setup_timer(timer, stub_timer, 0);
			return 1;
		}
	default:
		return 0;
	}
}

static struct debug_obj_descr timer_debug_descr = {
	.name			= "timer_list",
	.debug_hint		= timer_debug_hint,
	.fixup_init		= timer_fixup_init,
	.fixup_activate		= timer_fixup_activate,
	.fixup_free		= timer_fixup_free,
	.fixup_assert_init	= timer_fixup_assert_init,
};

static inline void debug_timer_init(struct timer_list *timer)
{
	debug_object_init(timer, &timer_debug_descr);
}

static inline void debug_timer_activate(struct timer_list *timer)
{
	debug_object_activate(timer, &timer_debug_descr);
}

static inline void debug_timer_deactivate(struct timer_list *timer)
{
	debug_object_deactivate(timer, &timer_debug_descr);
}

static inline void debug_timer_free(struct timer_list *timer)
{
	debug_object_free(timer, &timer_debug_descr);
}

static inline void debug_timer_assert_init(struct timer_list *timer)
{
	debug_object_assert_init(timer, &timer_debug_descr);
}

static void do_init_timer(struct timer_list *timer, unsigned int flags,
			  const char *name, struct lock_class_key *key);

void init_timer_on_stack_key(struct timer_list *timer, unsigned int flags,
			     const char *name, struct lock_class_key *key)
{
	debug_object_init_on_stack(timer, &timer_debug_descr);
	do_init_timer(timer, flags, name, key);
}
EXPORT_SYMBOL_GPL(init_timer_on_stack_key);

void destroy_timer_on_stack(struct timer_list *timer)
{
	debug_object_free(timer, &timer_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_timer_on_stack);

#else
static inline void debug_timer_init(struct timer_list *timer) { }
static inline void debug_timer_activate(struct timer_list *timer) { }
static inline void debug_timer_deactivate(struct timer_list *timer) { }
static inline void debug_timer_assert_init(struct timer_list *timer) { }
#endif

static inline void debug_init(struct timer_list *timer)
{
	debug_timer_init(timer);
	trace_timer_init(timer);
}

static inline void
debug_activate(struct timer_list *timer, unsigned long expires)
{
	debug_timer_activate(timer);
	trace_timer_start(timer, expires);
}

static inline void debug_deactivate(struct timer_list *timer)
{
	debug_timer_deactivate(timer);
	trace_timer_cancel(timer);
}

static inline void debug_assert_init(struct timer_list *timer)
{
	debug_timer_assert_init(timer);
}

static void do_init_timer(struct timer_list *timer, unsigned int flags,
			  const char *name, struct lock_class_key *key)
{
	struct tvec_base *base = __raw_get_cpu_var(tvec_bases);

	timer->entry.next = NULL;
	timer->base = (void *)((unsigned long)base | flags);
	timer->slack = -1;
#ifdef CONFIG_TIMER_STATS
	timer->start_site = NULL;
	timer->start_pid = -1;
	memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
	lockdep_init_map(&timer->lockdep_map, name, key, 0);
}

/**
 * init_timer_key - initialize a timer
 * @timer: the timer to be initialized
 * @flags: timer flags
 * @name: name of the timer
 * @key: lockdep class key of the fake lock used for tracking timer
 *       sync lock dependencies
 *
 * init_timer_key() must be done to a timer prior to calling *any* of the
 * other timer functions.
 */
void init_timer_key(struct timer_list *timer, unsigned int flags,
		    const char *name, struct lock_class_key *key)
{
	debug_init(timer);
	do_init_timer(timer, flags, name, key);
}
EXPORT_SYMBOL(init_timer_key);

static inline void detach_timer(struct timer_list *timer, bool clear_pending)
{
	struct list_head *entry = &timer->entry;

	debug_deactivate(timer);

	__list_del(entry->prev, entry->next);
	if (clear_pending)
		entry->next = NULL;
	entry->prev = LIST_POISON2;
}

static inline void
detach_expired_timer(struct timer_list *timer, struct tvec_base *base)
{
	detach_timer(timer, true);
	if (!tbase_get_deferrable(timer->base))
		base->active_timers--;
}

static int detach_if_pending(struct timer_list *timer, struct tvec_base *base,
			     bool clear_pending)
{
	if (!timer_pending(timer))
		return 0;

	detach_timer(timer, clear_pending);
	if (!tbase_get_deferrable(timer->base)) {
		base->active_timers--;
		if (timer->expires == base->next_timer)
			base->next_timer = base->timer_jiffies;
	}
	return 1;
}

/*
 * We are using hashed locking: holding per_cpu(tvec_bases).lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on ->tvX lists.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
static struct tvec_base *lock_timer_base(struct timer_list *timer,
					 unsigned long *flags)
	__acquires(timer->base->lock)
{
	struct tvec_base *base;

	for (;;) {
		struct tvec_base *prelock_base = timer->base;
		base = tbase_get_base(prelock_base);
		if (likely(base != NULL)) {
			spin_lock_irqsave(&base->lock, *flags);
			if (likely(prelock_base == timer->base))
				return base;
			/* The timer has migrated to another CPU */
			spin_unlock_irqrestore(&base->lock, *flags);
		}
		cpu_relax();
	}
}

static inline int
__mod_timer(struct timer_list *timer, unsigned long expires,
	    bool pending_only, int pinned)
{
	struct tvec_base *base, *new_base;
	unsigned long flags;
	int ret = 0, cpu;

	timer_stats_timer_set_start_info(timer);
	BUG_ON(!timer->function);

	base = lock_timer_base(timer, &flags);

	ret = detach_if_pending(timer, base, false);
	if (!ret && pending_only)
		goto out_unlock;

	debug_activate(timer, expires);

	cpu = smp_processor_id();

#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
	if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu))
		cpu = get_nohz_timer_target();
#endif
	new_base = per_cpu(tvec_bases, cpu);

	if (base != new_base) {
		/*
		 * We are trying to schedule the timer on the local CPU.
		 * However we can't change timer's base while it is running,
		 * otherwise del_timer_sync() can't detect that the timer's
		 * handler has not yet finished. This also guarantees that
		 * the timer is serialized wrt itself.
		 */
		if (likely(base->running_timer != timer)) {
			/* See the comment in lock_timer_base() */
			timer_set_base(timer, NULL);
			spin_unlock(&base->lock);
			base = new_base;
			spin_lock(&base->lock);
			timer_set_base(timer, base);
		}
	}

	timer->expires = expires;
	internal_add_timer(base, timer);

out_unlock:
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}

/**
 * mod_timer_pending - modify a pending timer's timeout
 * @timer: the pending timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pending() is the same for pending timers as mod_timer(),
 * but will not re-activate and modify already deleted timers.
 *
 * It is useful for unserialized use of timers.
 */
int mod_timer_pending(struct timer_list *timer, unsigned long expires)
{
	return __mod_timer(timer, expires, true, TIMER_NOT_PINNED);
}
EXPORT_SYMBOL(mod_timer_pending);

/*
 * Decide where to put the timer while taking the slack into account
 *
 * Algorithm:
 * 1) calculate the maximum (absolute) time
 * 2) calculate the highest bit where the expires and new max are different
 * 3) use this bit to make a mask
 * 4) use the bitmask to round down the maximum time, so that all last
 *    bits are zeros
 */
static inline
unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
{
	unsigned long expires_limit, mask;
	int bit;

	if (timer->slack >= 0) {
		expires_limit = expires + timer->slack;
	} else {
		long delta = expires - jiffies;

		if (delta < 256)
			return expires;

		expires_limit = expires + delta / 256;
	}
	mask = expires ^ expires_limit;
	if (mask == 0)
		return expires;

	bit = find_last_bit(&mask, BITS_PER_LONG);

	mask = (1 << bit) - 1;

	expires_limit = expires_limit & ~(mask);

	return expires_limit;
}
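
/*
 * Worked example (slack == -1 and hypothetical values): with jiffies = 3071
 * and expires = 4095, delta = 1024, so expires_limit = 4095 + 4 = 4099.
 * mask = 4095 ^ 4099 = 0x1ffc, the highest differing bit is bit 12, the
 * final mask is 0xfff, and apply_slack() returns 4099 & ~0xfff = 4096: a
 * coarser expiry inside [4095, 4099] that nearby timers can share.
 */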

/**
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer() is a more efficient way to update the expire field of an
 * active timer (if the timer is inactive it will be activated)
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (i.e. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
	expires = apply_slack(timer, expires);

	/*
	 * This is a common optimization triggered by the
	 * networking code - if the timer is re-modified
	 * to be the same thing then just return:
	 */
	if (timer_pending(timer) && timer->expires == expires)
		return 1;

	return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);
}
EXPORT_SYMBOL(mod_timer);

/**
 * mod_timer_pinned - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pinned() is a way to update the expire field of an
 * active timer (if the timer is inactive it will be activated)
 * and to ensure that the timer is scheduled on the current CPU.
 *
 * Note that this does not prevent the timer from being migrated
 * when the current CPU goes offline.  If this is a problem for
 * you, use CPU-hotplug notifiers to handle it correctly, for
 * example, cancelling the timer when the corresponding CPU goes
 * offline.
 *
 * mod_timer_pinned(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 */
int mod_timer_pinned(struct timer_list *timer, unsigned long expires)
{
	if (timer->expires == expires && timer_pending(timer))
		return 1;

	return __mod_timer(timer, expires, false, TIMER_PINNED);
}
EXPORT_SYMBOL(mod_timer_pinned);

/**
 * add_timer - start a timer
 * @timer: the timer to be added
 *
 * The kernel will do a ->function(->data) callback from the
 * timer interrupt at the ->expires point in the future. The
 * current time is 'jiffies'.
 *
 * The timer's ->expires, ->function (and if the handler uses it, ->data)
 * fields must be set prior to calling this function.
 *
 * Timers with an ->expires field in the past will be executed in the next
 * timer tick.
 */
void add_timer(struct timer_list *timer)
{
	BUG_ON(timer_pending(timer));
	mod_timer(timer, timer->expires);
}
EXPORT_SYMBOL(add_timer);

/**
 * add_timer_on - start a timer on a particular CPU
 * @timer: the timer to be added
 * @cpu: the CPU to start it on
 *
 * This is not very scalable on SMP. Double adds are not possible.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
	struct tvec_base *base = per_cpu(tvec_bases, cpu);
	unsigned long flags;

	timer_stats_timer_set_start_info(timer);
	BUG_ON(timer_pending(timer) || !timer->function);
	spin_lock_irqsave(&base->lock, flags);
	timer_set_base(timer, base);
	debug_activate(timer, timer->expires);
	internal_add_timer(base, timer);
	/*
	 * Check whether the other CPU is idle and needs to be
	 * triggered to reevaluate the timer wheel when nohz is
	 * active. We are protected against the other CPU fiddling
	 * with the timer by holding the timer base lock. This also
	 * makes sure that a CPU on the way to idle can not evaluate
	 * the timer wheel.
	 */
	wake_up_idle_cpu(cpu);
	spin_unlock_irqrestore(&base->lock, flags);
}
EXPORT_SYMBOL_GPL(add_timer_on);

/**
 * del_timer - deactivate a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and inactive
 * timers.
 *
 * The function returns whether it has deactivated a pending timer or not.
 * (i.e. del_timer() of an inactive timer returns 0, del_timer() of an
 * active timer returns 1.)
 */
int del_timer(struct timer_list *timer)
{
	struct tvec_base *base;
	unsigned long flags;
	int ret = 0;

	debug_assert_init(timer);

	timer_stats_timer_clear_start_info(timer);
	if (timer_pending(timer)) {
		base = lock_timer_base(timer, &flags);
		ret = detach_if_pending(timer, base, true);
		spin_unlock_irqrestore(&base->lock, flags);
	}

	return ret;
}
EXPORT_SYMBOL(del_timer);

/**
 * try_to_del_timer_sync - Try to deactivate a timer
 * @timer: the timer to be deactivated
 *
 * This function tries to deactivate a timer. Upon successful (ret >= 0)
 * exit the timer is not queued and the handler is not running on any CPU.
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
	struct tvec_base *base;
	unsigned long flags;
	int ret = -1;

	debug_assert_init(timer);

	base = lock_timer_base(timer, &flags);

	if (base->running_timer != timer) {
		timer_stats_timer_clear_start_info(timer);
		ret = detach_if_pending(timer, base, true);
	}
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}
EXPORT_SYMBOL(try_to_del_timer_sync);

#ifdef CONFIG_SMP
/**
 * del_timer_sync - deactivate a timer and wait for the handler to finish.
 * @timer: the timer to be deactivated
 *
 * This function only differs from del_timer() on SMP: besides deactivating
 * the timer it also makes sure the handler has finished executing on other
 * CPUs.
 *
 * Synchronization rules: Callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts. The caller must not hold locks which would prevent
 * completion of the timer's handler. The timer's handler must not call
 * add_timer_on(). Upon exit the timer is not queued and the handler is
 * not running on any CPU.
 *
 * Note: You must not hold locks that are held in interrupt context
 *   while calling this function. Even if the lock has nothing to do
 *   with the timer in question.  Here's why:
 *
 *    CPU0                             CPU1
 *    ----                             ----
 *                                   <SOFTIRQ>
 *                                   call_timer_fn();
 *                                     base->running_timer = mytimer;
 *  spin_lock_irq(somelock);
 *                                     <IRQ>
 *                                        spin_lock(somelock);
 *  del_timer_sync(mytimer);
 *   while (base->running_timer == mytimer);
 *
 * Now del_timer_sync() will never return and never release somelock.
 * The interrupt on the other CPU is waiting to grab somelock but
 * it has interrupted the softirq that CPU0 is waiting to finish.
 *
 * The function returns whether it has deactivated a pending timer or not.
 */
int del_timer_sync(struct timer_list *timer)
{
#ifdef CONFIG_LOCKDEP
	unsigned long flags;

	/*
	 * If lockdep gives a backtrace here, please reference
	 * the synchronization rules above.
	 */
	local_irq_save(flags);
	lock_map_acquire(&timer->lockdep_map);
	lock_map_release(&timer->lockdep_map);
	local_irq_restore(flags);
#endif
	/*
	 * don't use it in hardirq context, because it
	 * could lead to deadlock.
	 */
	WARN_ON(in_irq());
	for (;;) {
		int ret = try_to_del_timer_sync(timer);
		if (ret >= 0)
			return ret;
		cpu_relax();
	}
}
EXPORT_SYMBOL(del_timer_sync);
#endif

static int cascade(struct tvec_base *base, struct tvec *tv, int index)
{
	/* cascade all the timers from tv up one level */
	struct timer_list *timer, *tmp;
	struct list_head tv_list;

	list_replace_init(tv->vec + index, &tv_list);

	/*
	 * We are removing _all_ timers from the list, so we
	 * don't have to detach them individually.
	 */
	list_for_each_entry_safe(timer, tmp, &tv_list, entry) {
		BUG_ON(tbase_get_base(timer->base) != base);
		/* No accounting, while moving them */
		__internal_add_timer(base, timer);
	}

	return index;
}
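
/*
 * Illustrative note: __run_timers() below calls cascade() whenever
 * base->timer_jiffies wraps a tv1 period (every 256 jiffies with
 * TVR_BITS = 8).  The emptied tv2 bucket's timers are re-sorted into tv1
 * by __internal_add_timer(), since their idx relative to the advanced
 * timer_jiffies now falls below TVR_SIZE; tv3..tv5 cascade the same way
 * at their coarser periods.
 */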

static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
			  unsigned long data)
{
	int preempt_count = preempt_count();

#ifdef CONFIG_LOCKDEP
	/*
	 * It is permissible to free the timer from inside the
	 * function that is called from it, this we need to take into
	 * account for lockdep too.  To avoid bogus "held lock freed"
	 * warnings as well as problems when looking into
	 * timer->lockdep_map, make a copy and use that here.
	 */
	struct lockdep_map lockdep_map;

	lockdep_copy_map(&lockdep_map, &timer->lockdep_map);
#endif
	/*
	 * Couple the lock chain with the lock chain at
	 * del_timer_sync() by acquiring the lock_map around the fn()
	 * call here and in del_timer_sync().
	 */
	lock_map_acquire(&lockdep_map);

	trace_timer_expire_entry(timer);
	fn(data);
	trace_timer_expire_exit(timer);

	lock_map_release(&lockdep_map);

	if (preempt_count != preempt_count()) {
		WARN_ONCE(1, "timer: %pF preempt leak: %08x -> %08x\n",
			  fn, preempt_count, preempt_count());
		/*
		 * Restore the preempt count. That gives us a decent
		 * chance to survive and extract information. If the
		 * callback kept a lock held, bad luck, but not worse
		 * than the BUG() we had.
		 */
		preempt_count() = preempt_count;
	}
}

#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)
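
/*
 * Example (TVR_BITS = 8, TVN_BITS = 6): INDEX(0) extracts bits 8..13 of
 * base->timer_jiffies, i.e. the tv2 slot the wheel currently points at;
 * INDEX(1) extracts bits 14..19 for tv3, and so on up the wheel.
 */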

/**
 * __run_timers - run all expired timers (if any) on this CPU.
 * @base: the timer vector to be processed.
 *
 * This function cascades all vectors and executes all expired timer
 * vectors.
 */
static inline void __run_timers(struct tvec_base *base)
{
	struct timer_list *timer;

	spin_lock_irq(&base->lock);
	while (time_after_eq(jiffies, base->timer_jiffies)) {
		struct list_head work_list;
		struct list_head *head = &work_list;
		int index = base->timer_jiffies & TVR_MASK;

		/*
		 * Cascade timers:
		 */
		if (!index &&
			(!cascade(base, &base->tv2, INDEX(0))) &&
				(!cascade(base, &base->tv3, INDEX(1))) &&
					!cascade(base, &base->tv4, INDEX(2)))
			cascade(base, &base->tv5, INDEX(3));
		++base->timer_jiffies;
		list_replace_init(base->tv1.vec + index, &work_list);
		while (!list_empty(head)) {
			void (*fn)(unsigned long);
			unsigned long data;

			timer = list_first_entry(head, struct timer_list, entry);
			fn = timer->function;
			data = timer->data;

			timer_stats_account_timer(timer);

			base->running_timer = timer;
			detach_expired_timer(timer, base);

			spin_unlock_irq(&base->lock);
			call_timer_fn(timer, fn, data);
			spin_lock_irq(&base->lock);
		}
	}
	base->running_timer = NULL;
	spin_unlock_irq(&base->lock);
}

#ifdef CONFIG_NO_HZ
/*
 * Find out when the next timer event is due to happen. This
 * is used on S/390 to stop all activity when a CPU is idle.
 * This function needs to be called with interrupts disabled.
 */
static unsigned long __next_timer_interrupt(struct tvec_base *base)
{
	unsigned long timer_jiffies = base->timer_jiffies;
	unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA;
	int index, slot, array, found = 0;
	struct timer_list *nte;
	struct tvec *varray[4];

	/* Look for timer events in tv1. */
	index = slot = timer_jiffies & TVR_MASK;
	do {
		list_for_each_entry(nte, base->tv1.vec + slot, entry) {
			if (tbase_get_deferrable(nte->base))
				continue;

			found = 1;
			expires = nte->expires;
			/* Look at the cascade bucket(s)? */
			if (!index || slot < index)
				goto cascade;
			return expires;
		}
		slot = (slot + 1) & TVR_MASK;
	} while (slot != index);

cascade:
	/* Calculate the next cascade event */
	if (index)
		timer_jiffies += TVR_SIZE - index;
	timer_jiffies >>= TVR_BITS;

	/* Check tv2-tv5. */
	varray[0] = &base->tv2;
	varray[1] = &base->tv3;
	varray[2] = &base->tv4;
	varray[3] = &base->tv5;

	for (array = 0; array < 4; array++) {
		struct tvec *varp = varray[array];

		index = slot = timer_jiffies & TVN_MASK;
		do {
			list_for_each_entry(nte, varp->vec + slot, entry) {
				if (tbase_get_deferrable(nte->base))
					continue;

				found = 1;
				if (time_before(nte->expires, expires))
					expires = nte->expires;
			}
			/*
			 * Do we still search for the first timer or are
			 * we looking up the cascade buckets ?
			 */
			if (found) {
				/* Look at the cascade bucket(s)? */
				if (!index || slot < index)
					break;
				return expires;
			}
			slot = (slot + 1) & TVN_MASK;
		} while (slot != index);

		if (index)
			timer_jiffies += TVN_SIZE - index;
		timer_jiffies >>= TVN_BITS;
	}
	return expires;
}

/*
 * Check if the next hrtimer event is before the next timer wheel
 * event:
 */
static unsigned long cmp_next_hrtimer_event(unsigned long now,
					    unsigned long expires)
{
	ktime_t hr_delta = hrtimer_get_next_event();
	struct timespec tsdelta;
	unsigned long delta;

	if (hr_delta.tv64 == KTIME_MAX)
		return expires;

	/*
	 * Expired timer available, let it expire in the next tick
	 */
	if (hr_delta.tv64 <= 0)
		return now + 1;

	tsdelta = ktime_to_timespec(hr_delta);
	delta = timespec_to_jiffies(&tsdelta);

	/*
	 * Limit the delta to the max value, which is checked in
	 * tick_nohz_stop_sched_tick():
	 */
	if (delta > NEXT_TIMER_MAX_DELTA)
		delta = NEXT_TIMER_MAX_DELTA;

	/*
	 * Take rounding errors into account and make sure that it
	 * expires in the next tick. Otherwise we go into an endless
	 * ping pong due to tick_nohz_stop_sched_tick() retriggering
	 * the timer softirq
	 */
	if (delta < 1)
		delta = 1;
	now += delta;
	if (time_before(now, expires))
		return now;
	return expires;
}

/**
 * get_next_timer_interrupt - return the jiffy of the next pending timer
 * @now: current time (in jiffies)
 */
unsigned long get_next_timer_interrupt(unsigned long now)
{
	struct tvec_base *base = __this_cpu_read(tvec_bases);
	unsigned long expires = now + NEXT_TIMER_MAX_DELTA;

	/*
	 * Pretend that there is no timer pending if the cpu is offline.
	 * Possible pending timers will be migrated later to an active cpu.
	 */
	if (cpu_is_offline(smp_processor_id()))
		return expires;

	spin_lock(&base->lock);
	if (base->active_timers) {
		if (time_before_eq(base->next_timer, base->timer_jiffies))
			base->next_timer = __next_timer_interrupt(base);
		expires = base->next_timer;
	}
	spin_unlock(&base->lock);

	if (time_before_eq(expires, now))
		return now;

	return cmp_next_hrtimer_event(now, expires);
}
#endif

/*
 * Called from the timer interrupt handler to charge one tick to the current
 * process.  user_tick is 1 if the tick is user time, 0 for system.
 */
void update_process_times(int user_tick)
{
	struct task_struct *p = current;
	int cpu = smp_processor_id();

	/* Note: this timer irq context must be accounted for as well. */
	account_process_tick(p, user_tick);
	run_local_timers();
	rcu_check_callbacks(cpu, user_tick);
	printk_tick();
#ifdef CONFIG_IRQ_WORK
	if (in_irq())
		irq_work_run();
#endif
	scheduler_tick();
	run_posix_cpu_timers(p);
}

/*
 * This function runs timers and the timer-tq in bottom half context.
 */
static void run_timer_softirq(struct softirq_action *h)
{
	struct tvec_base *base = __this_cpu_read(tvec_bases);

	hrtimer_run_pending();

	if (time_after_eq(jiffies, base->timer_jiffies))
		__run_timers(base);
}

/*
 * Called by the local, per-CPU timer interrupt on SMP.
 */
void run_local_timers(void)
{
	hrtimer_run_queues();
	raise_softirq(TIMER_SOFTIRQ);
}

#ifdef __ARCH_WANT_SYS_ALARM

/*
 * For backwards compatibility?  This can be done in libc so Alpha
 * and all newer ports shouldn't need it.
 */
SYSCALL_DEFINE1(alarm, unsigned int, seconds)
{
	return alarm_setitimer(seconds);
}

#endif

1383 | #ifndef __alpha__ | |
1384 | ||
1385 | /* | |
1386 | * The Alpha uses getxpid, getxuid, and getxgid instead. Maybe this | |
1387 | * should be moved into arch/i386 instead? | |
1388 | */ | |
1389 | ||
1390 | /** | |
1391 | * sys_getpid - return the thread group id of the current process | |
1392 | * | |
1393 | * Note, despite the name, this returns the tgid not the pid. The tgid and | |
1394 | * the pid are identical unless CLONE_THREAD was specified on clone() in | |
1395 | * which case the tgid is the same in all threads of the same group. | |
1396 | * | |
1397 | * This is SMP safe as current->tgid does not change. | |
1398 | */ | |
58fd3aa2 | 1399 | SYSCALL_DEFINE0(getpid) |
1da177e4 | 1400 | { |
b488893a | 1401 | return task_tgid_vnr(current); |
1da177e4 LT |
1402 | } |
1403 | ||
1404 | /* | |
6997a6fa KK |
1405 | * Accessing ->real_parent is not SMP-safe, it could |
1406 | * change from under us. However, we can use a stale | |
1407 | * value of ->real_parent under rcu_read_lock(), see | |
1408 | * release_task()->call_rcu(delayed_put_task_struct). | |
1da177e4 | 1409 | */ |
dbf040d9 | 1410 | SYSCALL_DEFINE0(getppid) |
1da177e4 LT |
1411 | { |
1412 | int pid; | |
1da177e4 | 1413 | |
6997a6fa | 1414 | rcu_read_lock(); |
031af165 | 1415 | pid = task_tgid_vnr(rcu_dereference(current->real_parent)); |
6997a6fa | 1416 | rcu_read_unlock(); |
1da177e4 | 1417 | |
1da177e4 LT |
1418 | return pid; |
1419 | } | |
1420 | ||
dbf040d9 | 1421 | SYSCALL_DEFINE0(getuid) |
1da177e4 LT |
1422 | { |
1423 | /* Only we change this, so it's SMP-safe */ | |
a29c33f4 | 1424 | return from_kuid_munged(current_user_ns(), current_uid()); |
1da177e4 LT |
1425 | } |
1426 | ||
dbf040d9 | 1427 | SYSCALL_DEFINE0(geteuid) |
1da177e4 LT |
1428 | { |
1429 | /* Only we change this, so it's SMP-safe */ | |
a29c33f4 | 1430 | return from_kuid_munged(current_user_ns(), current_euid()); |
1da177e4 LT |
1431 | } |
1432 | ||
dbf040d9 | 1433 | SYSCALL_DEFINE0(getgid) |
1da177e4 LT |
1434 | { |
1435 | /* Only we change this, so it's SMP-safe */ | |
a29c33f4 | 1436 | return from_kgid_munged(current_user_ns(), current_gid()); |
1da177e4 LT |
1437 | } |
1438 | ||
dbf040d9 | 1439 | SYSCALL_DEFINE0(getegid) |
1da177e4 LT |
1440 | { |
1441 | /* Only we change this, so it's SMP-safe */ | |
a29c33f4 | 1442 | return from_kgid_munged(current_user_ns(), current_egid()); |
1da177e4 LT |
1443 | } |
1444 | ||
1445 | #endif | |
1446 | ||
1447 | static void process_timeout(unsigned long __data) | |
1448 | { | |
36c8b586 | 1449 | wake_up_process((struct task_struct *)__data); |
1da177e4 LT |
1450 | } |
1451 | ||
1452 | /** | |
1453 | * schedule_timeout - sleep until timeout | |
1454 | * @timeout: timeout value in jiffies | |
1455 | * | |
1456 | * Make the current task sleep until @timeout jiffies have | |
1457 | * elapsed. The routine will return immediately unless | |
1458 | * the current task state has been set (see set_current_state()). | |
1459 | * | |
1460 | * You can set the task state as follows: | |
1461 | * | |
1462 | * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to | |
1463 | * pass before the routine returns. The routine will return 0. | |
1464 | * | |
1465 | * %TASK_INTERRUPTIBLE - the routine may return early if a signal is | |
1466 | * delivered to the current task. In this case the remaining time | |
1467 | * in jiffies will be returned, or 0 if the timer expired in time. | |
1468 | * | |
1469 | * The current task state is guaranteed to be TASK_RUNNING when this | |
1470 | * routine returns. | |
1471 | * | |
1472 | * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule | |
1473 | * the CPU away without a bound on the timeout. In this case the return | |
1474 | * value will be %MAX_SCHEDULE_TIMEOUT. | |
1475 | * | |
1476 | * In all cases the return value is guaranteed to be non-negative. | |
1477 | */ | |
7ad5b3a5 | 1478 | signed long __sched schedule_timeout(signed long timeout) |
1da177e4 LT |
1479 | { |
1480 | struct timer_list timer; | |
1481 | unsigned long expire; | |
1482 | ||
1483 | switch (timeout) | |
1484 | { | |
1485 | case MAX_SCHEDULE_TIMEOUT: | |
1486 | /* | |
1487 | * This special case is for the caller's convenience. | |
1488 | * Nothing more. We could have taken | |
1489 | * MAX_SCHEDULE_TIMEOUT from one of the negative values, | |
1490 | * but returning a valid offset (>= 0) allows | |
1491 | * the caller to do whatever it wants with the retval. | |
1492 | */ | |
1493 | schedule(); | |
1494 | goto out; | |
1495 | default: | |
1496 | /* | |
1497 | * Another bit of paranoia. Note that the retval will be | |
1498 | * 0, since no piece of the kernel is supposed to check | |
1499 | * for a negative retval from schedule_timeout() (it | |
1500 | * should never happen anyway). The printk() below will | |
1501 | * tell you if something has gone wrong, and where. | |
1502 | */ | |
5b149bcc | 1503 | if (timeout < 0) { |
1da177e4 | 1504 | printk(KERN_ERR "schedule_timeout: wrong timeout " |
5b149bcc AM |
1505 | "value %lx\n", timeout); |
1506 | dump_stack(); | |
1da177e4 LT |
1507 | current->state = TASK_RUNNING; |
1508 | goto out; | |
1509 | } | |
1510 | } | |
1511 | ||
1512 | expire = timeout + jiffies; | |
1513 | ||
c6f3a97f | 1514 | setup_timer_on_stack(&timer, process_timeout, (unsigned long)current); |
597d0275 | 1515 | __mod_timer(&timer, expire, false, TIMER_NOT_PINNED); |
1da177e4 LT |
1516 | schedule(); |
1517 | del_singleshot_timer_sync(&timer); | |
1518 | ||
c6f3a97f TG |
1519 | /* Remove the timer from the object tracker */ |
1520 | destroy_timer_on_stack(&timer); | |
1521 | ||
1da177e4 LT |
1522 | timeout = expire - jiffies; |
1523 | ||
1524 | out: | |
1525 | return timeout < 0 ? 0 : timeout; | |
1526 | } | |
1da177e4 LT |
1527 | EXPORT_SYMBOL(schedule_timeout); |
1528 | ||
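/*
 * Illustrative sketch of the calling convention documented above;
 * wait_for_event_jiffies() is a hypothetical helper, not part of this
 * file. The task state must be set first, or schedule_timeout()
 * returns immediately.
 */
static int wait_for_event_jiffies(unsigned long timeout)
{
	set_current_state(TASK_INTERRUPTIBLE);
	timeout = schedule_timeout(timeout);

	/*
	 * 0 means the full timeout elapsed; non-zero is the time left
	 * because we were woken early (e.g. by a signal).
	 */
	return timeout ? -EINTR : 0;
}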
8a1c1757 AM |
1529 | /* |
1530 | * We can use __set_current_state() here because schedule_timeout() calls | |
1531 | * schedule() unconditionally. | |
1532 | */ | |
64ed93a2 NA |
1533 | signed long __sched schedule_timeout_interruptible(signed long timeout) |
1534 | { | |
a5a0d52c AM |
1535 | __set_current_state(TASK_INTERRUPTIBLE); |
1536 | return schedule_timeout(timeout); | |
64ed93a2 NA |
1537 | } |
1538 | EXPORT_SYMBOL(schedule_timeout_interruptible); | |
1539 | ||
294d5cc2 MW |
1540 | signed long __sched schedule_timeout_killable(signed long timeout) |
1541 | { | |
1542 | __set_current_state(TASK_KILLABLE); | |
1543 | return schedule_timeout(timeout); | |
1544 | } | |
1545 | EXPORT_SYMBOL(schedule_timeout_killable); | |
1546 | ||
64ed93a2 NA |
1547 | signed long __sched schedule_timeout_uninterruptible(signed long timeout) |
1548 | { | |
a5a0d52c AM |
1549 | __set_current_state(TASK_UNINTERRUPTIBLE); |
1550 | return schedule_timeout(timeout); | |
64ed93a2 NA |
1551 | } |
1552 | EXPORT_SYMBOL(schedule_timeout_uninterruptible); | |
1553 | ||
1da177e4 | 1554 | /* Thread ID - the internal kernel "pid" */ |
58fd3aa2 | 1555 | SYSCALL_DEFINE0(gettid) |
1da177e4 | 1556 | { |
b488893a | 1557 | return task_pid_vnr(current); |
1da177e4 LT |
1558 | } |
1559 | ||
2aae4a10 | 1560 | /** |
d4d23add | 1561 | * do_sysinfo - fill in sysinfo struct |
2aae4a10 | 1562 | * @info: pointer to buffer to fill |
6819457d | 1563 | */ |
d4d23add | 1564 | int do_sysinfo(struct sysinfo *info) |
1da177e4 | 1565 | { |
1da177e4 LT |
1566 | unsigned long mem_total, sav_total; |
1567 | unsigned int mem_unit, bitcount; | |
2d02494f | 1568 | struct timespec tp; |
1da177e4 | 1569 | |
d4d23add | 1570 | memset(info, 0, sizeof(struct sysinfo)); |
1da177e4 | 1571 | |
2d02494f TG |
1572 | ktime_get_ts(&tp); |
1573 | monotonic_to_bootbased(&tp); | |
1574 | info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0); | |
1da177e4 | 1575 | |
2d02494f | 1576 | get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT); |
1da177e4 | 1577 | |
2d02494f | 1578 | info->procs = nr_threads; |
1da177e4 | 1579 | |
d4d23add KM |
1580 | si_meminfo(info); |
1581 | si_swapinfo(info); | |
1da177e4 LT |
1582 | |
1583 | /* | |
1584 | * If the sum of all the available memory (i.e. ram + swap) | |
1585 | * is less than can be stored in a 32-bit unsigned long then | |
1586 | * we can be binary compatible with 2.2.x kernels. If not, | |
1587 | * well, in that case 2.2.x was broken anyway... | |
1588 | * | |
1589 | * -Erik Andersen <andersee@debian.org> | |
1590 | */ | |
1591 | ||
d4d23add KM |
1592 | mem_total = info->totalram + info->totalswap; |
1593 | if (mem_total < info->totalram || mem_total < info->totalswap) | |
1da177e4 LT |
1594 | goto out; |
1595 | bitcount = 0; | |
d4d23add | 1596 | mem_unit = info->mem_unit; |
1da177e4 LT |
1597 | while (mem_unit > 1) { |
1598 | bitcount++; | |
1599 | mem_unit >>= 1; | |
1600 | sav_total = mem_total; | |
1601 | mem_total <<= 1; | |
1602 | if (mem_total < sav_total) | |
1603 | goto out; | |
1604 | } | |
1605 | ||
1606 | /* | |
1607 | * If mem_total did not overflow, multiply all memory values by | |
d4d23add | 1608 | * info->mem_unit and set it to 1. This leaves things compatible |
1da177e4 LT |
1609 | * with 2.2.x, and also retains compatibility with earlier 2.4.x |
1610 | * kernels... | |
1611 | */ | |
1612 | ||
d4d23add KM |
1613 | info->mem_unit = 1; |
1614 | info->totalram <<= bitcount; | |
1615 | info->freeram <<= bitcount; | |
1616 | info->sharedram <<= bitcount; | |
1617 | info->bufferram <<= bitcount; | |
1618 | info->totalswap <<= bitcount; | |
1619 | info->freeswap <<= bitcount; | |
1620 | info->totalhigh <<= bitcount; | |
1621 | info->freehigh <<= bitcount; | |
1622 | ||
1623 | out: | |
1624 | return 0; | |
1625 | } | |
1626 | ||
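/*
 * Worked example of the scaling above (hypothetical values):
 * mem_unit = 4096 and totalram = 2097152 pages, totalswap = 0.
 * The while loop runs 12 times (4096 == 1 << 12) without the doubled
 * mem_total overflowing, so bitcount = 12; the code then reports
 * totalram << 12 = 8589934592 bytes (8 GiB) with mem_unit = 1.
 */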
1e7bfb21 | 1627 | SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info) |
d4d23add KM |
1628 | { |
1629 | struct sysinfo val; | |
1630 | ||
1631 | do_sysinfo(&val); | |
1da177e4 | 1632 | |
1da177e4 LT |
1633 | if (copy_to_user(info, &val, sizeof(struct sysinfo))) |
1634 | return -EFAULT; | |
1635 | ||
1636 | return 0; | |
1637 | } | |
1638 | ||
b4be6258 | 1639 | static int __cpuinit init_timers_cpu(int cpu) |
1da177e4 LT |
1640 | { |
1641 | int j; | |
a6fa8e5a | 1642 | struct tvec_base *base; |
b4be6258 | 1643 | static char __cpuinitdata tvec_base_done[NR_CPUS]; |
55c888d6 | 1644 | |
ba6edfcd | 1645 | if (!tvec_base_done[cpu]) { |
a4a6198b JB |
1646 | static char boot_done; |
1647 | ||
a4a6198b | 1648 | if (boot_done) { |
ba6edfcd AM |
1649 | /* | |
1650 | * The APs (secondary CPUs) use this path later in boot. | |
1651 | */ | |
94f6030c CL |
1652 | base = kmalloc_node(sizeof(*base), |
1653 | GFP_KERNEL | __GFP_ZERO, | |
a4a6198b JB |
1654 | cpu_to_node(cpu)); |
1655 | if (!base) | |
1656 | return -ENOMEM; | |
6e453a67 VP |
1657 | |
1658 | /* Make sure that tvec_base is 2-byte aligned */ | |
1659 | if (tbase_get_deferrable(base)) { | |
1660 | WARN_ON(1); | |
1661 | kfree(base); | |
1662 | return -ENOMEM; | |
1663 | } | |
ba6edfcd | 1664 | per_cpu(tvec_bases, cpu) = base; |
a4a6198b | 1665 | } else { |
ba6edfcd AM |
1666 | /* |
1667 | * This is for the boot CPU - we use compile-time | |
1668 | * static initialisation because per-cpu memory isn't | |
1669 | * ready yet and because the memory allocators are not | |
1670 | * initialised either. | |
1671 | */ | |
a4a6198b | 1672 | boot_done = 1; |
ba6edfcd | 1673 | base = &boot_tvec_bases; |
a4a6198b | 1674 | } |
ba6edfcd AM |
1675 | tvec_base_done[cpu] = 1; |
1676 | } else { | |
1677 | base = per_cpu(tvec_bases, cpu); | |
a4a6198b | 1678 | } |
ba6edfcd | 1679 | |
3691c519 | 1680 | spin_lock_init(&base->lock); |
d730e882 | 1681 | |
1da177e4 LT |
1682 | for (j = 0; j < TVN_SIZE; j++) { |
1683 | INIT_LIST_HEAD(base->tv5.vec + j); | |
1684 | INIT_LIST_HEAD(base->tv4.vec + j); | |
1685 | INIT_LIST_HEAD(base->tv3.vec + j); | |
1686 | INIT_LIST_HEAD(base->tv2.vec + j); | |
1687 | } | |
1688 | for (j = 0; j < TVR_SIZE; j++) | |
1689 | INIT_LIST_HEAD(base->tv1.vec + j); | |
1690 | ||
1691 | base->timer_jiffies = jiffies; | |
97fd9ed4 | 1692 | base->next_timer = base->timer_jiffies; |
99d5f3aa | 1693 | base->active_timers = 0; |
a4a6198b | 1694 | return 0; |
1da177e4 LT |
1695 | } |
1696 | ||
1697 | #ifdef CONFIG_HOTPLUG_CPU | |
a6fa8e5a | 1698 | static void migrate_timer_list(struct tvec_base *new_base, struct list_head *head) |
1da177e4 LT |
1699 | { |
1700 | struct timer_list *timer; | |
1701 | ||
1702 | while (!list_empty(head)) { | |
b5e61818 | 1703 | timer = list_first_entry(head, struct timer_list, entry); |
99d5f3aa | 1704 | /* We ignore the accounting on the dying cpu */ |
ec44bc7a | 1705 | detach_timer(timer, false); |
6e453a67 | 1706 | timer_set_base(timer, new_base); |
1da177e4 | 1707 | internal_add_timer(new_base, timer); |
1da177e4 | 1708 | } |
1da177e4 LT |
1709 | } |
1710 | ||
48ccf3da | 1711 | static void __cpuinit migrate_timers(int cpu) |
1da177e4 | 1712 | { |
a6fa8e5a PM |
1713 | struct tvec_base *old_base; |
1714 | struct tvec_base *new_base; | |
1da177e4 LT |
1715 | int i; |
1716 | ||
1717 | BUG_ON(cpu_online(cpu)); | |
a4a6198b JB |
1718 | old_base = per_cpu(tvec_bases, cpu); |
1719 | new_base = get_cpu_var(tvec_bases); | |
d82f0b0f ON |
1720 | /* | |
1721 | * The caller is globally serialized and nobody else | |
1722 | * takes two locks at once, so deadlock is not possible. | |
1723 | */ | |
1724 | spin_lock_irq(&new_base->lock); | |
0d180406 | 1725 | spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING); |
3691c519 ON |
1726 | |
1727 | BUG_ON(old_base->running_timer); | |
1da177e4 | 1728 | |
1da177e4 | 1729 | for (i = 0; i < TVR_SIZE; i++) |
55c888d6 ON |
1730 | migrate_timer_list(new_base, old_base->tv1.vec + i); |
1731 | for (i = 0; i < TVN_SIZE; i++) { | |
1732 | migrate_timer_list(new_base, old_base->tv2.vec + i); | |
1733 | migrate_timer_list(new_base, old_base->tv3.vec + i); | |
1734 | migrate_timer_list(new_base, old_base->tv4.vec + i); | |
1735 | migrate_timer_list(new_base, old_base->tv5.vec + i); | |
1736 | } | |
1737 | ||
0d180406 | 1738 | spin_unlock(&old_base->lock); |
d82f0b0f | 1739 | spin_unlock_irq(&new_base->lock); |
1da177e4 | 1740 | put_cpu_var(tvec_bases); |
1da177e4 LT |
1741 | } |
1742 | #endif /* CONFIG_HOTPLUG_CPU */ | |
1743 | ||
8c78f307 | 1744 | static int __cpuinit timer_cpu_notify(struct notifier_block *self, |
1da177e4 LT |
1745 | unsigned long action, void *hcpu) |
1746 | { | |
1747 | long cpu = (long)hcpu; | |
80b5184c AM |
1748 | int err; |
1749 | ||
1da177e4 LT |
1750 | switch(action) { |
1751 | case CPU_UP_PREPARE: | |
8bb78442 | 1752 | case CPU_UP_PREPARE_FROZEN: |
80b5184c AM |
1753 | err = init_timers_cpu(cpu); |
1754 | if (err < 0) | |
1755 | return notifier_from_errno(err); | |
1da177e4 LT |
1756 | break; |
1757 | #ifdef CONFIG_HOTPLUG_CPU | |
1758 | case CPU_DEAD: | |
8bb78442 | 1759 | case CPU_DEAD_FROZEN: |
1da177e4 LT |
1760 | migrate_timers(cpu); |
1761 | break; | |
1762 | #endif | |
1763 | default: | |
1764 | break; | |
1765 | } | |
1766 | return NOTIFY_OK; | |
1767 | } | |
1768 | ||
8c78f307 | 1769 | static struct notifier_block __cpuinitdata timers_nb = { |
1da177e4 LT |
1770 | .notifier_call = timer_cpu_notify, |
1771 | }; | |
1772 | ||
1773 | ||
1774 | void __init init_timers(void) | |
1775 | { | |
e52b1db3 TH |
1776 | int err; |
1777 | ||
1778 | /* ensure there are enough low bits for flags in timer->base pointer */ | |
1779 | BUILD_BUG_ON(__alignof__(struct tvec_base) & TIMER_FLAG_MASK); | |
07dccf33 | 1780 | |
e52b1db3 TH |
1781 | err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE, |
1782 | (void *)(long)smp_processor_id()); | |
82f67cd9 IM |
1783 | init_timer_stats(); |
1784 | ||
9e506f7a | 1785 | BUG_ON(err != NOTIFY_OK); |
1da177e4 | 1786 | register_cpu_notifier(&timers_nb); |
962cf36c | 1787 | open_softirq(TIMER_SOFTIRQ, run_timer_softirq); |
1da177e4 LT |
1788 | } |
1789 | ||
1da177e4 LT |
1790 | /** |
1791 | * msleep - sleep safely even with waitqueue interruptions | |
1792 | * @msecs: Time in milliseconds to sleep for | |
1793 | */ | |
1794 | void msleep(unsigned int msecs) | |
1795 | { | |
1796 | unsigned long timeout = msecs_to_jiffies(msecs) + 1; | |
1797 | ||
75bcc8c5 NA |
1798 | while (timeout) |
1799 | timeout = schedule_timeout_uninterruptible(timeout); | |
1da177e4 LT |
1800 | } |
1801 | ||
1802 | EXPORT_SYMBOL(msleep); | |
1803 | ||
1804 | /** | |
96ec3efd | 1805 | * msleep_interruptible - sleep waiting for signals |
1da177e4 LT |
1806 | * @msecs: Time in milliseconds to sleep for | |
1807 | * Returns the time in ms left to sleep if woken by a signal, else 0. */ | |
1808 | unsigned long msleep_interruptible(unsigned int msecs) | |
1809 | { | |
1810 | unsigned long timeout = msecs_to_jiffies(msecs) + 1; | |
1811 | ||
75bcc8c5 NA |
1812 | while (timeout && !signal_pending(current)) |
1813 | timeout = schedule_timeout_interruptible(timeout); | |
1da177e4 LT |
1814 | return jiffies_to_msecs(timeout); |
1815 | } | |
1816 | ||
1817 | EXPORT_SYMBOL(msleep_interruptible); | |
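/*
 * Illustrative sketch (hypothetical driver snippet, not part of this
 * file) contrasting the two helpers: msleep() always sleeps at least
 * the full time, while msleep_interruptible() can be cut short by a
 * signal and returns the time left.
 */
static int wait_for_hw(void)
{
	unsigned long left;

	msleep(20);			/* sleeps at least 20 ms */

	left = msleep_interruptible(100);
	if (left)			/* a signal woke us early */
		return -EINTR;
	return 0;
}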
5e7f5a17 PP |
1818 | |
1819 | static int __sched do_usleep_range(unsigned long min, unsigned long max) | |
1820 | { | |
1821 | ktime_t kmin; | |
1822 | unsigned long delta; | |
1823 | ||
1824 | kmin = ktime_set(0, min * NSEC_PER_USEC); | |
1825 | delta = (max - min) * NSEC_PER_USEC; | |
1826 | return schedule_hrtimeout_range(&kmin, delta, HRTIMER_MODE_REL); | |
1827 | } | |
1828 | ||
1829 | /** | |
1830 | * usleep_range - Drop-in replacement for udelay where wakeup is flexible | |
1831 | * @min: Minimum time in usecs to sleep | |
1832 | * @max: Maximum time in usecs to sleep | |
1833 | */ | |
1834 | void usleep_range(unsigned long min, unsigned long max) | |
1835 | { | |
1836 | __set_current_state(TASK_UNINTERRUPTIBLE); | |
1837 | do_usleep_range(min, max); | |
1838 | } | |
1839 | EXPORT_SYMBOL(usleep_range); |
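/*
 * Illustrative sketch (hypothetical device and ready bit, not part of
 * this file): a wide [min, max] window gives the hrtimer code room to
 * coalesce the wakeup with other pending events, via the slack passed
 * to schedule_hrtimeout_range() above.
 */
static int wait_ready(void __iomem *reg)
{
	int tries;

	for (tries = 0; tries < 50; tries++) {
		if (readl(reg) & 0x1)	/* assumed ready bit */
			return 0;
		usleep_range(100, 200);
	}
	return -ETIMEDOUT;
}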