/*
 * linux/kernel/timer.c
 *
 * Kernel internal timers, basic process system calls
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * 1997-01-28 Modified by Finn Arne Gangstad to make timers scale better.
 *
 * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
 *            "A Kernel Model for Precision Timekeeping" by Dave Mills
 * 1998-12-24 Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
 *            serialize accesses to xtime/lost_ticks).
 *            Copyright (C) 1998 Andrea Arcangeli
 * 1999-03-10 Improved NTP compatibility by Ulrich Windl
 * 2002-05-31 Move sys_sysinfo here and make its locking sane, Robert Love
 * 2000-10-05 Implemented scalable SMP per-CPU timer handling.
 *            Copyright (C) 2000, 2001, 2002 Ingo Molnar
 *            Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 */

#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/tick.h>
#include <linux/kallsyms.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>

u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

/*
 * per-CPU timer vector definitions:
 */
#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)

typedef struct tvec_s {
	struct list_head vec[TVN_SIZE];
} tvec_t;

typedef struct tvec_root_s {
	struct list_head vec[TVR_SIZE];
} tvec_root_t;

struct tvec_t_base_s {
	spinlock_t lock;
	struct timer_list *running_timer;
	unsigned long timer_jiffies;
	tvec_root_t tv1;
	tvec_t tv2;
	tvec_t tv3;
	tvec_t tv4;
	tvec_t tv5;
} ____cacheline_aligned;

typedef struct tvec_t_base_s tvec_base_t;

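/*
 * With the default (non-CONFIG_BASE_SMALL) geometry, TVR_BITS = 8 and
 * TVN_BITS = 6, so tv1 spans 256 jiffies and each further level
 * multiplies the reach by 64:
 *
 *	tv1: 2^8       =        256 jiffies
 *	tv2: 2^(8+6)   =     16,384 jiffies
 *	tv3: 2^(8+2*6) =  1,048,576 jiffies
 *	tv4: 2^(8+3*6) = 67,108,864 jiffies
 *	tv5: 2^(8+4*6) = 2^32 jiffies (the full 32-bit range)
 */
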
tvec_base_t boot_tvec_bases;
EXPORT_SYMBOL(boot_tvec_bases);
static DEFINE_PER_CPU(tvec_base_t *, tvec_bases) = &boot_tvec_bases;

/*
 * Note that all tvec_bases are 2 byte aligned and the lower bit of
 * base in timer_list is guaranteed to be zero. Use the LSB as a
 * flag to indicate whether the timer is deferrable.
 */
#define TBASE_DEFERRABLE_FLAG		(0x1)

/* Functions below help us manage 'deferrable' flag */
static inline unsigned int tbase_get_deferrable(tvec_base_t *base)
{
	return ((unsigned int)(unsigned long)base & TBASE_DEFERRABLE_FLAG);
}

static inline tvec_base_t *tbase_get_base(tvec_base_t *base)
{
	return ((tvec_base_t *)((unsigned long)base & ~TBASE_DEFERRABLE_FLAG));
}

static inline void timer_set_deferrable(struct timer_list *timer)
{
	timer->base = ((tvec_base_t *)((unsigned long)(timer->base) |
				       TBASE_DEFERRABLE_FLAG));
}

static inline void
timer_set_base(struct timer_list *timer, tvec_base_t *new_base)
{
	timer->base = (tvec_base_t *)((unsigned long)(new_base) |
				      tbase_get_deferrable(timer->base));
}

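/*
 * A minimal sketch of the tagging round-trip above (illustrative only;
 * `t` is a hypothetical caller-owned timer):
 *
 *	timer_set_deferrable(&t);	sets the LSB in t.base
 *	tbase_get_deferrable(t.base)	-> 1
 *	tbase_get_base(t.base)		-> the real, untagged base pointer
 *
 * timer_set_base() preserves the flag bit while replacing the pointer,
 * so a deferrable timer stays deferrable when it migrates between CPUs.
 */
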
/**
 * __round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies(unsigned long j, int cpu)
{
	int rem;
	unsigned long original = j;

	/*
	 * We don't want all cpus firing their timers at once hitting the
	 * same lock or cachelines, so we skew each extra cpu with an extra
	 * 3 jiffies. This 3 jiffies came originally from the mm/ code which
	 * already did this.
	 * The skew is done by adding 3*cpunr, then rounding, then subtracting
	 * this extra offset again.
	 */
	j += cpu * 3;

	rem = j % HZ;

	/*
	 * If the target jiffy is just after a whole second (which can happen
	 * due to delays of the timer irq, long irq off times etc etc) then
	 * we should round down to the whole second, not up. Use 1/4th second
	 * as cutoff for this rounding as an extreme upper bound for this.
	 */
	if (rem < HZ/4) /* round down */
		j = j - rem;
	else /* round up */
		j = j - rem + HZ;

	/* now that we have rounded, subtract the extra skew again */
	j -= cpu * 3;

	if (j <= jiffies) /* rounding ate our timeout entirely; */
		return original;
	return j;
}
EXPORT_SYMBOL_GPL(__round_jiffies);

/**
 * __round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies_relative(unsigned long j, int cpu)
{
	/*
	 * In theory the following code can skip a jiffy in case jiffies
	 * increments right between the addition and the later subtraction.
	 * However since the entire point of this function is to use
	 * approximate timeouts, it's entirely ok to not handle that.
	 */
	return __round_jiffies(j + jiffies, cpu) - jiffies;
}
EXPORT_SYMBOL_GPL(__round_jiffies_relative);

/**
 * round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies(unsigned long j)
{
	return __round_jiffies(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies);

/**
 * round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies_relative(unsigned long j)
{
	return __round_jiffies_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_relative);

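/*
 * Typical use, for a periodic housekeeping timer that only needs
 * roughly whole-second granularity (an illustrative sketch; `my_timer`
 * is caller-owned, not defined in this file):
 *
 *	mod_timer(&my_timer, round_jiffies(jiffies + 60 * HZ));
 *
 * All such timers on a CPU then expire on the same (per-CPU skewed)
 * second boundary, so an otherwise idle CPU wakes up once for the
 * whole batch instead of once per timer.
 */
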

static inline void set_running_timer(tvec_base_t *base,
				     struct timer_list *timer)
{
#ifdef CONFIG_SMP
	base->running_timer = timer;
#endif
}

static void internal_add_timer(tvec_base_t *base, struct timer_list *timer)
{
	unsigned long expires = timer->expires;
	unsigned long idx = expires - base->timer_jiffies;
	struct list_head *vec;

	if (idx < TVR_SIZE) {
		int i = expires & TVR_MASK;
		vec = base->tv1.vec + i;
	} else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
		int i = (expires >> TVR_BITS) & TVN_MASK;
		vec = base->tv2.vec + i;
	} else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
		vec = base->tv3.vec + i;
	} else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
		vec = base->tv4.vec + i;
	} else if ((signed long) idx < 0) {
		/*
		 * Can happen if you add a timer with expires == jiffies,
		 * or you set a timer to go off in the past
		 */
		vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
	} else {
		int i;
		/* If the timeout is larger than 0xffffffff on 64-bit
		 * architectures then we use the maximum timeout:
		 */
		if (idx > 0xffffffffUL) {
			idx = 0xffffffffUL;
			expires = idx + base->timer_jiffies;
		}
		i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
		vec = base->tv5.vec + i;
	}
	/*
	 * Timers are FIFO:
	 */
	list_add_tail(&timer->entry, vec);
}

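/*
 * Worked example of the level selection above, assuming the default
 * geometry (TVR_BITS = 8, TVN_BITS = 6) and a timer set 300 jiffies
 * ahead of base->timer_jiffies:
 *
 *	idx = 300, which is >= TVR_SIZE (256) but < 1 << 14 (16384),
 *	so the timer lands in tv2 at slot (expires >> 8) & 63.
 *
 * It only gets an exact tv1 slot later, when base->timer_jiffies has
 * advanced far enough for cascade() to pull it down a level.
 */
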
#ifdef CONFIG_TIMER_STATS
void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
{
	if (timer->start_site)
		return;

	timer->start_site = addr;
	memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
	timer->start_pid = current->pid;
}

static void timer_stats_account_timer(struct timer_list *timer)
{
	unsigned int flag = 0;

	if (unlikely(tbase_get_deferrable(timer->base)))
		flag |= TIMER_STATS_FLAG_DEFERRABLE;

	timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
				 timer->function, timer->start_comm, flag);
}

#else
static void timer_stats_account_timer(struct timer_list *timer) {}
#endif

/**
 * init_timer - initialize a timer.
 * @timer: the timer to be initialized
 *
 * init_timer() must be done to a timer prior to calling *any* of the
 * other timer functions.
 */
void fastcall init_timer(struct timer_list *timer)
{
	timer->entry.next = NULL;
	timer->base = __raw_get_cpu_var(tvec_bases);
#ifdef CONFIG_TIMER_STATS
	timer->start_site = NULL;
	timer->start_pid = -1;
	memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
}
EXPORT_SYMBOL(init_timer);

void fastcall init_timer_deferrable(struct timer_list *timer)
{
	init_timer(timer);
	timer_set_deferrable(timer);
}
EXPORT_SYMBOL(init_timer_deferrable);

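/*
 * Typical setup sequence (an illustrative sketch; my_timer, my_func and
 * my_data are caller-side names, nothing defined in this file):
 *
 *	init_timer(&my_timer);
 *	my_timer.function = my_func;	must be void my_func(unsigned long)
 *	my_timer.data = my_data;
 *	mod_timer(&my_timer, jiffies + HZ);	fires in ~1 second
 *
 * setup_timer() from <linux/timer.h> collapses the first three steps
 * into a single call.
 */
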
static inline void detach_timer(struct timer_list *timer,
				int clear_pending)
{
	struct list_head *entry = &timer->entry;

	__list_del(entry->prev, entry->next);
	if (clear_pending)
		entry->next = NULL;
	entry->prev = LIST_POISON2;
}

/*
 * We are using hashed locking: holding per_cpu(tvec_bases).lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on ->tvX lists.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
static tvec_base_t *lock_timer_base(struct timer_list *timer,
				    unsigned long *flags)
	__acquires(timer->base->lock)
{
	tvec_base_t *base;

	for (;;) {
		tvec_base_t *prelock_base = timer->base;
		base = tbase_get_base(prelock_base);
		if (likely(base != NULL)) {
			spin_lock_irqsave(&base->lock, *flags);
			if (likely(prelock_base == timer->base))
				return base;
			/* The timer has migrated to another CPU */
			spin_unlock_irqrestore(&base->lock, *flags);
		}
		cpu_relax();
	}
}

int __mod_timer(struct timer_list *timer, unsigned long expires)
{
	tvec_base_t *base, *new_base;
	unsigned long flags;
	int ret = 0;

	timer_stats_timer_set_start_info(timer);
	BUG_ON(!timer->function);

	base = lock_timer_base(timer, &flags);

	if (timer_pending(timer)) {
		detach_timer(timer, 0);
		ret = 1;
	}

	new_base = __get_cpu_var(tvec_bases);

	if (base != new_base) {
		/*
		 * We are trying to schedule the timer on the local CPU.
		 * However we can't change timer's base while it is running,
		 * otherwise del_timer_sync() can't detect that the timer's
		 * handler has not yet finished. This also guarantees that
		 * the timer is serialized wrt itself.
		 */
		if (likely(base->running_timer != timer)) {
			/* See the comment in lock_timer_base() */
			timer_set_base(timer, NULL);
			spin_unlock(&base->lock);
			base = new_base;
			spin_lock(&base->lock);
			timer_set_base(timer, base);
		}
	}

	timer->expires = expires;
	internal_add_timer(base, timer);
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}

EXPORT_SYMBOL(__mod_timer);

/**
 * add_timer_on - start a timer on a particular CPU
 * @timer: the timer to be added
 * @cpu: the CPU to start it on
 *
 * This is not very scalable on SMP. Double adds are not possible.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
	tvec_base_t *base = per_cpu(tvec_bases, cpu);
	unsigned long flags;

	timer_stats_timer_set_start_info(timer);
	BUG_ON(timer_pending(timer) || !timer->function);
	spin_lock_irqsave(&base->lock, flags);
	timer_set_base(timer, base);
	internal_add_timer(base, timer);
	spin_unlock_irqrestore(&base->lock, flags);
}


/**
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer() is a more efficient way to update the expires field of an
 * active timer (if the timer is inactive it will be activated)
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
	BUG_ON(!timer->function);

	timer_stats_timer_set_start_info(timer);
	/*
	 * This is a common optimization triggered by the
	 * networking code - if the timer is re-modified
	 * to be the same thing then just return:
	 */
	if (timer->expires == expires && timer_pending(timer))
		return 1;

	return __mod_timer(timer, expires);
}

EXPORT_SYMBOL(mod_timer);

/**
 * del_timer - deactivate a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and inactive
 * timers.
 *
 * The function returns whether it has deactivated a pending timer or not.
 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
 * active timer returns 1.)
 */
int del_timer(struct timer_list *timer)
{
	tvec_base_t *base;
	unsigned long flags;
	int ret = 0;

	timer_stats_timer_clear_start_info(timer);
	if (timer_pending(timer)) {
		base = lock_timer_base(timer, &flags);
		if (timer_pending(timer)) {
			detach_timer(timer, 1);
			ret = 1;
		}
		spin_unlock_irqrestore(&base->lock, flags);
	}

	return ret;
}

EXPORT_SYMBOL(del_timer);

#ifdef CONFIG_SMP
/**
 * try_to_del_timer_sync - Try to deactivate a timer
 * @timer: timer to del
 *
 * This function tries to deactivate a timer. Upon successful (ret >= 0)
 * exit the timer is not queued and the handler is not running on any CPU.
 *
 * It must not be called from interrupt contexts.
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
	tvec_base_t *base;
	unsigned long flags;
	int ret = -1;

	base = lock_timer_base(timer, &flags);

	if (base->running_timer == timer)
		goto out;

	ret = 0;
	if (timer_pending(timer)) {
		detach_timer(timer, 1);
		ret = 1;
	}
out:
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}

EXPORT_SYMBOL(try_to_del_timer_sync);

/**
 * del_timer_sync - deactivate a timer and wait for the handler to finish.
 * @timer: the timer to be deactivated
 *
 * This function only differs from del_timer() on SMP: besides deactivating
 * the timer it also makes sure the handler has finished executing on other
 * CPUs.
 *
 * Synchronization rules: Callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts. The caller must not hold locks which would prevent
 * completion of the timer's handler. The timer's handler must not call
 * add_timer_on(). Upon exit the timer is not queued and the handler is
 * not running on any CPU.
 *
 * The function returns whether it has deactivated a pending timer or not.
 */
int del_timer_sync(struct timer_list *timer)
{
	for (;;) {
		int ret = try_to_del_timer_sync(timer);
		if (ret >= 0)
			return ret;
		cpu_relax();
	}
}

EXPORT_SYMBOL(del_timer_sync);
#endif

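/*
 * A common teardown pattern (an illustrative sketch; my_timer is a
 * caller-owned timer, and the caller must make sure nothing re-arms it
 * concurrently):
 *
 *	del_timer_sync(&my_timer);	e.g. from module exit / device close
 *
 * On return the handler is guaranteed not to be running on any CPU, so
 * data used by the handler can safely be freed afterwards.
 */
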
static int cascade(tvec_base_t *base, tvec_t *tv, int index)
{
	/* cascade all the timers from tv up one level */
	struct timer_list *timer, *tmp;
	struct list_head tv_list;

	list_replace_init(tv->vec + index, &tv_list);

	/*
	 * We are removing _all_ timers from the list, so we
	 * don't have to detach them individually.
	 */
	list_for_each_entry_safe(timer, tmp, &tv_list, entry) {
		BUG_ON(tbase_get_base(timer->base) != base);
		internal_add_timer(base, timer);
	}

	return index;
}

#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)

/**
 * __run_timers - run all expired timers (if any) on this CPU.
 * @base: the timer vector to be processed.
 *
 * This function cascades all vectors and executes all expired timer
 * vectors.
 */
static inline void __run_timers(tvec_base_t *base)
{
	struct timer_list *timer;

	spin_lock_irq(&base->lock);
	while (time_after_eq(jiffies, base->timer_jiffies)) {
		struct list_head work_list;
		struct list_head *head = &work_list;
		int index = base->timer_jiffies & TVR_MASK;

		/*
		 * Cascade timers:
		 */
		if (!index &&
			(!cascade(base, &base->tv2, INDEX(0))) &&
				(!cascade(base, &base->tv3, INDEX(1))) &&
					!cascade(base, &base->tv4, INDEX(2)))
			cascade(base, &base->tv5, INDEX(3));
		++base->timer_jiffies;
		list_replace_init(base->tv1.vec + index, &work_list);
		while (!list_empty(head)) {
			void (*fn)(unsigned long);
			unsigned long data;

			timer = list_first_entry(head, struct timer_list, entry);
			fn = timer->function;
			data = timer->data;

			timer_stats_account_timer(timer);

			set_running_timer(base, timer);
			detach_timer(timer, 1);
			spin_unlock_irq(&base->lock);
			{
				int preempt_count = preempt_count();
				fn(data);
				if (preempt_count != preempt_count()) {
					printk(KERN_WARNING "huh, entered %p "
					       "with preempt_count %08x, exited"
					       " with %08x?\n",
					       fn, preempt_count,
					       preempt_count());
					BUG();
				}
			}
			spin_lock_irq(&base->lock);
		}
	}
	set_running_timer(base, NULL);
	spin_unlock_irq(&base->lock);
}

#if defined(CONFIG_NO_IDLE_HZ) || defined(CONFIG_NO_HZ)
/*
 * Find out when the next timer event is due to happen. This
 * is used on S/390 to stop all activity when a CPU is idle.
 * This function needs to be called with interrupts disabled.
 */
static unsigned long __next_timer_interrupt(tvec_base_t *base)
{
	unsigned long timer_jiffies = base->timer_jiffies;
	unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA;
	int index, slot, array, found = 0;
	struct timer_list *nte;
	tvec_t *varray[4];

	/* Look for timer events in tv1. */
	index = slot = timer_jiffies & TVR_MASK;
	do {
		list_for_each_entry(nte, base->tv1.vec + slot, entry) {
			if (tbase_get_deferrable(nte->base))
				continue;

			found = 1;
			expires = nte->expires;
			/* Look at the cascade bucket(s)? */
			if (!index || slot < index)
				goto cascade;
			return expires;
		}
		slot = (slot + 1) & TVR_MASK;
	} while (slot != index);

cascade:
	/* Calculate the next cascade event */
	if (index)
		timer_jiffies += TVR_SIZE - index;
	timer_jiffies >>= TVR_BITS;

	/* Check tv2-tv5. */
	varray[0] = &base->tv2;
	varray[1] = &base->tv3;
	varray[2] = &base->tv4;
	varray[3] = &base->tv5;

	for (array = 0; array < 4; array++) {
		tvec_t *varp = varray[array];

		index = slot = timer_jiffies & TVN_MASK;
		do {
			list_for_each_entry(nte, varp->vec + slot, entry) {
				found = 1;
				if (time_before(nte->expires, expires))
					expires = nte->expires;
			}
			/*
			 * Do we still search for the first timer or are
			 * we looking up the cascade buckets ?
			 */
			if (found) {
				/* Look at the cascade bucket(s)? */
				if (!index || slot < index)
					break;
				return expires;
			}
			slot = (slot + 1) & TVN_MASK;
		} while (slot != index);

		if (index)
			timer_jiffies += TVN_SIZE - index;
		timer_jiffies >>= TVN_BITS;
	}
	return expires;
}

/*
 * Check if the next hrtimer event is before the next timer wheel
 * event:
 */
static unsigned long cmp_next_hrtimer_event(unsigned long now,
					    unsigned long expires)
{
	ktime_t hr_delta = hrtimer_get_next_event();
	struct timespec tsdelta;
	unsigned long delta;

	if (hr_delta.tv64 == KTIME_MAX)
		return expires;

	/*
	 * Expired timer available, let it expire in the next tick
	 */
	if (hr_delta.tv64 <= 0)
		return now + 1;

	tsdelta = ktime_to_timespec(hr_delta);
	delta = timespec_to_jiffies(&tsdelta);

	/*
	 * Limit the delta to the max value, which is checked in
	 * tick_nohz_stop_sched_tick():
	 */
	if (delta > NEXT_TIMER_MAX_DELTA)
		delta = NEXT_TIMER_MAX_DELTA;

	/*
	 * Take rounding errors into account and make sure that it
	 * expires in the next tick. Otherwise we go into an endless
	 * ping pong due to tick_nohz_stop_sched_tick() retriggering
	 * the timer softirq
	 */
	if (delta < 1)
		delta = 1;
	now += delta;
	if (time_before(now, expires))
		return now;
	return expires;
}

/**
 * get_next_timer_interrupt - return the jiffy of the next pending timer
 * @now: current time (in jiffies)
 */
unsigned long get_next_timer_interrupt(unsigned long now)
{
	tvec_base_t *base = __get_cpu_var(tvec_bases);
	unsigned long expires;

	spin_lock(&base->lock);
	expires = __next_timer_interrupt(base);
	spin_unlock(&base->lock);

	if (time_before_eq(expires, now))
		return now;

	return cmp_next_hrtimer_event(now, expires);
}

#ifdef CONFIG_NO_IDLE_HZ
unsigned long next_timer_interrupt(void)
{
	return get_next_timer_interrupt(jiffies);
}
#endif

#endif

/*
 * Called from the timer interrupt handler to charge one tick to the current
 * process. user_tick is 1 if the tick is user time, 0 for system.
 */
void update_process_times(int user_tick)
{
	struct task_struct *p = current;
	int cpu = smp_processor_id();

	/* Note: this timer irq context must be accounted for as well. */
	if (user_tick)
		account_user_time(p, jiffies_to_cputime(1));
	else
		account_system_time(p, HARDIRQ_OFFSET, jiffies_to_cputime(1));
	run_local_timers();
	if (rcu_pending(cpu))
		rcu_check_callbacks(cpu, user_tick);
	scheduler_tick();
	run_posix_cpu_timers(p);
}

/*
 * Nr of active tasks - counted in fixed-point numbers
 */
static unsigned long count_active_tasks(void)
{
	return nr_active() * FIXED_1;
}

/*
 * Hmm.. Changed this, as the GNU make sources (load.c) seem to
 * imply that avenrun[] is the standard name for this kind of thing.
 * Nothing else seems to be standardized: the fractional size etc
 * all seem to differ on different machines.
 *
 * Requires xtime_lock to access.
 */
unsigned long avenrun[3];

EXPORT_SYMBOL(avenrun);

/*
 * calc_load - given tick count, update the avenrun load estimates.
 * This is called while holding a write_lock on xtime_lock.
 */
static inline void calc_load(unsigned long ticks)
{
	unsigned long active_tasks; /* fixed-point */
	static int count = LOAD_FREQ;

	count -= ticks;
	if (unlikely(count < 0)) {
		active_tasks = count_active_tasks();
		do {
			CALC_LOAD(avenrun[0], EXP_1, active_tasks);
			CALC_LOAD(avenrun[1], EXP_5, active_tasks);
			CALC_LOAD(avenrun[2], EXP_15, active_tasks);
			count += LOAD_FREQ;
		} while (count < 0);
	}
}

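/*
 * The avenrun[] values are fixed-point with FSHIFT fractional bits
 * (FIXED_1 == 1 << FSHIFT). To render avenrun[0] in the familiar
 * "0.42" form, split it into integer and fractional parts, e.g.
 * (an illustrative sketch):
 *
 *	whole = avenrun[0] >> FSHIFT;
 *	frac  = ((avenrun[0] & (FIXED_1 - 1)) * 100) >> FSHIFT;
 *
 * which matches the LOAD_INT()/LOAD_FRAC() helpers used for
 * /proc/loadavg.
 */
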
/*
 * This function runs timers and the timer-tq in bottom half context.
 */
static void run_timer_softirq(struct softirq_action *h)
{
	tvec_base_t *base = __get_cpu_var(tvec_bases);

	hrtimer_run_queues();

	if (time_after_eq(jiffies, base->timer_jiffies))
		__run_timers(base);
}

/*
 * Called by the local, per-CPU timer interrupt on SMP.
 */
void run_local_timers(void)
{
	raise_softirq(TIMER_SOFTIRQ);
	softlockup_tick();
}

/*
 * Called by the timer interrupt. xtime_lock must already be taken
 * by the timer IRQ!
 */
static inline void update_times(unsigned long ticks)
{
	update_wall_time();
	calc_load(ticks);
}

/*
 * The 64-bit jiffies value is not atomic - you MUST NOT read it
 * without sampling the sequence number in xtime_lock.
 * jiffies is defined in the linker script...
 */

void do_timer(unsigned long ticks)
{
	jiffies_64 += ticks;
	update_times(ticks);
}

#ifdef __ARCH_WANT_SYS_ALARM

/*
 * For backwards compatibility? This can be done in libc so Alpha
 * and all newer ports shouldn't need it.
 */
asmlinkage unsigned long sys_alarm(unsigned int seconds)
{
	return alarm_setitimer(seconds);
}

#endif

#ifndef __alpha__

/*
 * The Alpha uses getxpid, getxuid, and getxgid instead. Maybe this
 * should be moved into arch/i386 instead?
 */

/**
 * sys_getpid - return the thread group id of the current process
 *
 * Note, despite the name, this returns the tgid not the pid. The tgid and
 * the pid are identical unless CLONE_THREAD was specified on clone() in
 * which case the tgid is the same in all threads of the same group.
 *
 * This is SMP safe as current->tgid does not change.
 */
asmlinkage long sys_getpid(void)
{
	return current->tgid;
}

/*
 * Accessing ->real_parent is not SMP-safe, it could
 * change from under us. However, we can use a stale
 * value of ->real_parent under rcu_read_lock(), see
 * release_task()->call_rcu(delayed_put_task_struct).
 */
asmlinkage long sys_getppid(void)
{
	int pid;

	rcu_read_lock();
	pid = rcu_dereference(current->real_parent)->tgid;
	rcu_read_unlock();

	return pid;
}

asmlinkage long sys_getuid(void)
{
	/* Only we change this so SMP safe */
	return current->uid;
}

asmlinkage long sys_geteuid(void)
{
	/* Only we change this so SMP safe */
	return current->euid;
}

asmlinkage long sys_getgid(void)
{
	/* Only we change this so SMP safe */
	return current->gid;
}

asmlinkage long sys_getegid(void)
{
	/* Only we change this so SMP safe */
	return current->egid;
}

#endif

static void process_timeout(unsigned long __data)
{
	wake_up_process((struct task_struct *)__data);
}

/**
 * schedule_timeout - sleep until timeout
 * @timeout: timeout value in jiffies
 *
 * Make the current task sleep until @timeout jiffies have
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
 * pass before the routine returns. The routine will return 0.
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task. In this case the remaining time
 * in jiffies will be returned, or 0 if the timer expired in time.
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
 * the CPU away without a bound on the timeout. In this case the return
 * value will be %MAX_SCHEDULE_TIMEOUT.
 *
 * In all cases the return value is guaranteed to be non-negative.
 */
fastcall signed long __sched schedule_timeout(signed long timeout)
{
	struct timer_list timer;
	unsigned long expire;

	switch (timeout)
	{
	case MAX_SCHEDULE_TIMEOUT:
		/*
		 * These two special cases are useful to be comfortable
		 * in the caller. Nothing more. We could take
		 * MAX_SCHEDULE_TIMEOUT from one of the negative values
		 * but I'd like to return a valid offset (>=0) to allow
		 * the caller to do everything it wants with the retval.
		 */
		schedule();
		goto out;
	default:
		/*
		 * Another bit of PARANOID. Note that the retval will be
		 * 0 since no piece of kernel is supposed to do a check
		 * for a negative retval of schedule_timeout() (since it
		 * should never happen anyway). You just have the printk()
		 * that will tell you if something has gone wrong and where.
		 */
		if (timeout < 0) {
			printk(KERN_ERR "schedule_timeout: wrong timeout "
			       "value %lx\n", timeout);
			dump_stack();
			current->state = TASK_RUNNING;
			goto out;
		}
	}

	expire = timeout + jiffies;

	setup_timer(&timer, process_timeout, (unsigned long)current);
	__mod_timer(&timer, expire);
	schedule();
	del_singleshot_timer_sync(&timer);

	timeout = expire - jiffies;

out:
	return timeout < 0 ? 0 : timeout;
}
EXPORT_SYMBOL(schedule_timeout);

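/*
 * Typical caller pattern - sleep for up to two seconds, waking early
 * if a signal arrives (an illustrative sketch; `remaining` is a local
 * signed long in the caller):
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(2 * HZ);
 *
 * A non-zero return means a signal cut the sleep short; the wrappers
 * below fold the set_current_state() call into the sleep.
 */
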
/*
 * We can use __set_current_state() here because schedule_timeout() calls
 * schedule() unconditionally.
 */
signed long __sched schedule_timeout_interruptible(signed long timeout)
{
	__set_current_state(TASK_INTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_interruptible);

signed long __sched schedule_timeout_uninterruptible(signed long timeout)
{
	__set_current_state(TASK_UNINTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_uninterruptible);

/* Thread ID - the internal kernel "pid" */
asmlinkage long sys_gettid(void)
{
	return current->pid;
}

/**
 * do_sysinfo - fill in sysinfo struct
 * @info: pointer to buffer to fill
 */
int do_sysinfo(struct sysinfo *info)
{
	unsigned long mem_total, sav_total;
	unsigned int mem_unit, bitcount;
	unsigned long seq;

	memset(info, 0, sizeof(struct sysinfo));

	do {
		struct timespec tp;
		seq = read_seqbegin(&xtime_lock);

		/*
		 * This is annoying. The below is the same thing
		 * posix_get_clock_monotonic() does, but it wants to
		 * take the lock which we want to cover the loads stuff
		 * too.
		 */

		getnstimeofday(&tp);
		tp.tv_sec += wall_to_monotonic.tv_sec;
		tp.tv_nsec += wall_to_monotonic.tv_nsec;
		monotonic_to_bootbased(&tp);
		if (tp.tv_nsec - NSEC_PER_SEC >= 0) {
			tp.tv_nsec = tp.tv_nsec - NSEC_PER_SEC;
			tp.tv_sec++;
		}
		info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);

		info->loads[0] = avenrun[0] << (SI_LOAD_SHIFT - FSHIFT);
		info->loads[1] = avenrun[1] << (SI_LOAD_SHIFT - FSHIFT);
		info->loads[2] = avenrun[2] << (SI_LOAD_SHIFT - FSHIFT);

		info->procs = nr_threads;
	} while (read_seqretry(&xtime_lock, seq));

	si_meminfo(info);
	si_swapinfo(info);

	/*
	 * If the sum of all the available memory (i.e. ram + swap)
	 * is less than can be stored in a 32 bit unsigned long then
	 * we can be binary compatible with 2.2.x kernels. If not,
	 * well, in that case 2.2.x was broken anyways...
	 *
	 *  -Erik Andersen <andersee@debian.org>
	 */

	mem_total = info->totalram + info->totalswap;
	if (mem_total < info->totalram || mem_total < info->totalswap)
		goto out;
	bitcount = 0;
	mem_unit = info->mem_unit;
	while (mem_unit > 1) {
		bitcount++;
		mem_unit >>= 1;
		sav_total = mem_total;
		mem_total <<= 1;
		if (mem_total < sav_total)
			goto out;
	}

	/*
	 * If mem_total did not overflow, multiply all memory values by
	 * info->mem_unit and set it to 1. This leaves things compatible
	 * with 2.2.x, and also retains compatibility with earlier 2.4.x
	 * kernels...
	 */

	info->mem_unit = 1;
	info->totalram <<= bitcount;
	info->freeram <<= bitcount;
	info->sharedram <<= bitcount;
	info->bufferram <<= bitcount;
	info->totalswap <<= bitcount;
	info->freeswap <<= bitcount;
	info->totalhigh <<= bitcount;
	info->freehigh <<= bitcount;

out:
	return 0;
}

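/*
 * Worked example of the scaling above: with a 4 KiB mem_unit, bitcount
 * ends up as 12 (4096 == 1 << 12), so totalram etc. are shifted left
 * by 12 and mem_unit is reported as 1 byte - unless a shift would
 * overflow mem_total, in which case the values are left in the
 * original mem_unit.
 */
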
asmlinkage long sys_sysinfo(struct sysinfo __user *info)
{
	struct sysinfo val;

	do_sysinfo(&val);

	if (copy_to_user(info, &val, sizeof(struct sysinfo)))
		return -EFAULT;

	return 0;
}

/*
 * lockdep: we want to track each per-CPU base as a separate lock-class,
 * but timer-bases are kmalloc()-ed, so we need to attach separate
 * keys to them:
 */
static struct lock_class_key base_lock_keys[NR_CPUS];

static int __devinit init_timers_cpu(int cpu)
{
	int j;
	tvec_base_t *base;
	static char __devinitdata tvec_base_done[NR_CPUS];

	if (!tvec_base_done[cpu]) {
		static char boot_done;

		if (boot_done) {
			/*
			 * The APs use this path later in boot
			 */
			base = kmalloc_node(sizeof(*base),
					    GFP_KERNEL | __GFP_ZERO,
					    cpu_to_node(cpu));
			if (!base)
				return -ENOMEM;

			/* Make sure that tvec_base is 2 byte aligned */
			if (tbase_get_deferrable(base)) {
				WARN_ON(1);
				kfree(base);
				return -ENOMEM;
			}
			per_cpu(tvec_bases, cpu) = base;
		} else {
			/*
			 * This is for the boot CPU - we use compile-time
			 * static initialisation because per-cpu memory isn't
			 * ready yet and because the memory allocators are not
			 * initialised either.
			 */
			boot_done = 1;
			base = &boot_tvec_bases;
		}
		tvec_base_done[cpu] = 1;
	} else {
		base = per_cpu(tvec_bases, cpu);
	}

	spin_lock_init(&base->lock);
	lockdep_set_class(&base->lock, base_lock_keys + cpu);

	for (j = 0; j < TVN_SIZE; j++) {
		INIT_LIST_HEAD(base->tv5.vec + j);
		INIT_LIST_HEAD(base->tv4.vec + j);
		INIT_LIST_HEAD(base->tv3.vec + j);
		INIT_LIST_HEAD(base->tv2.vec + j);
	}
	for (j = 0; j < TVR_SIZE; j++)
		INIT_LIST_HEAD(base->tv1.vec + j);

	base->timer_jiffies = jiffies;
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static void migrate_timer_list(tvec_base_t *new_base, struct list_head *head)
{
	struct timer_list *timer;

	while (!list_empty(head)) {
		timer = list_first_entry(head, struct timer_list, entry);
		detach_timer(timer, 0);
		timer_set_base(timer, new_base);
		internal_add_timer(new_base, timer);
	}
}

static void __devinit migrate_timers(int cpu)
{
	tvec_base_t *old_base;
	tvec_base_t *new_base;
	int i;

	BUG_ON(cpu_online(cpu));
	old_base = per_cpu(tvec_bases, cpu);
	new_base = get_cpu_var(tvec_bases);

	local_irq_disable();
	double_spin_lock(&new_base->lock, &old_base->lock,
			 smp_processor_id() < cpu);

	BUG_ON(old_base->running_timer);

	for (i = 0; i < TVR_SIZE; i++)
		migrate_timer_list(new_base, old_base->tv1.vec + i);
	for (i = 0; i < TVN_SIZE; i++) {
		migrate_timer_list(new_base, old_base->tv2.vec + i);
		migrate_timer_list(new_base, old_base->tv3.vec + i);
		migrate_timer_list(new_base, old_base->tv4.vec + i);
		migrate_timer_list(new_base, old_base->tv5.vec + i);
	}

	double_spin_unlock(&new_base->lock, &old_base->lock,
			   smp_processor_id() < cpu);
	local_irq_enable();
	put_cpu_var(tvec_bases);
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __cpuinit timer_cpu_notify(struct notifier_block *self,
				      unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (init_timers_cpu(cpu) < 0)
			return NOTIFY_BAD;
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		migrate_timers(cpu);
		break;
#endif
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata timers_nb = {
	.notifier_call	= timer_cpu_notify,
};


void __init init_timers(void)
{
	int err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
				   (void *)(long)smp_processor_id());

	init_timer_stats();

	BUG_ON(err == NOTIFY_BAD);
	register_cpu_notifier(&timers_nb);
	open_softirq(TIMER_SOFTIRQ, run_timer_softirq, NULL);
}

/**
 * msleep - sleep safely even with waitqueue interruptions
 * @msecs: Time in milliseconds to sleep for
 */
void msleep(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout)
		timeout = schedule_timeout_uninterruptible(timeout);
}

EXPORT_SYMBOL(msleep);

/**
 * msleep_interruptible - sleep waiting for signals
 * @msecs: Time in milliseconds to sleep for
 */
unsigned long msleep_interruptible(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout && !signal_pending(current))
		timeout = schedule_timeout_interruptible(timeout);
	return jiffies_to_msecs(timeout);
}

EXPORT_SYMBOL(msleep_interruptible);