/*
 * linux/kernel/timer.c
 *
 * Kernel internal timers, kernel timekeeping, basic process system calls
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * 1997-01-28 Modified by Finn Arne Gangstad to make timers scale better.
 *
 * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
 *            "A Kernel Model for Precision Timekeeping" by Dave Mills
 * 1998-12-24 Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
 *            serialize accesses to xtime/lost_ticks).
 *            Copyright (C) 1998 Andrea Arcangeli
 * 1999-03-10 Improved NTP compatibility by Ulrich Windl
 * 2002-05-31 Move sys_sysinfo here and make its locking sane, Robert Love
 * 2000-10-05 Implemented scalable SMP per-CPU timer handling.
 *            Copyright (C) 2000, 2001, 2002 Ingo Molnar
 *            Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 */

#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>

u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

/*
 * per-CPU timer vector definitions:
 */
#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)

typedef struct tvec_s {
	struct list_head vec[TVN_SIZE];
} tvec_t;

typedef struct tvec_root_s {
	struct list_head vec[TVR_SIZE];
} tvec_root_t;

struct tvec_t_base_s {
	spinlock_t lock;
	struct timer_list *running_timer;
	unsigned long timer_jiffies;
	tvec_root_t tv1;
	tvec_t tv2;
	tvec_t tv3;
	tvec_t tv4;
	tvec_t tv5;
} ____cacheline_aligned_in_smp;

typedef struct tvec_t_base_s tvec_base_t;

tvec_base_t boot_tvec_bases;
EXPORT_SYMBOL(boot_tvec_bases);
static DEFINE_PER_CPU(tvec_base_t *, tvec_bases) = &boot_tvec_bases;

/**
 * __round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies(unsigned long j, int cpu)
{
	int rem;
	unsigned long original = j;

	/*
	 * We don't want all cpus firing their timers at once hitting the
	 * same lock or cachelines, so we skew each extra cpu with an extra
	 * 3 jiffies. This 3-jiffy skew originally came from the mm/ code,
	 * which already did this.
	 * The skew is done by adding 3*cpunr, then rounding, then
	 * subtracting this extra offset again.
	 */
	j += cpu * 3;

	rem = j % HZ;

	/*
	 * If the target jiffy is just after a whole second (which can happen
	 * due to delays of the timer irq, long irq off times etc etc) then
	 * we should round down to the whole second, not up. Use 1/4th second
	 * as cutoff for this rounding as an extreme upper bound for this.
	 */
	if (rem < HZ/4) /* round down */
		j = j - rem;
	else /* round up */
		j = j - rem + HZ;

	/* now that we have rounded, subtract the extra skew again */
	j -= cpu * 3;

	if (j <= jiffies) /* rounding ate our timeout entirely; */
		return original;
	return j;
}
EXPORT_SYMBOL_GPL(__round_jiffies);

/**
 * __round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies_relative(unsigned long j, int cpu)
{
	/*
	 * In theory the following code can skip a jiffy in case jiffies
	 * increments right between the addition and the later subtraction.
	 * However since the entire point of this function is to use
	 * approximate timeouts, it's entirely ok to not handle that.
	 */
	return __round_jiffies(j + jiffies, cpu) - jiffies;
}
EXPORT_SYMBOL_GPL(__round_jiffies_relative);

/**
 * round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies(unsigned long j)
{
	return __round_jiffies(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies);

/**
 * round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies_relative(unsigned long j)
{
	return __round_jiffies_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_relative);
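
/*
 * Illustrative usage (editor's sketch; my_timer and my_work are made-up
 * names, not part of this file): a driver that polls hardware roughly
 * every five seconds, and does not care about the exact firing time, can
 * batch its wakeups with other timers:
 *
 *	mod_timer(&my_timer, round_jiffies(jiffies + 5 * HZ));
 *
 * or, for a relative interval:
 *
 *	schedule_delayed_work(&my_work, round_jiffies_relative(5 * HZ));
 */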


static inline void set_running_timer(tvec_base_t *base,
					struct timer_list *timer)
{
#ifdef CONFIG_SMP
	base->running_timer = timer;
#endif
}

static void internal_add_timer(tvec_base_t *base, struct timer_list *timer)
{
	unsigned long expires = timer->expires;
	unsigned long idx = expires - base->timer_jiffies;
	struct list_head *vec;

	if (idx < TVR_SIZE) {
		int i = expires & TVR_MASK;
		vec = base->tv1.vec + i;
	} else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
		int i = (expires >> TVR_BITS) & TVN_MASK;
		vec = base->tv2.vec + i;
	} else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
		vec = base->tv3.vec + i;
	} else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
		vec = base->tv4.vec + i;
	} else if ((signed long) idx < 0) {
		/*
		 * Can happen if you add a timer with expires == jiffies,
		 * or you set a timer to go off in the past
		 */
		vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
	} else {
		int i;
		/* If the timeout is larger than 0xffffffff on 64-bit
		 * architectures then we use the maximum timeout:
		 */
		if (idx > 0xffffffffUL) {
			idx = 0xffffffffUL;
			expires = idx + base->timer_jiffies;
		}
		i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
		vec = base->tv5.vec + i;
	}
	/*
	 * Timers are FIFO:
	 */
	list_add_tail(&timer->entry, vec);
}
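
/*
 * Worked example (editor's note, assuming the !CONFIG_BASE_SMALL geometry
 * of TVR_BITS == 8 and TVN_BITS == 6): tv1 has 256 slots covering the next
 * 256 jiffies; each of tv2's 64 slots covers 256 jiffies (idx < ~16k),
 * tv3 reaches ~1M, tv4 ~64M, and anything beyond lands in tv5. A timer
 * with idx = expires - base->timer_jiffies == 1000 therefore goes into
 * tv2, at slot (expires >> 8) & 63.
 */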

/**
 * init_timer - initialize a timer.
 * @timer: the timer to be initialized
 *
 * init_timer() must be done to a timer prior to calling *any* of the
 * other timer functions.
 */
void fastcall init_timer(struct timer_list *timer)
{
	timer->entry.next = NULL;
	timer->base = __raw_get_cpu_var(tvec_bases);
}
EXPORT_SYMBOL(init_timer);

static inline void detach_timer(struct timer_list *timer,
				int clear_pending)
{
	struct list_head *entry = &timer->entry;

	__list_del(entry->prev, entry->next);
	if (clear_pending)
		entry->next = NULL;
	entry->prev = LIST_POISON2;
}

/*
 * We are using hashed locking: holding per_cpu(tvec_bases).lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on ->tvX lists.
 *
 * When the timer's base is locked, and the timer removed from the list,
 * it is possible to set timer->base = NULL and drop the lock: the timer
 * remains locked.
 */
static tvec_base_t *lock_timer_base(struct timer_list *timer,
					unsigned long *flags)
	__acquires(timer->base->lock)
{
	tvec_base_t *base;

	for (;;) {
		base = timer->base;
		if (likely(base != NULL)) {
			spin_lock_irqsave(&base->lock, *flags);
			if (likely(base == timer->base))
				return base;
			/* The timer has migrated to another CPU */
			spin_unlock_irqrestore(&base->lock, *flags);
		}
		cpu_relax();
	}
}
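
/*
 * Editor's note on the retry loop above: between reading timer->base and
 * taking base->lock, __mod_timer() on another CPU may be migrating the
 * timer - it sets timer->base = NULL under the old base's lock before
 * assigning the new base. Spinning while timer->base is NULL, and
 * re-checking it under the lock, is what keeps the lock and the field
 * coherent.
 */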

int __mod_timer(struct timer_list *timer, unsigned long expires)
{
	tvec_base_t *base, *new_base;
	unsigned long flags;
	int ret = 0;

	BUG_ON(!timer->function);

	base = lock_timer_base(timer, &flags);

	if (timer_pending(timer)) {
		detach_timer(timer, 0);
		ret = 1;
	}

	new_base = __get_cpu_var(tvec_bases);

	if (base != new_base) {
		/*
		 * We are trying to schedule the timer on the local CPU.
		 * However we can't change timer's base while it is running,
		 * otherwise del_timer_sync() can't detect that the timer's
		 * handler has not finished yet. This also guarantees that
		 * the timer is serialized wrt itself.
		 */
		if (likely(base->running_timer != timer)) {
			/* See the comment in lock_timer_base() */
			timer->base = NULL;
			spin_unlock(&base->lock);
			base = new_base;
			spin_lock(&base->lock);
			timer->base = base;
		}
	}

	timer->expires = expires;
	internal_add_timer(base, timer);
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}

EXPORT_SYMBOL(__mod_timer);

/**
 * add_timer_on - start a timer on a particular CPU
 * @timer: the timer to be added
 * @cpu: the CPU to start it on
 *
 * This is not very scalable on SMP. Double adds are not possible.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
	tvec_base_t *base = per_cpu(tvec_bases, cpu);
	unsigned long flags;

	BUG_ON(timer_pending(timer) || !timer->function);
	spin_lock_irqsave(&base->lock, flags);
	timer->base = base;
	internal_add_timer(base, timer);
	spin_unlock_irqrestore(&base->lock, flags);
}


/**
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer() is a more efficient way to update the expires field of an
 * active timer (if the timer is inactive it will be activated).
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
	BUG_ON(!timer->function);

	/*
	 * This is a common optimization triggered by the
	 * networking code - if the timer is re-modified
	 * to be the same thing then just return:
	 */
	if (timer->expires == expires && timer_pending(timer))
		return 1;

	return __mod_timer(timer, expires);
}

EXPORT_SYMBOL(mod_timer);
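
/*
 * Illustrative pattern (editor's sketch; my_handler, my_timer and
 * do_periodic_work() are made-up names): a periodic timer can safely
 * re-arm itself from its own handler with mod_timer(), since a handler
 * is serialized wrt itself:
 *
 *	static void my_handler(unsigned long data)
 *	{
 *		do_periodic_work();
 *		mod_timer(&my_timer, round_jiffies(jiffies + HZ));
 *	}
 */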

/**
 * del_timer - deactivate a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and inactive
 * timers.
 *
 * The function returns whether it has deactivated a pending timer or not.
 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
 * active timer returns 1.)
 */
int del_timer(struct timer_list *timer)
{
	tvec_base_t *base;
	unsigned long flags;
	int ret = 0;

	if (timer_pending(timer)) {
		base = lock_timer_base(timer, &flags);
		if (timer_pending(timer)) {
			detach_timer(timer, 1);
			ret = 1;
		}
		spin_unlock_irqrestore(&base->lock, flags);
	}

	return ret;
}

EXPORT_SYMBOL(del_timer);

#ifdef CONFIG_SMP
/**
 * try_to_del_timer_sync - Try to deactivate a timer
 * @timer: the timer to delete
 *
 * This function tries to deactivate a timer. Upon successful (ret >= 0)
 * exit the timer is not queued and the handler is not running on any CPU.
 *
 * It must not be called from interrupt contexts.
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
	tvec_base_t *base;
	unsigned long flags;
	int ret = -1;

	base = lock_timer_base(timer, &flags);

	if (base->running_timer == timer)
		goto out;

	ret = 0;
	if (timer_pending(timer)) {
		detach_timer(timer, 1);
		ret = 1;
	}
out:
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}

/**
 * del_timer_sync - deactivate a timer and wait for the handler to finish.
 * @timer: the timer to be deactivated
 *
 * This function only differs from del_timer() on SMP: besides deactivating
 * the timer it also makes sure the handler has finished executing on other
 * CPUs.
 *
 * Synchronization rules: Callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts. The caller must not hold locks which would prevent
 * completion of the timer's handler. The timer's handler must not call
 * add_timer_on(). Upon exit the timer is not queued and the handler is
 * not running on any CPU.
 *
 * The function returns whether it has deactivated a pending timer or not.
 */
int del_timer_sync(struct timer_list *timer)
{
	for (;;) {
		int ret = try_to_del_timer_sync(timer);
		if (ret >= 0)
			return ret;
		cpu_relax();
	}
}

EXPORT_SYMBOL(del_timer_sync);
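
/*
 * Typical teardown pattern (editor's sketch; dev, shutting_down and
 * poll_timer are made-up names): stop whatever re-arms the timer first,
 * then synchronously kill it before freeing the object it points into:
 *
 *	dev->shutting_down = 1;		(handler checks this, stops re-arming)
 *	del_timer_sync(&dev->poll_timer);
 *	kfree(dev);
 */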
#endif

static int cascade(tvec_base_t *base, tvec_t *tv, int index)
{
	/* cascade all the timers from tv up one level */
	struct timer_list *timer, *tmp;
	struct list_head tv_list;

	list_replace_init(tv->vec + index, &tv_list);

	/*
	 * We are removing _all_ timers from the list, so we
	 * don't have to detach them individually.
	 */
	list_for_each_entry_safe(timer, tmp, &tv_list, entry) {
		BUG_ON(timer->base != base);
		internal_add_timer(base, timer);
	}

	return index;
}

#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)

/**
 * __run_timers - run all expired timers (if any) on this CPU.
 * @base: the timer vector to be processed.
 *
 * This function cascades all vectors and executes all expired timer
 * vectors.
 */
static inline void __run_timers(tvec_base_t *base)
{
	struct timer_list *timer;

	spin_lock_irq(&base->lock);
	while (time_after_eq(jiffies, base->timer_jiffies)) {
		struct list_head work_list;
		struct list_head *head = &work_list;
		int index = base->timer_jiffies & TVR_MASK;

		/*
		 * Cascade timers:
		 */
		if (!index &&
			(!cascade(base, &base->tv2, INDEX(0))) &&
				(!cascade(base, &base->tv3, INDEX(1))) &&
					!cascade(base, &base->tv4, INDEX(2)))
			cascade(base, &base->tv5, INDEX(3));
		++base->timer_jiffies;
		list_replace_init(base->tv1.vec + index, &work_list);
		while (!list_empty(head)) {
			void (*fn)(unsigned long);
			unsigned long data;

			timer = list_entry(head->next, struct timer_list, entry);
			fn = timer->function;
			data = timer->data;

			set_running_timer(base, timer);
			detach_timer(timer, 1);
			spin_unlock_irq(&base->lock);
			{
				int preempt_count = preempt_count();
				fn(data);
				if (preempt_count != preempt_count()) {
					printk(KERN_WARNING "huh, entered %p "
					       "with preempt_count %08x, exited"
					       " with %08x?\n",
					       fn, preempt_count,
					       preempt_count());
					BUG();
				}
			}
			spin_lock_irq(&base->lock);
		}
	}
	set_running_timer(base, NULL);
	spin_unlock_irq(&base->lock);
}

#ifdef CONFIG_NO_IDLE_HZ
/*
 * Find out when the next timer event is due to happen. This
 * is used on S/390 to stop all activity when a cpu is idle.
 * This function needs to be called with interrupts disabled.
 */
unsigned long next_timer_interrupt(void)
{
	tvec_base_t *base;
	struct list_head *list;
	struct timer_list *nte;
	unsigned long expires;
	unsigned long hr_expires = MAX_JIFFY_OFFSET;
	ktime_t hr_delta;
	tvec_t *varray[4];
	int i, j;

	hr_delta = hrtimer_get_next_event();
	if (hr_delta.tv64 != KTIME_MAX) {
		struct timespec tsdelta;
		tsdelta = ktime_to_timespec(hr_delta);
		hr_expires = timespec_to_jiffies(&tsdelta);
		if (hr_expires < 3)
			return hr_expires + jiffies;
	}
	hr_expires += jiffies;

	base = __get_cpu_var(tvec_bases);
	spin_lock(&base->lock);
	expires = base->timer_jiffies + (LONG_MAX >> 1);
	list = NULL;

	/* Look for timer events in tv1. */
	j = base->timer_jiffies & TVR_MASK;
	do {
		list_for_each_entry(nte, base->tv1.vec + j, entry) {
			expires = nte->expires;
			if (j < (base->timer_jiffies & TVR_MASK))
				list = base->tv2.vec + (INDEX(0));
			goto found;
		}
		j = (j + 1) & TVR_MASK;
	} while (j != (base->timer_jiffies & TVR_MASK));

	/* Check tv2-tv5. */
	varray[0] = &base->tv2;
	varray[1] = &base->tv3;
	varray[2] = &base->tv4;
	varray[3] = &base->tv5;
	for (i = 0; i < 4; i++) {
		j = INDEX(i);
		do {
			if (list_empty(varray[i]->vec + j)) {
				j = (j + 1) & TVN_MASK;
				continue;
			}
			list_for_each_entry(nte, varray[i]->vec + j, entry)
				if (time_before(nte->expires, expires))
					expires = nte->expires;
			if (j < (INDEX(i)) && i < 3)
				list = varray[i + 1]->vec + (INDEX(i + 1));
			goto found;
		} while (j != (INDEX(i)));
	}
found:
	if (list) {
		/*
		 * The search wrapped. We need to look at the next list
		 * from the next tv element that would cascade into the tv
		 * element where we found the timer element.
		 */
		list_for_each_entry(nte, list, entry) {
			if (time_before(nte->expires, expires))
				expires = nte->expires;
		}
	}
	spin_unlock(&base->lock);

	/*
	 * It can happen that other CPUs service timer IRQs and increment
	 * jiffies, but we have not yet got a local timer tick to process
	 * the timer wheels. In that case, the expiry time can be before
	 * jiffies, but since the high-resolution timer here is relative to
	 * jiffies, the default expression when high-resolution timers are
	 * not active,
	 *
	 *	time_before(MAX_JIFFY_OFFSET + jiffies, expires)
	 *
	 * would falsely evaluate to true. If that is the case, just
	 * return jiffies so that we can immediately fire the local timer.
	 */
	if (time_before(expires, jiffies))
		return jiffies;

	if (time_before(hr_expires, expires))
		return hr_expires;

	return expires;
}
#endif

/******************************************************************/

/*
 * The current time
 * wall_to_monotonic is what we need to add to xtime (or xtime corrected
 * for sub-jiffy times) to get to monotonic time. Monotonic is pegged
 * at zero at system boot time, so wall_to_monotonic will be negative,
 * however, we will ALWAYS keep the tv_nsec part positive so we can use
 * the usual normalization.
 */
struct timespec xtime __attribute__ ((aligned (16)));
struct timespec wall_to_monotonic __attribute__ ((aligned (16)));

EXPORT_SYMBOL(xtime);


/* XXX - all of this timekeeping code should later be moved to time.c */
#include <linux/clocksource.h>
static struct clocksource *clock; /* pointer to current clocksource */

#ifdef CONFIG_GENERIC_TIME
/**
 * __get_nsec_offset - Returns nanoseconds since last call to update_wall_time
 *
 * private function, must hold xtime_lock lock when being
 * called. Returns the number of nanoseconds since the
 * last call to update_wall_time() (adjusted by NTP scaling)
 */
static inline s64 __get_nsec_offset(void)
{
	cycle_t cycle_now, cycle_delta;
	s64 ns_offset;

	/* read clocksource: */
	cycle_now = clocksource_read(clock);

	/* calculate the delta since the last update_wall_time: */
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

	/* convert to nanoseconds: */
	ns_offset = cyc2ns(clock, cycle_delta);

	return ns_offset;
}
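
/*
 * Editor's note: cyc2ns() is the usual fixed-point scaling
 *
 *	ns = (cycle_delta * clock->mult) >> clock->shift;
 *
 * e.g. for a hypothetical 1 MHz clocksource calibrated so that
 * mult == 1000 << shift, a delta of 500 cycles yields 500,000 ns.
 */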

/**
 * __get_realtime_clock_ts - Returns the time of day in a timespec
 * @ts: pointer to the timespec to be set
 *
 * Returns the time of day in a timespec. Used by
 * do_gettimeofday() and get_realtime_clock_ts().
 */
static inline void __get_realtime_clock_ts(struct timespec *ts)
{
	unsigned long seq;
	s64 nsecs;

	do {
		seq = read_seqbegin(&xtime_lock);

		*ts = xtime;
		nsecs = __get_nsec_offset();

	} while (read_seqretry(&xtime_lock, seq));

	timespec_add_ns(ts, nsecs);
}

/**
 * getnstimeofday - Returns the time of day in a timespec
 * @ts: pointer to the timespec to be set
 *
 * Returns the time of day in a timespec.
 */
void getnstimeofday(struct timespec *ts)
{
	__get_realtime_clock_ts(ts);
}

EXPORT_SYMBOL(getnstimeofday);

/**
 * do_gettimeofday - Returns the time of day in a timeval
 * @tv: pointer to the timeval to be set
 *
 * NOTE: Users should be converted to using get_realtime_clock_ts()
 */
void do_gettimeofday(struct timeval *tv)
{
	struct timespec now;

	__get_realtime_clock_ts(&now);
	tv->tv_sec = now.tv_sec;
	tv->tv_usec = now.tv_nsec/1000;
}

EXPORT_SYMBOL(do_gettimeofday);

/**
 * do_settimeofday - Sets the time of day
 * @tv: pointer to the timespec variable containing the new time
 *
 * Sets the time of day to the new time, updates NTP and notifies hrtimers
 */
int do_settimeofday(struct timespec *tv)
{
	unsigned long flags;
	time_t wtm_sec, sec = tv->tv_sec;
	long wtm_nsec, nsec = tv->tv_nsec;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irqsave(&xtime_lock, flags);

	nsec -= __get_nsec_offset();

	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

	set_normalized_timespec(&xtime, sec, nsec);
	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

	clock->error = 0;
	ntp_clear();

	write_sequnlock_irqrestore(&xtime_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return 0;
}

EXPORT_SYMBOL(do_settimeofday);

/**
 * change_clocksource - Swaps clocksources if a new one is available
 *
 * Accumulates the current time interval and initializes the new clocksource
 */
static void change_clocksource(void)
{
	struct clocksource *new;
	cycle_t now;
	u64 nsec;

	new = clocksource_get_next();

	if (clock == new)
		return;

	now = clocksource_read(new);
	nsec = __get_nsec_offset();
	timespec_add_ns(&xtime, nsec);

	clock = new;
	clock->cycle_last = now;

	clock->error = 0;
	clock->xtime_nsec = 0;
	clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);

	printk(KERN_INFO "Time: %s clocksource has been installed.\n",
	       clock->name);
}
#else
static inline void change_clocksource(void) { }
#endif

/**
 * timekeeping_is_continuous - check to see if timekeeping is free running
 */
int timekeeping_is_continuous(void)
{
	unsigned long seq;
	int ret;

	do {
		seq = read_seqbegin(&xtime_lock);

		ret = clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;

	} while (read_seqretry(&xtime_lock, seq));

	return ret;
}

/**
 * read_persistent_clock - Return time in seconds from the persistent clock.
 *
 * Weak dummy function for arches that do not yet support it.
 * Returns seconds from epoch using the battery backed persistent clock.
 * Returns zero if unsupported.
 *
 * XXX - Do be sure to remove it once all arches implement it.
 */
unsigned long __attribute__((weak)) read_persistent_clock(void)
{
	return 0;
}

/*
 * timekeeping_init - Initializes the clocksource and common timekeeping values
 */
void __init timekeeping_init(void)
{
	unsigned long flags;
	unsigned long sec = read_persistent_clock();

	write_seqlock_irqsave(&xtime_lock, flags);

	ntp_clear();

	clock = clocksource_get_next();
	clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);
	clock->cycle_last = clocksource_read(clock);

	xtime.tv_sec = sec;
	xtime.tv_nsec = 0;
	set_normalized_timespec(&wall_to_monotonic,
		-xtime.tv_sec, -xtime.tv_nsec);

	write_sequnlock_irqrestore(&xtime_lock, flags);
}


/* flag for if timekeeping is suspended */
static int timekeeping_suspended;
/* time in seconds when suspend began */
static unsigned long timekeeping_suspend_time;

/**
 * timekeeping_resume - Resumes the generic timekeeping subsystem.
 * @dev: unused
 *
 * This is for the generic clocksource timekeeping.
 * xtime/wall_to_monotonic/jiffies/etc are
 * still managed by arch specific suspend/resume code.
 */
static int timekeeping_resume(struct sys_device *dev)
{
	unsigned long flags;
	unsigned long now = read_persistent_clock();

	write_seqlock_irqsave(&xtime_lock, flags);

	if (now && (now > timekeeping_suspend_time)) {
		unsigned long sleep_length = now - timekeeping_suspend_time;

		xtime.tv_sec += sleep_length;
		wall_to_monotonic.tv_sec -= sleep_length;
	}
	/* re-base the last cycle value */
	clock->cycle_last = clocksource_read(clock);
	clock->error = 0;
	timekeeping_suspended = 0;
	write_sequnlock_irqrestore(&xtime_lock, flags);

	touch_softlockup_watchdog();
	hrtimer_notify_resume();

	return 0;
}

static int timekeeping_suspend(struct sys_device *dev, pm_message_t state)
{
	unsigned long flags;

	write_seqlock_irqsave(&xtime_lock, flags);
	timekeeping_suspended = 1;
	timekeeping_suspend_time = read_persistent_clock();
	write_sequnlock_irqrestore(&xtime_lock, flags);
	return 0;
}

/* sysfs resume/suspend bits for timekeeping */
static struct sysdev_class timekeeping_sysclass = {
	.resume		= timekeeping_resume,
	.suspend	= timekeeping_suspend,
	set_kset_name("timekeeping"),
};

static struct sys_device device_timer = {
	.id	= 0,
	.cls	= &timekeeping_sysclass,
};

static int __init timekeeping_init_device(void)
{
	int error = sysdev_class_register(&timekeeping_sysclass);
	if (!error)
		error = sysdev_register(&device_timer);
	return error;
}

device_initcall(timekeeping_init_device);

/*
 * If the error is already larger, we look ahead even further
 * to compensate for late or lost adjustments.
 */
static __always_inline int clocksource_bigadjust(s64 error, s64 *interval,
						 s64 *offset)
{
	s64 tick_error, i;
	u32 look_ahead, adj;
	s32 error2, mult;

	/*
	 * Use the current error value to determine how much to look ahead.
	 * The larger the error the slower we adjust for it to avoid problems
	 * with losing too many ticks, otherwise we would overadjust and
	 * produce an even larger error.  The smaller the adjustment the
	 * faster we try to adjust for it, as lost ticks can do less harm
	 * here.  This is tuned so that an error of about 1 msec is adjusted
	 * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
	 */
	error2 = clock->error >> (TICK_LENGTH_SHIFT + 22 - 2 * SHIFT_HZ);
	error2 = abs(error2);
	for (look_ahead = 0; error2 > 0; look_ahead++)
		error2 >>= 2;

	/*
	 * Now calculate the error in (1 << look_ahead) ticks, but first
	 * remove the single look ahead already included in the error.
	 */
	tick_error = current_tick_length() >>
		(TICK_LENGTH_SHIFT - clock->shift + 1);
	tick_error -= clock->xtime_interval >> 1;
	error = ((error - tick_error) >> look_ahead) + tick_error;

	/* Finally calculate the adjustment shift value. */
	i = *interval;
	mult = 1;
	if (error < 0) {
		error = -error;
		*interval = -*interval;
		*offset = -*offset;
		mult = -1;
	}
	for (adj = 0; error > i; adj++)
		error >>= 1;

	*interval <<= adj;
	*offset <<= adj;
	return mult << adj;
}

/*
 * Adjust the multiplier to reduce the error value,
 * this is optimized for the most common adjustments of -1,0,1,
 * for other values we can do a bit more work.
 */
static void clocksource_adjust(struct clocksource *clock, s64 offset)
{
	s64 error, interval = clock->cycle_interval;
	int adj;

	error = clock->error >> (TICK_LENGTH_SHIFT - clock->shift - 1);
	if (error > interval) {
		error >>= 2;
		if (likely(error <= interval))
			adj = 1;
		else
			adj = clocksource_bigadjust(error, &interval, &offset);
	} else if (error < -interval) {
		error >>= 2;
		if (likely(error >= -interval)) {
			adj = -1;
			interval = -interval;
			offset = -offset;
		} else
			adj = clocksource_bigadjust(error, &interval, &offset);
	} else
		return;

	clock->mult += adj;
	clock->xtime_interval += interval;
	clock->xtime_nsec -= offset;
	clock->error -= (interval - offset) <<
			(TICK_LENGTH_SHIFT - clock->shift);
}

/**
 * update_wall_time - Uses the current clocksource to increment the wall time
 *
 * Called from the timer interrupt, must hold a write on xtime_lock.
 */
static void update_wall_time(void)
{
	cycle_t offset;

	/* Make sure we're fully resumed: */
	if (unlikely(timekeeping_suspended))
		return;

#ifdef CONFIG_GENERIC_TIME
	offset = (clocksource_read(clock) - clock->cycle_last) & clock->mask;
#else
	offset = clock->cycle_interval;
#endif
	clock->xtime_nsec += (s64)xtime.tv_nsec << clock->shift;

	/* normally this loop will run just once, however in the
	 * case of lost or late ticks, it will accumulate correctly.
	 */
	while (offset >= clock->cycle_interval) {
		/* accumulate one interval */
		clock->xtime_nsec += clock->xtime_interval;
		clock->cycle_last += clock->cycle_interval;
		offset -= clock->cycle_interval;

		if (clock->xtime_nsec >= (u64)NSEC_PER_SEC << clock->shift) {
			clock->xtime_nsec -= (u64)NSEC_PER_SEC << clock->shift;
			xtime.tv_sec++;
			second_overflow();
		}

		/* interpolator bits */
		time_interpolator_update(clock->xtime_interval
						>> clock->shift);

		/* accumulate error between NTP and clock interval */
		clock->error += current_tick_length();
		clock->error -= clock->xtime_interval << (TICK_LENGTH_SHIFT - clock->shift);
	}

	/* correct the clock when NTP error is too big */
	clocksource_adjust(clock, offset);

	/* store full nanoseconds into xtime */
	xtime.tv_nsec = (s64)clock->xtime_nsec >> clock->shift;
	clock->xtime_nsec -= (s64)xtime.tv_nsec << clock->shift;

	/* check to see if there is a new clocksource to use */
	change_clocksource();
}
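
/*
 * Editor's note on the shifted arithmetic above: clock->xtime_nsec holds
 * nanoseconds left-shifted by clock->shift, so the per-interval increment
 * xtime_interval (== cycle_interval * mult) can be accumulated without
 * rounding; a full second has elapsed once it reaches
 * NSEC_PER_SEC << clock->shift.
 */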

/*
 * Called from the timer interrupt handler to charge one tick to the current
 * process.  user_tick is 1 if the tick is user time, 0 for system.
 */
void update_process_times(int user_tick)
{
	struct task_struct *p = current;
	int cpu = smp_processor_id();

	/* Note: this timer irq context must be accounted for as well. */
	if (user_tick)
		account_user_time(p, jiffies_to_cputime(1));
	else
		account_system_time(p, HARDIRQ_OFFSET, jiffies_to_cputime(1));
	run_local_timers();
	if (rcu_pending(cpu))
		rcu_check_callbacks(cpu, user_tick);
	scheduler_tick();
	run_posix_cpu_timers(p);
}

/*
 * Nr of active tasks - counted in fixed-point numbers
 */
static unsigned long count_active_tasks(void)
{
	return nr_active() * FIXED_1;
}

/*
 * Hmm.. Changed this, as the GNU make sources (load.c) seem to
 * imply that avenrun[] is the standard name for this kind of thing.
 * Nothing else seems to be standardized: the fractional size etc
 * all seem to differ on different machines.
 *
 * Requires xtime_lock to access.
 */
unsigned long avenrun[3];

EXPORT_SYMBOL(avenrun);

/*
 * calc_load - given tick count, update the avenrun load estimates.
 * This is called while holding a write_lock on xtime_lock.
 */
static inline void calc_load(unsigned long ticks)
{
	unsigned long active_tasks; /* fixed-point */
	static int count = LOAD_FREQ;

	count -= ticks;
	if (unlikely(count < 0)) {
		active_tasks = count_active_tasks();
		do {
			CALC_LOAD(avenrun[0], EXP_1, active_tasks);
			CALC_LOAD(avenrun[1], EXP_5, active_tasks);
			CALC_LOAD(avenrun[2], EXP_15, active_tasks);
			count += LOAD_FREQ;
		} while (count < 0);
	}
}
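
/*
 * Editor's note: CALC_LOAD is the classic exponentially-decaying average
 * in FSHIFT-bit fixed point (FIXED_1 == 1 << FSHIFT):
 *
 *	load = (load * exp + active * (FIXED_1 - exp)) >> FSHIFT;
 *
 * so with EXP_1, the one-minute decay factor, a sustained run queue of
 * two tasks pulls avenrun[0] toward 2 * FIXED_1, which /proc/loadavg
 * prints as 2.00.
 */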

/*
 * This seqlock protects us from races in SMP while
 * playing with xtime and avenrun.
 */
__attribute__((weak)) __cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);

EXPORT_SYMBOL(xtime_lock);

/*
 * This function runs timers and the timer-tq in bottom half context.
 */
static void run_timer_softirq(struct softirq_action *h)
{
	tvec_base_t *base = __get_cpu_var(tvec_bases);

	hrtimer_run_queues();
	if (time_after_eq(jiffies, base->timer_jiffies))
		__run_timers(base);
}

/*
 * Called by the local, per-CPU timer interrupt on SMP.
 */
void run_local_timers(void)
{
	raise_softirq(TIMER_SOFTIRQ);
	softlockup_tick();
}

/*
 * Called by the timer interrupt. xtime_lock must already be taken
 * by the timer IRQ!
 */
static inline void update_times(unsigned long ticks)
{
	update_wall_time();
	calc_load(ticks);
}

/*
 * The 64-bit jiffies value is not atomic - you MUST NOT read it
 * without sampling the sequence number in xtime_lock.
 * jiffies is defined in the linker script...
 */

void do_timer(unsigned long ticks)
{
	jiffies_64 += ticks;
	update_times(ticks);
}

#ifdef __ARCH_WANT_SYS_ALARM

/*
 * For backwards compatibility?  This can be done in libc so Alpha
 * and all newer ports shouldn't need it.
 */
asmlinkage unsigned long sys_alarm(unsigned int seconds)
{
	return alarm_setitimer(seconds);
}

#endif

#ifndef __alpha__

/*
 * The Alpha uses getxpid, getxuid, and getxgid instead.  Maybe this
 * should be moved into arch/i386 instead?
 */

/**
 * sys_getpid - return the thread group id of the current process
 *
 * Note, despite the name, this returns the tgid not the pid.  The tgid and
 * the pid are identical unless CLONE_THREAD was specified on clone() in
 * which case the tgid is the same in all threads of the same group.
 *
 * This is SMP safe as current->tgid does not change.
 */
asmlinkage long sys_getpid(void)
{
	return current->tgid;
}

/*
 * Accessing ->real_parent is not SMP-safe, it could
 * change from under us. However, we can use a stale
 * value of ->real_parent under rcu_read_lock(), see
 * release_task()->call_rcu(delayed_put_task_struct).
 */
asmlinkage long sys_getppid(void)
{
	int pid;

	rcu_read_lock();
	pid = rcu_dereference(current->real_parent)->tgid;
	rcu_read_unlock();

	return pid;
}

asmlinkage long sys_getuid(void)
{
	/* Only we change this so SMP safe */
	return current->uid;
}

asmlinkage long sys_geteuid(void)
{
	/* Only we change this so SMP safe */
	return current->euid;
}

asmlinkage long sys_getgid(void)
{
	/* Only we change this so SMP safe */
	return current->gid;
}

asmlinkage long sys_getegid(void)
{
	/* Only we change this so SMP safe */
	return current->egid;
}

#endif

static void process_timeout(unsigned long __data)
{
	wake_up_process((struct task_struct *)__data);
}

/**
 * schedule_timeout - sleep until timeout
 * @timeout: timeout value in jiffies
 *
 * Make the current task sleep until @timeout jiffies have
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
 * pass before the routine returns. The routine will return 0
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task. In this case the remaining time
 * in jiffies will be returned, or 0 if the timer expired in time
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
 * the CPU away without a bound on the timeout. In this case the return
 * value will be %MAX_SCHEDULE_TIMEOUT.
 *
 * In all cases the return value is guaranteed to be non-negative.
 */
fastcall signed long __sched schedule_timeout(signed long timeout)
{
	struct timer_list timer;
	unsigned long expire;

	switch (timeout)
	{
	case MAX_SCHEDULE_TIMEOUT:
		/*
		 * These two special cases are useful to be comfortable
		 * in the caller. Nothing more. We could take
		 * MAX_SCHEDULE_TIMEOUT from one of the negative values
		 * but I'd like to return a valid offset (>=0) to allow
		 * the caller to do everything it wants with the retval.
		 */
		schedule();
		goto out;
	default:
		/*
		 * Another bit of PARANOID. Note that the retval will be
		 * 0 since no piece of kernel is supposed to do a check
		 * for a negative retval of schedule_timeout() (since it
		 * should never happen anyway). You just have the printk()
		 * that will tell you if something has gone wrong and where.
		 */
		if (timeout < 0) {
			printk(KERN_ERR "schedule_timeout: wrong timeout "
				"value %lx\n", timeout);
			dump_stack();
			current->state = TASK_RUNNING;
			goto out;
		}
	}

	expire = timeout + jiffies;

	setup_timer(&timer, process_timeout, (unsigned long)current);
	__mod_timer(&timer, expire);
	schedule();
	del_singleshot_timer_sync(&timer);

	timeout = expire - jiffies;

out:
	return timeout < 0 ? 0 : timeout;
}
EXPORT_SYMBOL(schedule_timeout);

1401 | ||
8a1c1757 AM |
1402 | /* |
1403 | * We can use __set_current_state() here because schedule_timeout() calls | |
1404 | * schedule() unconditionally. | |
1405 | */ | |
64ed93a2 NA |
1406 | signed long __sched schedule_timeout_interruptible(signed long timeout) |
1407 | { | |
a5a0d52c AM |
1408 | __set_current_state(TASK_INTERRUPTIBLE); |
1409 | return schedule_timeout(timeout); | |
64ed93a2 NA |
1410 | } |
1411 | EXPORT_SYMBOL(schedule_timeout_interruptible); | |
1412 | ||
1413 | signed long __sched schedule_timeout_uninterruptible(signed long timeout) | |
1414 | { | |
a5a0d52c AM |
1415 | __set_current_state(TASK_UNINTERRUPTIBLE); |
1416 | return schedule_timeout(timeout); | |
64ed93a2 NA |
1417 | } |
1418 | EXPORT_SYMBOL(schedule_timeout_uninterruptible); | |
1419 | ||
1da177e4 LT |
1420 | /* Thread ID - the internal kernel "pid" */ |
1421 | asmlinkage long sys_gettid(void) | |
1422 | { | |
1423 | return current->pid; | |
1424 | } | |
1425 | ||
2aae4a10 | 1426 | /** |
d4d23add | 1427 | * do_sysinfo - fill in sysinfo struct |
2aae4a10 | 1428 | * @info: pointer to buffer to fill |
1da177e4 | 1429 | */ |
d4d23add | 1430 | int do_sysinfo(struct sysinfo *info) |
1da177e4 | 1431 | { |
1da177e4 LT |
1432 | unsigned long mem_total, sav_total; |
1433 | unsigned int mem_unit, bitcount; | |
1434 | unsigned long seq; | |
1435 | ||
d4d23add | 1436 | memset(info, 0, sizeof(struct sysinfo)); |
1da177e4 LT |
1437 | |
1438 | do { | |
1439 | struct timespec tp; | |
1440 | seq = read_seqbegin(&xtime_lock); | |
1441 | ||
1442 | /* | |
1443 | * This is annoying. The below is the same thing | |
1444 | * posix_get_clock_monotonic() does, but it wants to | |
1445 | * take the lock which we want to cover the loads stuff | |
1446 | * too. | |
1447 | */ | |
1448 | ||
1449 | getnstimeofday(&tp); | |
1450 | tp.tv_sec += wall_to_monotonic.tv_sec; | |
1451 | tp.tv_nsec += wall_to_monotonic.tv_nsec; | |
1452 | if (tp.tv_nsec - NSEC_PER_SEC >= 0) { | |
1453 | tp.tv_nsec = tp.tv_nsec - NSEC_PER_SEC; | |
1454 | tp.tv_sec++; | |
1455 | } | |
d4d23add | 1456 | info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0); |
1da177e4 | 1457 | |
d4d23add KM |
1458 | info->loads[0] = avenrun[0] << (SI_LOAD_SHIFT - FSHIFT); |
1459 | info->loads[1] = avenrun[1] << (SI_LOAD_SHIFT - FSHIFT); | |
1460 | info->loads[2] = avenrun[2] << (SI_LOAD_SHIFT - FSHIFT); | |
1da177e4 | 1461 | |
d4d23add | 1462 | info->procs = nr_threads; |
1da177e4 LT |
1463 | } while (read_seqretry(&xtime_lock, seq)); |
1464 | ||
d4d23add KM |
1465 | si_meminfo(info); |
1466 | si_swapinfo(info); | |
1da177e4 LT |
1467 | |
1468 | /* | |
1469 | * If the sum of all the available memory (i.e. ram + swap) | |
1470 | * is less than can be stored in a 32 bit unsigned long then | |
1471 | * we can be binary compatible with 2.2.x kernels. If not, | |
1472 | * well, in that case 2.2.x was broken anyways... | |
1473 | * | |
1474 | * -Erik Andersen <andersee@debian.org> | |
1475 | */ | |
1476 | ||
d4d23add KM |
1477 | mem_total = info->totalram + info->totalswap; |
1478 | if (mem_total < info->totalram || mem_total < info->totalswap) | |
1da177e4 LT |
1479 | goto out; |
1480 | bitcount = 0; | |
d4d23add | 1481 | mem_unit = info->mem_unit; |
1da177e4 LT |
1482 | while (mem_unit > 1) { |
1483 | bitcount++; | |
1484 | mem_unit >>= 1; | |
1485 | sav_total = mem_total; | |
1486 | mem_total <<= 1; | |
1487 | if (mem_total < sav_total) | |
1488 | goto out; | |
1489 | } | |
1490 | ||
1491 | /* | |
1492 | * If mem_total did not overflow, multiply all memory values by | |
d4d23add | 1493 | * info->mem_unit and set it to 1. This leaves things compatible |
1da177e4 LT |
1494 | * with 2.2.x, and also retains compatibility with earlier 2.4.x |
1495 | * kernels... | |
1496 | */ | |
1497 | ||
d4d23add KM |
1498 | info->mem_unit = 1; |
1499 | info->totalram <<= bitcount; | |
1500 | info->freeram <<= bitcount; | |
1501 | info->sharedram <<= bitcount; | |
1502 | info->bufferram <<= bitcount; | |
1503 | info->totalswap <<= bitcount; | |
1504 | info->freeswap <<= bitcount; | |
1505 | info->totalhigh <<= bitcount; | |
1506 | info->freehigh <<= bitcount; | |
1507 | ||
1508 | out: | |
1509 | return 0; | |
1510 | } | |
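
/*
 * Worked example (sketch, not from this file): 512 MiB of RAM reported
 * with a 4 KiB mem_unit.  The loop above computes bitcount = 12 while
 * doubling mem_total twelve times; any wrap of the unsigned long during
 * the doubling aborts the conversion and leaves mem_unit untouched.
 */
static unsigned long sysinfo_scaling_example(void)
{
	unsigned long totalram = 131072;	/* 512 MiB in 4 KiB units */
	unsigned int mem_unit = 4096, bitcount = 0;

	while (mem_unit > 1) {			/* 4096 == 1 << 12 */
		bitcount++;
		mem_unit >>= 1;
	}
	return totalram << bitcount;		/* 536870912 bytes */
}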
1511 | ||
1512 | asmlinkage long sys_sysinfo(struct sysinfo __user *info) | |
1513 | { | |
1514 | struct sysinfo val; | |
1515 | ||
1516 | do_sysinfo(&val); | |
1da177e4 | 1517 | |
1da177e4 LT |
1518 | if (copy_to_user(info, &val, sizeof(struct sysinfo))) |
1519 | return -EFAULT; | |
1520 | ||
1521 | return 0; | |
1522 | } | |
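
/*
 * Userspace sketch: because do_sysinfo() may leave mem_unit > 1 on
 * large machines, a portable caller always multiplies by mem_unit
 * instead of assuming byte units.
 */
#include <stdio.h>
#include <sys/sysinfo.h>

int main(void)
{
	struct sysinfo si;

	if (sysinfo(&si) != 0)
		return 1;
	printf("total ram: %llu bytes\n",
	       (unsigned long long)si.totalram * si.mem_unit);
	return 0;
}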
1523 | ||
d730e882 IM |
1524 | /* |
1525 | * lockdep: we want to track each per-CPU base as a separate lock-class, | |
1526 | * but timer-bases are kmalloc()-ed, so we need to attach separate | |
1527 | * keys to them: | |
1528 | */ | |
1529 | static struct lock_class_key base_lock_keys[NR_CPUS]; | |
1530 | ||
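
/*
 * The same idiom works for any dynamically allocated object whose
 * spinlock would otherwise share one lock class with all its siblings.
 * A generic sketch with hypothetical names (the real use follows in
 * init_timers_cpu() below):
 */
static struct lock_class_key my_lock_keys[NR_CPUS];

struct my_base {
	spinlock_t lock;
};

static struct my_base *my_alloc_base(int cpu)
{
	struct my_base *b = kmalloc(sizeof(*b), GFP_KERNEL);

	if (!b)
		return NULL;
	spin_lock_init(&b->lock);
	/* without this, every kmalloc()-ed base shares one class */
	lockdep_set_class(&b->lock, my_lock_keys + cpu);
	return b;
}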
a4a6198b | 1531 | static int __devinit init_timers_cpu(int cpu) |
1da177e4 LT |
1532 | { |
1533 | int j; | |
1534 | tvec_base_t *base; | |
ba6edfcd | 1535 | static char __devinitdata tvec_base_done[NR_CPUS]; |
55c888d6 | 1536 | |
ba6edfcd | 1537 | if (!tvec_base_done[cpu]) { |
a4a6198b JB |
1538 | static char boot_done; |
1539 | ||
a4a6198b | 1540 | if (boot_done) { |
ba6edfcd AM |
1541 | /* |
1542 | * The APs (secondary CPUs) use this path later in boot | |
1543 | */ | |
a4a6198b JB |
1544 | base = kmalloc_node(sizeof(*base), GFP_KERNEL, |
1545 | cpu_to_node(cpu)); | |
1546 | if (!base) | |
1547 | return -ENOMEM; | |
1548 | memset(base, 0, sizeof(*base)); | |
ba6edfcd | 1549 | per_cpu(tvec_bases, cpu) = base; |
a4a6198b | 1550 | } else { |
ba6edfcd AM |
1551 | /* |
1552 | * This is for the boot CPU - we use compile-time | |
1553 | * static initialisation because per-cpu memory isn't | |
1554 | * ready yet and because the memory allocators are not | |
1555 | * initialised either. | |
1556 | */ | |
a4a6198b | 1557 | boot_done = 1; |
ba6edfcd | 1558 | base = &boot_tvec_bases; |
a4a6198b | 1559 | } |
ba6edfcd AM |
1560 | tvec_base_done[cpu] = 1; |
1561 | } else { | |
1562 | base = per_cpu(tvec_bases, cpu); | |
a4a6198b | 1563 | } |
ba6edfcd | 1564 | |
3691c519 | 1565 | spin_lock_init(&base->lock); |
d730e882 IM |
1566 | lockdep_set_class(&base->lock, base_lock_keys + cpu); |
1567 | ||
1da177e4 LT |
1568 | for (j = 0; j < TVN_SIZE; j++) { |
1569 | INIT_LIST_HEAD(base->tv5.vec + j); | |
1570 | INIT_LIST_HEAD(base->tv4.vec + j); | |
1571 | INIT_LIST_HEAD(base->tv3.vec + j); | |
1572 | INIT_LIST_HEAD(base->tv2.vec + j); | |
1573 | } | |
1574 | for (j = 0; j < TVR_SIZE; j++) | |
1575 | INIT_LIST_HEAD(base->tv1.vec + j); | |
1576 | ||
1577 | base->timer_jiffies = jiffies; | |
a4a6198b | 1578 | return 0; |
1da177e4 LT |
1579 | } |
1580 | ||
1581 | #ifdef CONFIG_HOTPLUG_CPU | |
55c888d6 | 1582 | static void migrate_timer_list(tvec_base_t *new_base, struct list_head *head) |
1da177e4 LT |
1583 | { |
1584 | struct timer_list *timer; | |
1585 | ||
1586 | while (!list_empty(head)) { | |
1587 | timer = list_entry(head->next, struct timer_list, entry); | |
55c888d6 | 1588 | detach_timer(timer, 0); |
3691c519 | 1589 | timer->base = new_base; |
1da177e4 | 1590 | internal_add_timer(new_base, timer); |
1da177e4 | 1591 | } |
1da177e4 LT |
1592 | } |
1593 | ||
1594 | static void __devinit migrate_timers(int cpu) | |
1595 | { | |
1596 | tvec_base_t *old_base; | |
1597 | tvec_base_t *new_base; | |
1598 | int i; | |
1599 | ||
1600 | BUG_ON(cpu_online(cpu)); | |
a4a6198b JB |
1601 | old_base = per_cpu(tvec_bases, cpu); |
1602 | new_base = get_cpu_var(tvec_bases); | |
1da177e4 LT |
1603 | |
1604 | local_irq_disable(); | |
3691c519 ON |
1605 | spin_lock(&new_base->lock); |
1606 | spin_lock(&old_base->lock); | |
1607 | ||
1608 | BUG_ON(old_base->running_timer); | |
1da177e4 | 1609 | |
1da177e4 | 1610 | for (i = 0; i < TVR_SIZE; i++) |
55c888d6 ON |
1611 | migrate_timer_list(new_base, old_base->tv1.vec + i); |
1612 | for (i = 0; i < TVN_SIZE; i++) { | |
1613 | migrate_timer_list(new_base, old_base->tv2.vec + i); | |
1614 | migrate_timer_list(new_base, old_base->tv3.vec + i); | |
1615 | migrate_timer_list(new_base, old_base->tv4.vec + i); | |
1616 | migrate_timer_list(new_base, old_base->tv5.vec + i); | |
1617 | } | |
1618 | ||
3691c519 ON |
1619 | spin_unlock(&old_base->lock); |
1620 | spin_unlock(&new_base->lock); | |
1da177e4 LT |
1621 | local_irq_enable(); |
1622 | put_cpu_var(tvec_bases); | |
1da177e4 LT |
1623 | } |
1624 | #endif /* CONFIG_HOTPLUG_CPU */ | |
1625 | ||
8c78f307 | 1626 | static int __cpuinit timer_cpu_notify(struct notifier_block *self, |
1da177e4 LT |
1627 | unsigned long action, void *hcpu) |
1628 | { | |
1629 | long cpu = (long)hcpu; | |
1630 | switch(action) { | |
1631 | case CPU_UP_PREPARE: | |
a4a6198b JB |
1632 | if (init_timers_cpu(cpu) < 0) |
1633 | return NOTIFY_BAD; | |
1da177e4 LT |
1634 | break; |
1635 | #ifdef CONFIG_HOTPLUG_CPU | |
1636 | case CPU_DEAD: | |
1637 | migrate_timers(cpu); | |
1638 | break; | |
1639 | #endif | |
1640 | default: | |
1641 | break; | |
1642 | } | |
1643 | return NOTIFY_OK; | |
1644 | } | |
1645 | ||
8c78f307 | 1646 | static struct notifier_block __cpuinitdata timers_nb = { |
1da177e4 LT |
1647 | .notifier_call = timer_cpu_notify, |
1648 | }; | |
1649 | ||
1650 | ||
1651 | void __init init_timers(void) | |
1652 | { | |
07dccf33 | 1653 | int err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE, |
1da177e4 | 1654 | (void *)(long)smp_processor_id()); |
07dccf33 AM |
1655 | |
1656 | BUG_ON(err == NOTIFY_BAD); | |
1da177e4 LT |
1657 | register_cpu_notifier(&timers_nb); |
1658 | open_softirq(TIMER_SOFTIRQ, run_timer_softirq, NULL); | |
1659 | } | |
1660 | ||
1661 | #ifdef CONFIG_TIME_INTERPOLATION | |
1662 | ||
67890d70 CL |
1663 | struct time_interpolator *time_interpolator __read_mostly; |
1664 | static struct time_interpolator *time_interpolator_list __read_mostly; | |
1da177e4 LT |
1665 | static DEFINE_SPINLOCK(time_interpolator_lock); |
1666 | ||
3db5db4f | 1667 | static inline cycles_t time_interpolator_get_cycles(unsigned int src) |
1da177e4 LT |
1668 | { |
1669 | unsigned long (*x)(void); | |
1670 | ||
1671 | switch (src) | |
1672 | { | |
1673 | case TIME_SOURCE_FUNCTION: | |
1674 | x = time_interpolator->addr; | |
1675 | return x(); | |
1676 | ||
1677 | case TIME_SOURCE_MMIO64 : | |
685db65e | 1678 | return readq_relaxed((void __iomem *)time_interpolator->addr); |
1da177e4 LT |
1679 | |
1680 | case TIME_SOURCE_MMIO32 : | |
685db65e | 1681 | return readl_relaxed((void __iomem *)time_interpolator->addr); |
1da177e4 LT |
1682 | |
1683 | default: return get_cycles(); | |
1684 | } | |
1685 | } | |
1686 | ||
486d46ae | 1687 | static inline u64 time_interpolator_get_counter(int writelock) |
1da177e4 LT |
1688 | { |
1689 | unsigned int src = time_interpolator->source; | |
1690 | ||
1691 | if (time_interpolator->jitter) | |
1692 | { | |
3db5db4f HD |
1693 | cycles_t lcycle; |
1694 | cycles_t now; | |
1da177e4 LT |
1695 | |
1696 | do { | |
1697 | lcycle = time_interpolator->last_cycle; | |
1698 | now = time_interpolator_get_cycles(src); | |
1699 | if (lcycle && time_after(lcycle, now)) | |
1700 | return lcycle; | |
486d46ae AW |
1701 | |
1702 | /* When holding the xtime write lock, there's no need | |
1703 | * to add the overhead of the cmpxchg. Readers are | |
1704 | * forced to retry until the write lock is released. | |
1705 | */ | |
1706 | if (writelock) { | |
1707 | time_interpolator->last_cycle = now; | |
1708 | return now; | |
1709 | } | |
1da177e4 LT |
1710 | /* Keep track of the last timer value returned. The use of cmpxchg here |
1711 | * will cause contention in an SMP environment. | |
1712 | */ | |
1713 | } while (unlikely(cmpxchg(&time_interpolator->last_cycle, lcycle, now) != lcycle)); | |
1714 | return now; | |
1715 | } | |
1716 | else | |
1717 | return time_interpolator_get_cycles(src); | |
1718 | } | |
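
/*
 * The cmpxchg loop above is a lockless "monotonic latch": publish the
 * new reading only if no other CPU raced in between, and never let the
 * reported value move backwards.  Reduced to its essentials (sketch,
 * hypothetical names):
 */
static cycles_t latch_cycles(cycles_t *latched, cycles_t (*read_hw)(void))
{
	cycles_t old, now;

	do {
		old = *latched;
		now = read_hw();
		if (old && time_after(old, now))
			return old;	/* a racing CPU saw a later value */
	} while (cmpxchg(latched, old, now) != old);
	return now;
}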
1719 | ||
1720 | void time_interpolator_reset(void) | |
1721 | { | |
1722 | time_interpolator->offset = 0; | |
486d46ae | 1723 | time_interpolator->last_counter = time_interpolator_get_counter(1); |
1da177e4 LT |
1724 | } |
1725 | ||
1726 | #define GET_TI_NSECS(count,i) (((((count) - i->last_counter) & (i)->mask) * (i)->nsec_per_cyc) >> (i)->shift) | |
1727 | ||
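
/*
 * Worked example (sketch): nsec_per_cyc is precomputed by
 * register_time_interpolator() as (NSEC_PER_SEC << shift) / frequency.
 * For a 10 MHz counter with shift = 16:
 *     nsec_per_cyc = (1000000000 << 16) / 10000000 = 100 << 16
 * and a delta of 250 counter ticks converts to
 *     ((250 & mask) * nsec_per_cyc) >> 16 = 250 * 100 = 25000 ns,
 * i.e. 100 ns per tick, with the shift preserving sub-nanosecond
 * precision for frequencies that do not divide NSEC_PER_SEC evenly.
 */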
1728 | unsigned long time_interpolator_get_offset(void) | |
1729 | { | |
1730 | /* If we do not have a time interpolator set up then just return zero */ | |
1731 | if (!time_interpolator) | |
1732 | return 0; | |
1733 | ||
1734 | return time_interpolator->offset + | |
486d46ae | 1735 | GET_TI_NSECS(time_interpolator_get_counter(0), time_interpolator); |
1da177e4 LT |
1736 | } |
1737 | ||
1738 | #define INTERPOLATOR_ADJUST 65536 | |
1739 | #define INTERPOLATOR_MAX_SKIP 10*INTERPOLATOR_ADJUST | |
1740 | ||
4c7ee8de | 1741 | void time_interpolator_update(long delta_nsec) |
1da177e4 LT |
1742 | { |
1743 | u64 counter; | |
1744 | unsigned long offset; | |
1745 | ||
1746 | /* If there is no time interpolator set up then do nothing */ | |
1747 | if (!time_interpolator) | |
1748 | return; | |
1749 | ||
a5a0d52c AM |
1750 | /* |
1751 | * The interpolator compensates for late ticks by accumulating the late | |
1752 | * time in time_interpolator->offset. A tick earlier than expected will | |
1753 | * lead to a reset of the offset and a corresponding jump of the clock | |
1754 | * forward. Again this only works if the interpolator clock is running | |
1755 | * slightly slower than the regular clock and the tuning logic ensures | |
1756 | * that. | |
1757 | */ | |
1da177e4 | 1758 | |
486d46ae | 1759 | counter = time_interpolator_get_counter(1); |
a5a0d52c AM |
1760 | offset = time_interpolator->offset + |
1761 | GET_TI_NSECS(counter, time_interpolator); | |
1da177e4 LT |
1762 | |
1763 | if (delta_nsec < 0 || (unsigned long) delta_nsec < offset) | |
1764 | time_interpolator->offset = offset - delta_nsec; | |
1765 | else { | |
1766 | time_interpolator->skips++; | |
1767 | time_interpolator->ns_skipped += delta_nsec - offset; | |
1768 | time_interpolator->offset = 0; | |
1769 | } | |
1770 | time_interpolator->last_counter = counter; | |
1771 | ||
1772 | /* Tuning logic for the time interpolator, invoked every minute or so. | |
1773 | * Decrease the interpolator clock speed if no skips occurred and an offset is carried. | |
1774 | * Increase the interpolator clock speed if we skip too much time. | |
1775 | */ | |
1776 | if (jiffies % INTERPOLATOR_ADJUST == 0) | |
1777 | { | |
b20367a6 | 1778 | if (time_interpolator->skips == 0 && time_interpolator->offset > tick_nsec) |
1da177e4 LT |
1779 | time_interpolator->nsec_per_cyc--; |
1780 | if (time_interpolator->ns_skipped > INTERPOLATOR_MAX_SKIP && time_interpolator->offset == 0) | |
1781 | time_interpolator->nsec_per_cyc++; | |
1782 | time_interpolator->skips = 0; | |
1783 | time_interpolator->ns_skipped = 0; | |
1784 | } | |
1785 | } | |
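
/*
 * Worked example (sketch): a tick accounts for delta_nsec = 1000000 ns.
 * If the interpolator measured offset = 1000300 ns since the last tick,
 * the 300 ns surplus is carried in ->offset and reported to readers
 * until later ticks absorb it.  If instead offset = 999800 ns, the tick
 * skips 200 ns: ->offset is reset, skips/ns_skipped accumulate, and the
 * tuning block above eventually increments nsec_per_cyc so that the
 * interpolator runs slightly faster (and decrements it in the opposite,
 * offset-carrying case).
 */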
1786 | ||
1787 | static inline int | |
1788 | is_better_time_interpolator(struct time_interpolator *new) | |
1789 | { | |
1790 | if (!time_interpolator) | |
1791 | return 1; | |
1792 | return new->frequency > 2*time_interpolator->frequency || | |
1793 | (unsigned long)new->drift < (unsigned long)time_interpolator->drift; | |
1794 | } | |
1795 | ||
1796 | void | |
1797 | register_time_interpolator(struct time_interpolator *ti) | |
1798 | { | |
1799 | unsigned long flags; | |
1800 | ||
1801 | /* Sanity check */ | |
9f31252c | 1802 | BUG_ON(ti->frequency == 0 || ti->mask == 0); |
1da177e4 LT |
1803 | |
1804 | ti->nsec_per_cyc = ((u64)NSEC_PER_SEC << ti->shift) / ti->frequency; | |
1805 | spin_lock(&time_interpolator_lock); | |
1806 | write_seqlock_irqsave(&xtime_lock, flags); | |
1807 | if (is_better_time_interpolator(ti)) { | |
1808 | time_interpolator = ti; | |
1809 | time_interpolator_reset(); | |
1810 | } | |
1811 | write_sequnlock_irqrestore(&xtime_lock, flags); | |
1812 | ||
1813 | ti->next = time_interpolator_list; | |
1814 | time_interpolator_list = ti; | |
1815 | spin_unlock(&time_interpolator_lock); | |
1816 | } | |
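
/*
 * Sketch: how a platform clock driver might register a 32-bit
 * memory-mapped counter running at 25 MHz.  The names my_interp,
 * my_clock_init and my_counter_base are hypothetical; nsec_per_cyc is
 * filled in by register_time_interpolator() itself.
 */
static struct time_interpolator my_interp = {
	.source    = TIME_SOURCE_MMIO32,
	.shift     = 16,
	.frequency = 25000000,
	.drift     = -1,		/* drift unknown */
	.mask      = 0xffffffffLL,
};

static int __init my_clock_init(void)
{
	my_interp.addr = my_counter_base;	/* hypothetical ioremap()ed base */
	register_time_interpolator(&my_interp);
	return 0;
}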
1817 | ||
1818 | void | |
1819 | unregister_time_interpolator(struct time_interpolator *ti) | |
1820 | { | |
1821 | struct time_interpolator *curr, **prev; | |
1822 | unsigned long flags; | |
1823 | ||
1824 | spin_lock(&time_interpolator_lock); | |
1825 | prev = &time_interpolator_list; | |
1826 | for (curr = *prev; curr; curr = curr->next) { | |
1827 | if (curr == ti) { | |
1828 | *prev = curr->next; | |
1829 | break; | |
1830 | } | |
1831 | prev = &curr->next; | |
1832 | } | |
1833 | ||
1834 | write_seqlock_irqsave(&xtime_lock, flags); | |
1835 | if (ti == time_interpolator) { | |
1836 | /* we lost the best time-interpolator: */ | |
1837 | time_interpolator = NULL; | |
1838 | /* find the next-best interpolator */ | |
1839 | for (curr = time_interpolator_list; curr; curr = curr->next) | |
1840 | if (is_better_time_interpolator(curr)) | |
1841 | time_interpolator = curr; | |
1842 | time_interpolator_reset(); | |
1843 | } | |
1844 | write_sequnlock_irqrestore(&xtime_lock, flags); | |
1845 | spin_unlock(&time_interpolator_lock); | |
1846 | } | |
1847 | #endif /* CONFIG_TIME_INTERPOLATION */ | |
1848 | ||
1849 | /** | |
1850 | * msleep - sleep safely even with waitqueue interruptions | |
1851 | * @msecs: Time in milliseconds to sleep for | |
1852 | */ | |
1853 | void msleep(unsigned int msecs) | |
1854 | { | |
1855 | unsigned long timeout = msecs_to_jiffies(msecs) + 1; | |
1856 | ||
75bcc8c5 NA |
1857 | while (timeout) |
1858 | timeout = schedule_timeout_uninterruptible(timeout); | |
1da177e4 LT |
1859 | } |
1860 | ||
1861 | EXPORT_SYMBOL(msleep); | |
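
/*
 * The "+ 1" above guarantees a minimum sleep: msecs_to_jiffies() rounds
 * up, but the current jiffy may already be nearly over, so one extra
 * tick is added.  msleep() may therefore sleep longer than requested,
 * never shorter.  A typical driver-style use (sketch):
 */
msleep(50);	/* wait at least 50 ms, e.g. for hardware to settle */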
1862 | ||
1863 | /** | |
96ec3efd | 1864 | * msleep_interruptible - sleep waiting for signals |
1da177e4 LT |
1865 | * @msecs: Time in milliseconds to sleep for |
1866 | */ | |
1867 | unsigned long msleep_interruptible(unsigned int msecs) | |
1868 | { | |
1869 | unsigned long timeout = msecs_to_jiffies(msecs) + 1; | |
1870 | ||
75bcc8c5 NA |
1871 | while (timeout && !signal_pending(current)) |
1872 | timeout = schedule_timeout_interruptible(timeout); | |
1da177e4 LT |
1873 | return jiffies_to_msecs(timeout); |
1874 | } | |
1875 | ||
1876 | EXPORT_SYMBOL(msleep_interruptible); |
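
/*
 * Sketch: the interruptible variant returns the time remaining in
 * milliseconds when a signal ended the sleep early, or 0 when the full
 * period elapsed, so a caller can translate an early wake-up into
 * -EINTR (hypothetical helper name):
 */
static int example_settle_or_signal(void)
{
	unsigned long left = msleep_interruptible(1000);

	return left ? -EINTR : 0;
}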