/*
 *  linux/kernel/timer.c
 *
 *  Kernel internal timers, kernel timekeeping, basic process system calls
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
 *
 *  1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *              "A Kernel Model for Precision Timekeeping" by Dave Mills
 *  1998-12-24  Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
 *              serialize accesses to xtime/lost_ticks).
 *                              Copyright (C) 1998  Andrea Arcangeli
 *  1999-03-10  Improved NTP compatibility by Ulrich Windl
 *  2002-05-31  Move sys_sysinfo here and make its locking sane, Robert Love
 *  2000-10-05  Implemented scalable SMP per-CPU timer handling.
 *                              Copyright (C) 2000, 2001, 2002  Ingo Molnar
 *              Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 */

#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>

#ifdef CONFIG_TIME_INTERPOLATION
static void time_interpolator_update(long delta_nsec);
#else
#define time_interpolator_update(x)
#endif

u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

/*
 * per-CPU timer vector definitions:
 */
#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)
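/*
 * Illustrative arithmetic (not from the original source): with
 * CONFIG_BASE_SMALL=0 the root vector tv1 has TVR_SIZE = 256 slots, one
 * jiffy each, and each outer vector tv2..tv5 has TVN_SIZE = 64 slots.
 * Each level covers 2^TVN_BITS times the span of the level below it:
 *
 *      tv1: bits  0..7   ->          256 jiffies
 *      tv2: bits  8..13  ->        16384 jiffies
 *      tv3: bits 14..19  ->      1048576 jiffies
 *      tv4: bits 20..25  ->     67108864 jiffies
 *      tv5: bits 26..31  ->   4294967296 jiffies (full 32 bits)
 */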
typedef struct tvec_s {
        struct list_head vec[TVN_SIZE];
} tvec_t;

typedef struct tvec_root_s {
        struct list_head vec[TVR_SIZE];
} tvec_root_t;

struct tvec_t_base_s {
        spinlock_t lock;
        struct timer_list *running_timer;
        unsigned long timer_jiffies;
        tvec_root_t tv1;
        tvec_t tv2;
        tvec_t tv3;
        tvec_t tv4;
        tvec_t tv5;
} ____cacheline_aligned_in_smp;

typedef struct tvec_t_base_s tvec_base_t;

tvec_base_t boot_tvec_bases;
EXPORT_SYMBOL(boot_tvec_bases);
static DEFINE_PER_CPU(tvec_base_t *, tvec_bases) = &boot_tvec_bases;

static inline void set_running_timer(tvec_base_t *base,
                                        struct timer_list *timer)
{
#ifdef CONFIG_SMP
        base->running_timer = timer;
#endif
}

static void internal_add_timer(tvec_base_t *base, struct timer_list *timer)
{
        unsigned long expires = timer->expires;
        unsigned long idx = expires - base->timer_jiffies;
        struct list_head *vec;

        if (idx < TVR_SIZE) {
                int i = expires & TVR_MASK;
                vec = base->tv1.vec + i;
        } else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
                int i = (expires >> TVR_BITS) & TVN_MASK;
                vec = base->tv2.vec + i;
        } else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
                int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
                vec = base->tv3.vec + i;
        } else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
                int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
                vec = base->tv4.vec + i;
        } else if ((signed long) idx < 0) {
                /*
                 * Can happen if you add a timer with expires == jiffies,
                 * or you set a timer to go off in the past
                 */
                vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
        } else {
                int i;
                /* If the timeout is larger than 0xffffffff on 64-bit
                 * architectures then we use the maximum timeout:
                 */
                if (idx > 0xffffffffUL) {
                        idx = 0xffffffffUL;
                        expires = idx + base->timer_jiffies;
                }
                i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
                vec = base->tv5.vec + i;
        }
        /*
         * Timers are FIFO:
         */
        list_add_tail(&timer->entry, vec);
}
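/*
 * Worked example (illustrative, not from the original source): with
 * base->timer_jiffies == 1000 and timer->expires == 1300, idx is 300,
 * which is >= TVR_SIZE (256) but < 1 << (TVR_BITS + TVN_BITS) (16384),
 * so the timer lands in tv2 at slot (1300 >> 8) & 63 == 5.  It is not
 * placed at its exact expiry slot; it will be cascaded back into tv1
 * once timer_jiffies catches up with its tv2 slot.
 */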
/**
 * init_timer - initialize a timer.
 * @timer: the timer to be initialized
 *
 * init_timer() must be done to a timer prior to calling *any* of the
 * other timer functions.
 */
void fastcall init_timer(struct timer_list *timer)
{
        timer->entry.next = NULL;
        timer->base = __raw_get_cpu_var(tvec_bases);
}
EXPORT_SYMBOL(init_timer);

static inline void detach_timer(struct timer_list *timer,
                                int clear_pending)
{
        struct list_head *entry = &timer->entry;

        __list_del(entry->prev, entry->next);
        if (clear_pending)
                entry->next = NULL;
        entry->prev = LIST_POISON2;
}

/*
 * We are using hashed locking: holding per_cpu(tvec_bases).lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on ->tvX lists.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
static tvec_base_t *lock_timer_base(struct timer_list *timer,
                                        unsigned long *flags)
        __acquires(timer->base->lock)
{
        tvec_base_t *base;

        for (;;) {
                base = timer->base;
                if (likely(base != NULL)) {
                        spin_lock_irqsave(&base->lock, *flags);
                        if (likely(base == timer->base))
                                return base;
                        /* The timer has migrated to another CPU */
                        spin_unlock_irqrestore(&base->lock, *flags);
                }
                cpu_relax();
        }
}

int __mod_timer(struct timer_list *timer, unsigned long expires)
{
        tvec_base_t *base, *new_base;
        unsigned long flags;
        int ret = 0;

        BUG_ON(!timer->function);

        base = lock_timer_base(timer, &flags);

        if (timer_pending(timer)) {
                detach_timer(timer, 0);
                ret = 1;
        }

        new_base = __get_cpu_var(tvec_bases);

        if (base != new_base) {
                /*
                 * We are trying to schedule the timer on the local CPU.
                 * However we can't change timer's base while it is running,
                 * otherwise del_timer_sync() can't detect that the timer's
                 * handler has not yet finished. This also guarantees that
                 * the timer is serialized wrt itself.
                 */
                if (likely(base->running_timer != timer)) {
                        /* See the comment in lock_timer_base() */
                        timer->base = NULL;
                        spin_unlock(&base->lock);
                        base = new_base;
                        spin_lock(&base->lock);
                        timer->base = base;
                }
        }

        timer->expires = expires;
        internal_add_timer(base, timer);
        spin_unlock_irqrestore(&base->lock, flags);

        return ret;
}

EXPORT_SYMBOL(__mod_timer);

/**
 * add_timer_on - start a timer on a particular CPU
 * @timer: the timer to be added
 * @cpu: the CPU to start it on
 *
 * This is not very scalable on SMP. Double adds are not possible.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
        tvec_base_t *base = per_cpu(tvec_bases, cpu);
        unsigned long flags;

        BUG_ON(timer_pending(timer) || !timer->function);
        spin_lock_irqsave(&base->lock, flags);
        timer->base = base;
        internal_add_timer(base, timer);
        spin_unlock_irqrestore(&base->lock, flags);
}


/**
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer() is a more efficient way to update the expire field of an
 * active timer (if the timer is inactive it will be activated).
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
        BUG_ON(!timer->function);

        /*
         * This is a common optimization triggered by the
         * networking code - if the timer is re-modified
         * to be the same thing then just return:
         */
        if (timer->expires == expires && timer_pending(timer))
                return 1;

        return __mod_timer(timer, expires);
}

EXPORT_SYMBOL(mod_timer);
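/*
 * Minimal usage sketch (illustrative only; "my_func" and "my_timer" are
 * hypothetical names, not part of this file):
 *
 *      static void my_func(unsigned long data) { ... }
 *      static struct timer_list my_timer;
 *
 *      setup_timer(&my_timer, my_func, 0);
 *      mod_timer(&my_timer, jiffies + HZ);     -- fire in ~1 second
 *
 * mod_timer() both activates an inactive timer and re-arms a pending one.
 */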
/**
 * del_timer - deactivate a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and inactive
 * timers.
 *
 * The function returns whether it has deactivated a pending timer or not.
 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
 * active timer returns 1.)
 */
int del_timer(struct timer_list *timer)
{
        tvec_base_t *base;
        unsigned long flags;
        int ret = 0;

        if (timer_pending(timer)) {
                base = lock_timer_base(timer, &flags);
                if (timer_pending(timer)) {
                        detach_timer(timer, 1);
                        ret = 1;
                }
                spin_unlock_irqrestore(&base->lock, flags);
        }

        return ret;
}

EXPORT_SYMBOL(del_timer);

#ifdef CONFIG_SMP
/**
 * try_to_del_timer_sync - Try to deactivate a timer
 * @timer: the timer to deactivate
 *
 * This function tries to deactivate a timer. Upon successful (ret >= 0)
 * exit the timer is not queued and the handler is not running on any CPU.
 *
 * It must not be called from interrupt contexts.
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
        tvec_base_t *base;
        unsigned long flags;
        int ret = -1;

        base = lock_timer_base(timer, &flags);

        if (base->running_timer == timer)
                goto out;

        ret = 0;
        if (timer_pending(timer)) {
                detach_timer(timer, 1);
                ret = 1;
        }
out:
        spin_unlock_irqrestore(&base->lock, flags);

        return ret;
}

/**
 * del_timer_sync - deactivate a timer and wait for the handler to finish.
 * @timer: the timer to be deactivated
 *
 * This function only differs from del_timer() on SMP: besides deactivating
 * the timer it also makes sure the handler has finished executing on other
 * CPUs.
 *
 * Synchronization rules: callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts. The caller must not hold locks which would prevent
 * completion of the timer's handler. The timer's handler must not call
 * add_timer_on(). Upon exit the timer is not queued and the handler is
 * not running on any CPU.
 *
 * The function returns whether it has deactivated a pending timer or not.
 */
int del_timer_sync(struct timer_list *timer)
{
        for (;;) {
                int ret = try_to_del_timer_sync(timer);
                if (ret >= 0)
                        return ret;
                cpu_relax();
        }
}

EXPORT_SYMBOL(del_timer_sync);
#endif
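/*
 * Illustrative note (not from the original source): the synchronization
 * rules above matter in practice.  If the handler takes lock A and another
 * thread calls del_timer_sync() while holding A, try_to_del_timer_sync()
 * keeps returning -1 (handler running) and del_timer_sync() spins forever:
 *
 *      handler:                        teardown path:
 *        spin_lock(&A);                  spin_lock(&A);
 *        ...                             del_timer_sync(&t);  <- deadlock
 *
 * So drop any lock the handler may need before calling del_timer_sync().
 */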
static int cascade(tvec_base_t *base, tvec_t *tv, int index)
{
        /* cascade all the timers from tv up one level */
        struct timer_list *timer, *tmp;
        struct list_head tv_list;

        list_replace_init(tv->vec + index, &tv_list);

        /*
         * We are removing _all_ timers from the list, so we
         * don't have to detach them individually.
         */
        list_for_each_entry_safe(timer, tmp, &tv_list, entry) {
                BUG_ON(timer->base != base);
                internal_add_timer(base, timer);
        }

        return index;
}
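/*
 * Illustrative example (not from the original source): continuing the
 * internal_add_timer() example, the timer parked in tv2 slot 5 stays
 * there until base->timer_jiffies reaches 1280 (low 8 bits zero, tv2
 * index 5).  At that point __run_timers() cascades slot 5: every timer
 * in it is re-hashed with internal_add_timer(), and ours - now less than
 * 256 ticks away - drops into its exact tv1 slot (1300 & 255 == 20).
 */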
#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)

/**
 * __run_timers - run all expired timers (if any) on this CPU.
 * @base: the timer vector to be processed.
 *
 * This function cascades all vectors and executes all expired timer
 * vectors.
 */
static inline void __run_timers(tvec_base_t *base)
{
        struct timer_list *timer;

        spin_lock_irq(&base->lock);
        while (time_after_eq(jiffies, base->timer_jiffies)) {
                struct list_head work_list;
                struct list_head *head = &work_list;
                int index = base->timer_jiffies & TVR_MASK;

                /*
                 * Cascade timers:
                 */
                if (!index &&
                        (!cascade(base, &base->tv2, INDEX(0))) &&
                                (!cascade(base, &base->tv3, INDEX(1))) &&
                                        !cascade(base, &base->tv4, INDEX(2)))
                        cascade(base, &base->tv5, INDEX(3));
                ++base->timer_jiffies;
                list_replace_init(base->tv1.vec + index, &work_list);
                while (!list_empty(head)) {
                        void (*fn)(unsigned long);
                        unsigned long data;

                        timer = list_entry(head->next, struct timer_list, entry);
                        fn = timer->function;
                        data = timer->data;

                        set_running_timer(base, timer);
                        detach_timer(timer, 1);
                        spin_unlock_irq(&base->lock);
                        {
                                int preempt_count = preempt_count();
                                fn(data);
                                if (preempt_count != preempt_count()) {
                                        printk(KERN_WARNING "huh, entered %p "
                                               "with preempt_count %08x, exited"
                                               " with %08x?\n",
                                               fn, preempt_count,
                                               preempt_count());
                                        BUG();
                                }
                        }
                        spin_lock_irq(&base->lock);
                }
        }
        set_running_timer(base, NULL);
        spin_unlock_irq(&base->lock);
}

#ifdef CONFIG_NO_IDLE_HZ
/*
 * Find out when the next timer event is due to happen. This
 * is used on S/390 to stop all activity when a cpu is idle.
 * This function needs to be called with interrupts disabled.
 */
unsigned long next_timer_interrupt(void)
{
        tvec_base_t *base;
        struct list_head *list;
        struct timer_list *nte;
        unsigned long expires;
        unsigned long hr_expires = MAX_JIFFY_OFFSET;
        ktime_t hr_delta;
        tvec_t *varray[4];
        int i, j;

        hr_delta = hrtimer_get_next_event();
        if (hr_delta.tv64 != KTIME_MAX) {
                struct timespec tsdelta;
                tsdelta = ktime_to_timespec(hr_delta);
                hr_expires = timespec_to_jiffies(&tsdelta);
                if (hr_expires < 3)
                        return hr_expires + jiffies;
        }
        hr_expires += jiffies;

        base = __get_cpu_var(tvec_bases);
        spin_lock(&base->lock);
        expires = base->timer_jiffies + (LONG_MAX >> 1);
        list = NULL;

        /* Look for timer events in tv1. */
        j = base->timer_jiffies & TVR_MASK;
        do {
                list_for_each_entry(nte, base->tv1.vec + j, entry) {
                        expires = nte->expires;
                        if (j < (base->timer_jiffies & TVR_MASK))
                                list = base->tv2.vec + (INDEX(0));
                        goto found;
                }
                j = (j + 1) & TVR_MASK;
        } while (j != (base->timer_jiffies & TVR_MASK));

        /* Check tv2-tv5. */
        varray[0] = &base->tv2;
        varray[1] = &base->tv3;
        varray[2] = &base->tv4;
        varray[3] = &base->tv5;
        for (i = 0; i < 4; i++) {
                j = INDEX(i);
                do {
                        if (list_empty(varray[i]->vec + j)) {
                                j = (j + 1) & TVN_MASK;
                                continue;
                        }
                        list_for_each_entry(nte, varray[i]->vec + j, entry)
                                if (time_before(nte->expires, expires))
                                        expires = nte->expires;
                        if (j < (INDEX(i)) && i < 3)
                                list = varray[i + 1]->vec + (INDEX(i + 1));
                        goto found;
                } while (j != (INDEX(i)));
        }
found:
        if (list) {
                /*
                 * The search wrapped. We need to look at the next list
                 * from next tv element that would cascade into tv element
                 * where we found the timer element.
                 */
                list_for_each_entry(nte, list, entry) {
                        if (time_before(nte->expires, expires))
                                expires = nte->expires;
                }
        }
        spin_unlock(&base->lock);

        /*
         * It can happen that other CPUs service timer IRQs and increment
         * jiffies, but we have not yet got a local timer tick to process
         * the timer wheels.  In that case, the expiry time can be before
         * jiffies, but since the high-resolution timer here is relative to
         * jiffies, the default expression when high-resolution timers are
         * not active,
         *
         *      time_before(MAX_JIFFY_OFFSET + jiffies, expires)
         *
         * would falsely evaluate to true.  If that is the case, just
         * return jiffies so that we can immediately fire the local timer
         */
        if (time_before(expires, jiffies))
                return jiffies;

        if (time_before(hr_expires, expires))
                return hr_expires;

        return expires;
}
#endif

/******************************************************************/

/*
 * Timekeeping variables
 */
unsigned long tick_usec = TICK_USEC;            /* USER_HZ period (usec) */
unsigned long tick_nsec = TICK_NSEC;            /* ACTHZ period (nsec) */

/*
 * The current time
 * wall_to_monotonic is what we need to add to xtime (or xtime corrected
 * for sub jiffie times) to get to monotonic time.  Monotonic is pegged
 * at zero at system boot time, so wall_to_monotonic will be negative,
 * however, we will ALWAYS keep the tv_nsec part positive so we can use
 * the usual normalization.
 */
struct timespec xtime __attribute__ ((aligned (16)));
struct timespec wall_to_monotonic __attribute__ ((aligned (16)));

EXPORT_SYMBOL(xtime);

/* Don't completely fail for HZ > 500.  */
int tickadj = 500/HZ ? : 1;             /* microsecs */


/*
 * phase-lock loop variables
 */
/* TIME_ERROR prevents overwriting the CMOS clock */
int time_state = TIME_OK;               /* clock synchronization status */
int time_status = STA_UNSYNC;           /* clock status bits */
long time_offset;                       /* time adjustment (us) */
long time_constant = 2;                 /* pll time constant */
long time_tolerance = MAXFREQ;          /* frequency tolerance (ppm) */
long time_precision = 1;                /* clock precision (us) */
long time_maxerror = NTP_PHASE_LIMIT;   /* maximum error (us) */
long time_esterror = NTP_PHASE_LIMIT;   /* estimated error (us) */
long time_freq = (((NSEC_PER_SEC + HZ/2) % HZ - HZ/2) << SHIFT_USEC) / NSEC_PER_USEC;
                                        /* frequency offset (scaled ppm) */
static long time_adj;                   /* tick adjust (scaled 1 / HZ) */
long time_reftime;                      /* time at last adjustment (s) */
long time_adjust;
long time_next_adjust;

/*
 * this routine handles the overflow of the microsecond field
 *
 * The tricky bits of code to handle the accurate clock support
 * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
 * They were originally developed for SUN and DEC kernels.
 * All the kudos should go to Dave for this stuff.
 */
static void second_overflow(void)
{
        long ltemp;

        /* Bump the maxerror field */
        time_maxerror += time_tolerance >> SHIFT_USEC;
        if (time_maxerror > NTP_PHASE_LIMIT) {
                time_maxerror = NTP_PHASE_LIMIT;
                time_status |= STA_UNSYNC;
        }

        /*
         * Leap second processing. If in leap-insert state at the end of the
         * day, the system clock is set back one second; if in leap-delete
         * state, the system clock is set ahead one second. The microtime()
         * routine or external clock driver will ensure that reported time is
         * always monotonic. The ugly divides should be replaced.
         */
        switch (time_state) {
        case TIME_OK:
                if (time_status & STA_INS)
                        time_state = TIME_INS;
                else if (time_status & STA_DEL)
                        time_state = TIME_DEL;
                break;
        case TIME_INS:
                if (xtime.tv_sec % 86400 == 0) {
                        xtime.tv_sec--;
                        wall_to_monotonic.tv_sec++;
                        /*
                         * The timer interpolator will make time change
                         * gradually instead of an immediate jump by one second
                         */
                        time_interpolator_update(-NSEC_PER_SEC);
                        time_state = TIME_OOP;
                        clock_was_set();
                        printk(KERN_NOTICE "Clock: inserting leap second "
                                        "23:59:60 UTC\n");
                }
                break;
        case TIME_DEL:
                if ((xtime.tv_sec + 1) % 86400 == 0) {
                        xtime.tv_sec++;
                        wall_to_monotonic.tv_sec--;
                        /*
                         * Use of time interpolator for a gradual change of
                         * time
                         */
                        time_interpolator_update(NSEC_PER_SEC);
                        time_state = TIME_WAIT;
                        clock_was_set();
                        printk(KERN_NOTICE "Clock: deleting leap second "
                                        "23:59:59 UTC\n");
                }
                break;
        case TIME_OOP:
                time_state = TIME_WAIT;
                break;
        case TIME_WAIT:
                if (!(time_status & (STA_INS | STA_DEL)))
                        time_state = TIME_OK;
        }

        /*
         * Compute the phase adjustment for the next second. In PLL mode, the
         * offset is reduced by a fixed factor times the time constant. In FLL
         * mode the offset is used directly. In either mode, the maximum phase
         * adjustment for each second is clamped so as to spread the adjustment
         * over not more than the number of seconds between updates.
         */
        ltemp = time_offset;
        if (!(time_status & STA_FLL))
                ltemp = shift_right(ltemp, SHIFT_KG + time_constant);
        ltemp = min(ltemp, (MAXPHASE / MINSEC) << SHIFT_UPDATE);
        ltemp = max(ltemp, -(MAXPHASE / MINSEC) << SHIFT_UPDATE);
        time_offset -= ltemp;
        time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);

        /*
         * Compute the frequency estimate and additional phase adjustment due
         * to frequency error for the next second.
         */
        ltemp = time_freq;
        time_adj += shift_right(ltemp, (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE));

#if HZ == 100
        /*
         * Compensate for (HZ==100) != (1 << SHIFT_HZ). Add 25% and 3.125% to
         * get 128.125; => only 0.125% error (p. 14)
         */
        time_adj += shift_right(time_adj, 2) + shift_right(time_adj, 5);
#endif
#if HZ == 250
        /*
         * Compensate for (HZ==250) != (1 << SHIFT_HZ). Add 1.5625% and
         * 0.78125% to get 255.85938; => only 0.05% error (p. 14)
         */
        time_adj += shift_right(time_adj, 6) + shift_right(time_adj, 7);
#endif
#if HZ == 1000
        /*
         * Compensate for (HZ==1000) != (1 << SHIFT_HZ). Add 1.5625% and
         * 0.78125% to get 1023.4375; => only 0.05% error (p. 14)
         */
        time_adj += shift_right(time_adj, 6) + shift_right(time_adj, 7);
#endif
}
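/*
 * Illustrative walk-through (not from the original source): to insert a
 * leap second, NTP sets STA_INS, so second_overflow() moves the state
 * machine TIME_OK -> TIME_INS.  At the next midnight UTC boundary
 * (xtime.tv_sec % 86400 == 0) the wall clock is stepped back one second
 * while wall_to_monotonic is stepped forward, keeping monotonic time
 * smooth; userspace effectively sees 23:59:59 repeated as 23:59:60.
 */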
/*
 * Returns how many microseconds we need to add to xtime this tick
 * in doing an adjustment requested with adjtime.
 */
static long adjtime_adjustment(void)
{
        long time_adjust_step;

        time_adjust_step = time_adjust;
        if (time_adjust_step) {
                /*
                 * We are doing an adjtime thing. Prepare time_adjust_step to
                 * be within bounds. Note that a positive time_adjust means we
                 * want the clock to run faster.
                 *
                 * Limit the amount of the step to be in the range
                 * -tickadj .. +tickadj
                 */
                time_adjust_step = min(time_adjust_step, (long)tickadj);
                time_adjust_step = max(time_adjust_step, (long)-tickadj);
        }
        return time_adjust_step;
}

/* in the NTP reference this is called "hardclock()" */
static void update_ntp_one_tick(void)
{
        long time_adjust_step;

        time_adjust_step = adjtime_adjustment();
        if (time_adjust_step)
                /* Reduce by this step the amount of time left */
                time_adjust -= time_adjust_step;

        /* Changes by adjtime() do not take effect till next tick. */
        if (time_next_adjust != 0) {
                time_adjust = time_next_adjust;
                time_next_adjust = 0;
        }
}

/*
 * Return how long ticks are at the moment, that is, how much time
 * update_wall_time_one_tick will add to xtime next time we call it
 * (assuming no calls to do_adjtimex in the meantime).
 * The return value is in fixed-point nanoseconds shifted by the
 * specified number of bits to the right of the binary point.
 * This function has no side-effects.
 */
u64 current_tick_length(void)
{
        long delta_nsec;
        u64 ret;

        /* calculate the finest interval NTP will allow.
         * ie: nanosecond value shifted by (SHIFT_SCALE - 10)
         */
        delta_nsec = tick_nsec + adjtime_adjustment() * 1000;
        ret = (u64)delta_nsec << TICK_LENGTH_SHIFT;
        ret += (s64)time_adj << (TICK_LENGTH_SHIFT - (SHIFT_SCALE - 10));

        return ret;
}
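/*
 * Illustrative arithmetic (not from the original source): with HZ=1000,
 * tick_nsec is about 999849 ns on a classic PIT-driven x86 box (ACTHZ is
 * slightly above 1000).  If adjtime() still has 5 usec left to apply and
 * tickadj limits the per-tick step to 1 usec, adjtime_adjustment()
 * returns 1, so delta_nsec = 999849 + 1000 = 1000849 ns, which is then
 * scaled up by TICK_LENGTH_SHIFT to keep sub-nanosecond precision.
 */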
/* XXX - all of this timekeeping code should be later moved to time.c */
#include <linux/clocksource.h>
static struct clocksource *clock; /* pointer to current clocksource */

#ifdef CONFIG_GENERIC_TIME
/**
 * __get_nsec_offset - Returns nanoseconds since last call to periodic_hook
 *
 * private function, must hold xtime_lock lock when being
 * called. Returns the number of nanoseconds since the
 * last call to update_wall_time() (adjusted by NTP scaling)
 */
static inline s64 __get_nsec_offset(void)
{
        cycle_t cycle_now, cycle_delta;
        s64 ns_offset;

        /* read clocksource: */
        cycle_now = clocksource_read(clock);

        /* calculate the delta since the last update_wall_time: */
        cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

        /* convert to nanoseconds: */
        ns_offset = cyc2ns(clock, cycle_delta);

        return ns_offset;
}
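/*
 * Illustrative note (not from the original source): cyc2ns() performs the
 * standard clocksource scaling, roughly
 *
 *      ns = (cycle_delta * clock->mult) >> clock->shift;
 *
 * e.g. a 4 MHz counter could use mult chosen so that each cycle counts
 * for 250 ns.  The "& clock->mask" above makes the delta wrap correctly
 * for counters narrower than 64 bits.
 */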
/**
 * __get_realtime_clock_ts - Returns the time of day in a timespec
 * @ts: pointer to the timespec to be set
 *
 * Returns the time of day in a timespec. Used by
 * do_gettimeofday() and get_realtime_clock_ts().
 */
static inline void __get_realtime_clock_ts(struct timespec *ts)
{
        unsigned long seq;
        s64 nsecs;

        do {
                seq = read_seqbegin(&xtime_lock);

                *ts = xtime;
                nsecs = __get_nsec_offset();

        } while (read_seqretry(&xtime_lock, seq));

        timespec_add_ns(ts, nsecs);
}

/**
 * getnstimeofday - Returns the time of day in a timespec
 * @ts: pointer to the timespec to be set
 *
 * Returns the time of day in a timespec.
 */
void getnstimeofday(struct timespec *ts)
{
        __get_realtime_clock_ts(ts);
}

EXPORT_SYMBOL(getnstimeofday);

/**
 * do_gettimeofday - Returns the time of day in a timeval
 * @tv: pointer to the timeval to be set
 *
 * NOTE: Users should be converted to using get_realtime_clock_ts()
 */
void do_gettimeofday(struct timeval *tv)
{
        struct timespec now;

        __get_realtime_clock_ts(&now);
        tv->tv_sec = now.tv_sec;
        tv->tv_usec = now.tv_nsec/1000;
}

EXPORT_SYMBOL(do_gettimeofday);
/**
 * do_settimeofday - Sets the time of day
 * @tv: pointer to the timespec variable containing the new time
 *
 * Sets the time of day to the new time, updates NTP and notifies hrtimers
 */
int do_settimeofday(struct timespec *tv)
{
        unsigned long flags;
        time_t wtm_sec, sec = tv->tv_sec;
        long wtm_nsec, nsec = tv->tv_nsec;

        if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
                return -EINVAL;

        write_seqlock_irqsave(&xtime_lock, flags);

        nsec -= __get_nsec_offset();

        wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
        wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

        set_normalized_timespec(&xtime, sec, nsec);
        set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

        clock->error = 0;
        ntp_clear();

        write_sequnlock_irqrestore(&xtime_lock, flags);

        /* signal hrtimers about time change */
        clock_was_set();

        return 0;
}

EXPORT_SYMBOL(do_settimeofday);

/**
 * change_clocksource - Swaps clocksources if a new one is available
 *
 * Accumulates current time interval and initializes new clocksource
 */
static int change_clocksource(void)
{
        struct clocksource *new;
        cycle_t now;
        u64 nsec;
        new = clocksource_get_next();
        if (clock != new) {
                now = clocksource_read(new);
                nsec = __get_nsec_offset();
                timespec_add_ns(&xtime, nsec);

                clock = new;
                clock->cycle_last = now;
                printk(KERN_INFO "Time: %s clocksource has been installed.\n",
                       clock->name);
                return 1;
        } else if (clock->update_callback) {
                return clock->update_callback();
        }
        return 0;
}
#else
#define change_clocksource() (0)
#endif

/**
 * timekeeping_is_continuous - check to see if timekeeping is free running
 */
int timekeeping_is_continuous(void)
{
        unsigned long seq;
        int ret;

        do {
                seq = read_seqbegin(&xtime_lock);

                ret = clock->is_continuous;

        } while (read_seqretry(&xtime_lock, seq));

        return ret;
}

/*
 * timekeeping_init - Initializes the clocksource and common timekeeping values
 */
void __init timekeeping_init(void)
{
        unsigned long flags;

        write_seqlock_irqsave(&xtime_lock, flags);
        clock = clocksource_get_next();
        clocksource_calculate_interval(clock, tick_nsec);
        clock->cycle_last = clocksource_read(clock);
        ntp_clear();
        write_sequnlock_irqrestore(&xtime_lock, flags);
}


static int timekeeping_suspended;
/**
 * timekeeping_resume - Resumes the generic timekeeping subsystem.
 * @dev: unused
 *
 * This is for the generic clocksource timekeeping.
 * xtime/wall_to_monotonic/jiffies/wall_jiffies/etc are
 * still managed by arch specific suspend/resume code.
 */
static int timekeeping_resume(struct sys_device *dev)
{
        unsigned long flags;

        write_seqlock_irqsave(&xtime_lock, flags);
        /* restart the last cycle value */
        clock->cycle_last = clocksource_read(clock);
        clock->error = 0;
        timekeeping_suspended = 0;
        write_sequnlock_irqrestore(&xtime_lock, flags);
        return 0;
}

static int timekeeping_suspend(struct sys_device *dev, pm_message_t state)
{
        unsigned long flags;

        write_seqlock_irqsave(&xtime_lock, flags);
        timekeeping_suspended = 1;
        write_sequnlock_irqrestore(&xtime_lock, flags);
        return 0;
}

/* sysfs resume/suspend bits for timekeeping */
static struct sysdev_class timekeeping_sysclass = {
        .resume         = timekeeping_resume,
        .suspend        = timekeeping_suspend,
        set_kset_name("timekeeping"),
};

static struct sys_device device_timer = {
        .id     = 0,
        .cls    = &timekeeping_sysclass,
};

static int __init timekeeping_init_device(void)
{
        int error = sysdev_class_register(&timekeeping_sysclass);
        if (!error)
                error = sysdev_register(&device_timer);
        return error;
}

device_initcall(timekeeping_init_device);

/*
 * If the error is already larger, we look ahead even further
 * to compensate for late or lost adjustments.
 */
static __always_inline int clocksource_bigadjust(s64 error, s64 *interval, s64 *offset)
{
        s64 tick_error, i;
        u32 look_ahead, adj;
        s32 error2, mult;

        /*
         * Use the current error value to determine how much to look ahead.
         * The larger the error the slower we adjust for it to avoid problems
         * with losing too many ticks, otherwise we would overadjust and
         * produce an even larger error.  The smaller the adjustment the
         * faster we try to adjust for it, as lost ticks can do less harm
         * here.  This is tuned so that an error of about 1 msec is adjusted
         * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
         */
        error2 = clock->error >> (TICK_LENGTH_SHIFT + 22 - 2 * SHIFT_HZ);
        error2 = abs(error2);
        for (look_ahead = 0; error2 > 0; look_ahead++)
                error2 >>= 2;

        /*
         * Now calculate the error in (1 << look_ahead) ticks, but first
         * remove the single look ahead already included in the error.
         */
        tick_error = current_tick_length() >> (TICK_LENGTH_SHIFT - clock->shift + 1);
        tick_error -= clock->xtime_interval >> 1;
        error = ((error - tick_error) >> look_ahead) + tick_error;

        /* Finally calculate the adjustment shift value. */
        i = *interval;
        mult = 1;
        if (error < 0) {
                error = -error;
                *interval = -*interval;
                *offset = -*offset;
                mult = -1;
        }
        for (adj = 0; error > i; adj++)
                error >>= 1;

        *interval <<= adj;
        *offset <<= adj;
        return mult << adj;
}

/*
 * Adjust the multiplier to reduce the error value,
 * this is optimized for the most common adjustments of -1,0,1,
 * for other values we can do a bit more work.
 */
static void clocksource_adjust(struct clocksource *clock, s64 offset)
{
        s64 error, interval = clock->cycle_interval;
        int adj;

        error = clock->error >> (TICK_LENGTH_SHIFT - clock->shift - 1);
        if (error > interval) {
                error >>= 2;
                if (likely(error <= interval))
                        adj = 1;
                else
                        adj = clocksource_bigadjust(error, &interval, &offset);
        } else if (error < -interval) {
                error >>= 2;
                if (likely(error >= -interval)) {
                        adj = -1;
                        interval = -interval;
                        offset = -offset;
                } else
                        adj = clocksource_bigadjust(error, &interval, &offset);
        } else
                return;

        clock->mult += adj;
        clock->xtime_interval += interval;
        clock->xtime_nsec -= offset;
        clock->error -= (interval - offset) << (TICK_LENGTH_SHIFT - clock->shift);
}
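/*
 * Illustrative note (not from the original source): clock->error
 * accumulates, in TICK_LENGTH_SHIFT fixed point, the difference between
 * what NTP says a tick should be and what the clocksource delivered.
 * If the hardware runs slightly slow, the error grows positive until it
 * exceeds one cycle_interval, and clocksource_adjust() bumps clock->mult
 * by 1, making each future cycle count for slightly more nanoseconds;
 * a fast clock drives mult the other way.
 */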
/**
 * update_wall_time - Uses the current clocksource to increment the wall time
 *
 * Called from the timer interrupt, must hold a write on xtime_lock.
 */
static void update_wall_time(void)
{
        cycle_t offset;

        /* Make sure we're fully resumed: */
        if (unlikely(timekeeping_suspended))
                return;

#ifdef CONFIG_GENERIC_TIME
        offset = (clocksource_read(clock) - clock->cycle_last) & clock->mask;
#else
        offset = clock->cycle_interval;
#endif
        clock->xtime_nsec += (s64)xtime.tv_nsec << clock->shift;

        /* normally this loop will run just once, however in the
         * case of lost or late ticks, it will accumulate correctly.
         */
        while (offset >= clock->cycle_interval) {
                /* accumulate one interval */
                clock->xtime_nsec += clock->xtime_interval;
                clock->cycle_last += clock->cycle_interval;
                offset -= clock->cycle_interval;

                if (clock->xtime_nsec >= (u64)NSEC_PER_SEC << clock->shift) {
                        clock->xtime_nsec -= (u64)NSEC_PER_SEC << clock->shift;
                        xtime.tv_sec++;
                        second_overflow();
                }

                /* interpolator bits */
                time_interpolator_update(clock->xtime_interval
                                                >> clock->shift);
                /* increment the NTP state machine */
                update_ntp_one_tick();

                /* accumulate error between NTP and clock interval */
                clock->error += current_tick_length();
                clock->error -= clock->xtime_interval << (TICK_LENGTH_SHIFT - clock->shift);
        }

        /* correct the clock when NTP error is too big */
        clocksource_adjust(clock, offset);

        /* store full nanoseconds into xtime */
        xtime.tv_nsec = (s64)clock->xtime_nsec >> clock->shift;
        clock->xtime_nsec -= (s64)xtime.tv_nsec << clock->shift;

        /* check to see if there is a new clocksource to use */
        if (change_clocksource()) {
                clock->error = 0;
                clock->xtime_nsec = 0;
                clocksource_calculate_interval(clock, tick_nsec);
        }
}

/*
 * Called from the timer interrupt handler to charge one tick to the current
 * process.  user_tick is 1 if the tick is user time, 0 for system.
 */
void update_process_times(int user_tick)
{
        struct task_struct *p = current;
        int cpu = smp_processor_id();

        /* Note: this timer irq context must be accounted for as well. */
        if (user_tick)
                account_user_time(p, jiffies_to_cputime(1));
        else
                account_system_time(p, HARDIRQ_OFFSET, jiffies_to_cputime(1));
        run_local_timers();
        if (rcu_pending(cpu))
                rcu_check_callbacks(cpu, user_tick);
        scheduler_tick();
        run_posix_cpu_timers(p);
}

/*
 * Nr of active tasks - counted in fixed-point numbers
 */
static unsigned long count_active_tasks(void)
{
        return nr_active() * FIXED_1;
}

/*
 * Hmm.. Changed this, as the GNU make sources (load.c) seems to
 * imply that avenrun[] is the standard name for this kind of thing.
 * Nothing else seems to be standardized: the fractional size etc
 * all seem to differ on different machines.
 *
 * Requires xtime_lock to access.
 */
unsigned long avenrun[3];

EXPORT_SYMBOL(avenrun);

/*
 * calc_load - given tick count, update the avenrun load estimates.
 * This is called while holding a write_lock on xtime_lock.
 */
static inline void calc_load(unsigned long ticks)
{
        unsigned long active_tasks; /* fixed-point */
        static int count = LOAD_FREQ;

        count -= ticks;
        if (count < 0) {
                count += LOAD_FREQ;
                active_tasks = count_active_tasks();
                CALC_LOAD(avenrun[0], EXP_1, active_tasks);
                CALC_LOAD(avenrun[1], EXP_5, active_tasks);
                CALC_LOAD(avenrun[2], EXP_15, active_tasks);
        }
}
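/*
 * Illustrative arithmetic (not from the original source): avenrun[] is
 * kept in FSHIFT (11-bit) fixed point, so FIXED_1 == 2048 represents a
 * load of 1.0.  CALC_LOAD computes an exponential moving average, roughly
 *
 *      load = load * exp + active * (FIXED_1 - exp);  load >>= FSHIFT;
 *
 * and /proc/loadavg prints avenrun[0] >> FSHIFT as the integer part.
 */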
/* jiffies at the most recent update of wall time */
unsigned long wall_jiffies = INITIAL_JIFFIES;

/*
 * This read-write spinlock protects us from races in SMP while
 * playing with xtime and avenrun.
 */
#ifndef ARCH_HAVE_XTIME_LOCK
__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);

EXPORT_SYMBOL(xtime_lock);
#endif

/*
 * This function runs timers and the timer-tq in bottom half context.
 */
static void run_timer_softirq(struct softirq_action *h)
{
        tvec_base_t *base = __get_cpu_var(tvec_bases);

        hrtimer_run_queues();
        if (time_after_eq(jiffies, base->timer_jiffies))
                __run_timers(base);
}

/*
 * Called by the local, per-CPU timer interrupt on SMP.
 */
void run_local_timers(void)
{
        raise_softirq(TIMER_SOFTIRQ);
        softlockup_tick();
}

/*
 * Called by the timer interrupt. xtime_lock must already be taken
 * by the timer IRQ!
 */
static inline void update_times(void)
{
        unsigned long ticks;

        ticks = jiffies - wall_jiffies;
        wall_jiffies += ticks;
        update_wall_time();
        calc_load(ticks);
}

/*
 * The 64-bit jiffies value is not atomic - you MUST NOT read it
 * without sampling the sequence number in xtime_lock.
 * jiffies is defined in the linker script...
 */

void do_timer(struct pt_regs *regs)
{
        jiffies_64++;
        /* prevent loading jiffies before storing new jiffies_64 value. */
        barrier();
        update_times();
}

#ifdef __ARCH_WANT_SYS_ALARM

/*
 * For backwards compatibility?  This can be done in libc so Alpha
 * and all newer ports shouldn't need it.
 */
asmlinkage unsigned long sys_alarm(unsigned int seconds)
{
        return alarm_setitimer(seconds);
}

#endif

#ifndef __alpha__

/*
 * The Alpha uses getxpid, getxuid, and getxgid instead.  Maybe this
 * should be moved into arch/i386 instead?
 */

/**
 * sys_getpid - return the thread group id of the current process
 *
 * Note, despite the name, this returns the tgid not the pid.  The tgid and
 * the pid are identical unless CLONE_THREAD was specified on clone() in
 * which case the tgid is the same in all threads of the same group.
 *
 * This is SMP safe as current->tgid does not change.
 */
asmlinkage long sys_getpid(void)
{
        return current->tgid;
}

/*
 * Accessing ->real_parent is not SMP-safe, it could
 * change from under us. However, we can use a stale
 * value of ->real_parent under rcu_read_lock(), see
 * release_task()->call_rcu(delayed_put_task_struct).
 */
asmlinkage long sys_getppid(void)
{
        int pid;

        rcu_read_lock();
        pid = rcu_dereference(current->real_parent)->tgid;
        rcu_read_unlock();

        return pid;
}

asmlinkage long sys_getuid(void)
{
        /* Only we change this so SMP safe */
        return current->uid;
}

asmlinkage long sys_geteuid(void)
{
        /* Only we change this so SMP safe */
        return current->euid;
}

asmlinkage long sys_getgid(void)
{
        /* Only we change this so SMP safe */
        return current->gid;
}

asmlinkage long sys_getegid(void)
{
        /* Only we change this so SMP safe */
        return current->egid;
}

#endif

static void process_timeout(unsigned long __data)
{
        wake_up_process((struct task_struct *)__data);
}

/**
 * schedule_timeout - sleep until timeout
 * @timeout: timeout value in jiffies
 *
 * Make the current task sleep until @timeout jiffies have
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
 * pass before the routine returns. The routine will return 0
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task. In this case the remaining time
 * in jiffies will be returned, or 0 if the timer expired in time
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
 * the CPU away without a bound on the timeout. In this case the return
 * value will be %MAX_SCHEDULE_TIMEOUT.
 *
 * In all cases the return value is guaranteed to be non-negative.
 */
fastcall signed long __sched schedule_timeout(signed long timeout)
{
        struct timer_list timer;
        unsigned long expire;

        switch (timeout)
        {
        case MAX_SCHEDULE_TIMEOUT:
                /*
                 * These two special cases are useful to be comfortable
                 * in the caller. Nothing more. We could take
                 * MAX_SCHEDULE_TIMEOUT from one of the negative value
                 * but I'd like to return a valid offset (>=0) to allow
                 * the caller to do everything it wants with the retval.
                 */
                schedule();
                goto out;
        default:
                /*
                 * Another bit of PARANOID. Note that the retval will be
                 * 0 since no piece of kernel is supposed to do a check
                 * for a negative retval of schedule_timeout() (since it
                 * should never happen anyway). You just have the printk()
                 * that will tell you if something has gone wrong and where.
                 */
                if (timeout < 0)
                {
                        printk(KERN_ERR "schedule_timeout: wrong timeout "
                                "value %lx from %p\n", timeout,
                                __builtin_return_address(0));
                        current->state = TASK_RUNNING;
                        goto out;
                }
        }

        expire = timeout + jiffies;

        setup_timer(&timer, process_timeout, (unsigned long)current);
        __mod_timer(&timer, expire);
        schedule();
        del_singleshot_timer_sync(&timer);

        timeout = expire - jiffies;

 out:
        return timeout < 0 ? 0 : timeout;
}
EXPORT_SYMBOL(schedule_timeout);

/*
 * We can use __set_current_state() here because schedule_timeout() calls
 * schedule() unconditionally.
 */
signed long __sched schedule_timeout_interruptible(signed long timeout)
{
        __set_current_state(TASK_INTERRUPTIBLE);
        return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_interruptible);

signed long __sched schedule_timeout_uninterruptible(signed long timeout)
{
        __set_current_state(TASK_UNINTERRUPTIBLE);
        return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_uninterruptible);
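/*
 * Minimal usage sketch (illustrative only): to sleep for about 100 ms in
 * process context without caring about signals:
 *
 *      schedule_timeout_uninterruptible(msecs_to_jiffies(100));
 *
 * The helper sets the task state itself, so the caller does not need the
 * usual set_current_state() dance that raw schedule_timeout() requires.
 */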
1da177e4 LT |
1472 | /* Thread ID - the internal kernel "pid" */ |
1473 | asmlinkage long sys_gettid(void) | |
1474 | { | |
1475 | return current->pid; | |
1476 | } | |
1477 | ||
2aae4a10 | 1478 | /** |
1da177e4 | 1479 | * sys_sysinfo - fill in sysinfo struct |
2aae4a10 | 1480 | * @info: pointer to buffer to fill |
1da177e4 LT |
1481 | */ |
1482 | asmlinkage long sys_sysinfo(struct sysinfo __user *info) | |
1483 | { | |
1484 | struct sysinfo val; | |
1485 | unsigned long mem_total, sav_total; | |
1486 | unsigned int mem_unit, bitcount; | |
1487 | unsigned long seq; | |
1488 | ||
1489 | memset((char *)&val, 0, sizeof(struct sysinfo)); | |
1490 | ||
1491 | do { | |
1492 | struct timespec tp; | |
1493 | seq = read_seqbegin(&xtime_lock); | |
1494 | ||
1495 | /* | |
1496 | * This is annoying. The below is the same thing | |
1497 | * posix_get_clock_monotonic() does, but it wants to | |
1498 | * take the lock which we want to cover the loads stuff | |
1499 | * too. | |
1500 | */ | |
1501 | ||
1502 | getnstimeofday(&tp); | |
1503 | tp.tv_sec += wall_to_monotonic.tv_sec; | |
1504 | tp.tv_nsec += wall_to_monotonic.tv_nsec; | |
1505 | if (tp.tv_nsec - NSEC_PER_SEC >= 0) { | |
1506 | tp.tv_nsec = tp.tv_nsec - NSEC_PER_SEC; | |
1507 | tp.tv_sec++; | |
1508 | } | |
1509 | val.uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0); | |
1510 | ||
1511 | val.loads[0] = avenrun[0] << (SI_LOAD_SHIFT - FSHIFT); | |
1512 | val.loads[1] = avenrun[1] << (SI_LOAD_SHIFT - FSHIFT); | |
1513 | val.loads[2] = avenrun[2] << (SI_LOAD_SHIFT - FSHIFT); | |
1514 | ||
1515 | val.procs = nr_threads; | |
1516 | } while (read_seqretry(&xtime_lock, seq)); | |
1517 | ||
1518 | si_meminfo(&val); | |
1519 | si_swapinfo(&val); | |
1520 | ||
1521 | /* | |
1522 | * If the sum of all the available memory (i.e. ram + swap) | |
1523 | * is less than can be stored in a 32 bit unsigned long then | |
1524 | * we can be binary compatible with 2.2.x kernels. If not, | |
1525 | * well, in that case 2.2.x was broken anyways... | |
1526 | * | |
1527 | * -Erik Andersen <andersee@debian.org> | |
1528 | */ | |
1529 | ||
1530 | mem_total = val.totalram + val.totalswap; | |
1531 | if (mem_total < val.totalram || mem_total < val.totalswap) | |
1532 | goto out; | |
1533 | bitcount = 0; | |
1534 | mem_unit = val.mem_unit; | |
1535 | while (mem_unit > 1) { | |
1536 | bitcount++; | |
1537 | mem_unit >>= 1; | |
1538 | sav_total = mem_total; | |
1539 | mem_total <<= 1; | |
1540 | if (mem_total < sav_total) | |
1541 | goto out; | |
1542 | } | |
1543 | ||
1544 | /* | |
1545 | * If mem_total did not overflow, multiply all memory values by | |
1546 | * val.mem_unit and set it to 1. This leaves things compatible | |
1547 | * with 2.2.x, and also retains compatibility with earlier 2.4.x | |
1548 | * kernels... | |
1549 | */ | |
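	/*
	 * Worked example (illustrative): with a 4 KiB mem_unit the loop
	 * above iterates 12 times, so bitcount == 12 and the shifts below
	 * turn page counts into bytes: totalram == 262144 pages becomes
	 * 262144 << 12 == 1 GiB, reported with mem_unit == 1.
	 */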
1550 | ||
1551 | val.mem_unit = 1; | |
1552 | val.totalram <<= bitcount; | |
1553 | val.freeram <<= bitcount; | |
1554 | val.sharedram <<= bitcount; | |
1555 | val.bufferram <<= bitcount; | |
1556 | val.totalswap <<= bitcount; | |
1557 | val.freeswap <<= bitcount; | |
1558 | val.totalhigh <<= bitcount; | |
1559 | val.freehigh <<= bitcount; | |
1560 | ||
1561 | out: | |
1562 | if (copy_to_user(info, &val, sizeof(struct sysinfo))) | |
1563 | return -EFAULT; | |
1564 | ||
1565 | return 0; | |
1566 | } | |
1567 | ||
d730e882 IM |
1568 | /* |
1569 | * lockdep: we want to track each per-CPU base as a separate lock-class, | |
1570 | * but timer-bases are kmalloc()-ed, so we need to attach separate | |
1571 | * keys to them: | |
1572 | */ | |
1573 | static struct lock_class_key base_lock_keys[NR_CPUS]; | |
1574 | ||
a4a6198b | 1575 | static int __devinit init_timers_cpu(int cpu) |
1da177e4 LT |
1576 | { |
1577 | int j; | |
1578 | tvec_base_t *base; | |
ba6edfcd | 1579 | static char __devinitdata tvec_base_done[NR_CPUS]; |
55c888d6 | 1580 | |
ba6edfcd | 1581 | if (!tvec_base_done[cpu]) { |
a4a6198b JB |
1582 | static char boot_done; |
1583 | ||
a4a6198b | 1584 | if (boot_done) { |
ba6edfcd AM |
1585 | /* |
1586 | * The secondary CPUs (APs) use this path later in boot
1587 | */ | |
a4a6198b JB |
1588 | base = kmalloc_node(sizeof(*base), GFP_KERNEL, |
1589 | cpu_to_node(cpu)); | |
1590 | if (!base) | |
1591 | return -ENOMEM; | |
1592 | memset(base, 0, sizeof(*base)); | |
ba6edfcd | 1593 | per_cpu(tvec_bases, cpu) = base; |
a4a6198b | 1594 | } else { |
ba6edfcd AM |
1595 | /* |
1596 | * This is for the boot CPU - we use compile-time | |
1597 | * static initialisation because per-cpu memory isn't | |
1598 | * ready yet and because the memory allocators are not | |
1599 | * initialised either. | |
1600 | */ | |
a4a6198b | 1601 | boot_done = 1; |
ba6edfcd | 1602 | base = &boot_tvec_bases; |
a4a6198b | 1603 | } |
ba6edfcd AM |
1604 | tvec_base_done[cpu] = 1; |
1605 | } else { | |
1606 | base = per_cpu(tvec_bases, cpu); | |
a4a6198b | 1607 | } |
ba6edfcd | 1608 | |
3691c519 | 1609 | spin_lock_init(&base->lock); |
d730e882 IM |
1610 | lockdep_set_class(&base->lock, base_lock_keys + cpu); |
1611 | ||
1da177e4 LT |
1612 | for (j = 0; j < TVN_SIZE; j++) { |
1613 | INIT_LIST_HEAD(base->tv5.vec + j); | |
1614 | INIT_LIST_HEAD(base->tv4.vec + j); | |
1615 | INIT_LIST_HEAD(base->tv3.vec + j); | |
1616 | INIT_LIST_HEAD(base->tv2.vec + j); | |
1617 | } | |
1618 | for (j = 0; j < TVR_SIZE; j++) | |
1619 | INIT_LIST_HEAD(base->tv1.vec + j); | |
1620 | ||
1621 | base->timer_jiffies = jiffies; | |
a4a6198b | 1622 | return 0; |
1da177e4 LT |
1623 | } |
1624 | ||
1625 | #ifdef CONFIG_HOTPLUG_CPU | |
55c888d6 | 1626 | static void migrate_timer_list(tvec_base_t *new_base, struct list_head *head) |
1da177e4 LT |
1627 | { |
1628 | struct timer_list *timer; | |
1629 | ||
1630 | while (!list_empty(head)) { | |
1631 | timer = list_entry(head->next, struct timer_list, entry); | |
55c888d6 | 1632 | detach_timer(timer, 0); |
3691c519 | 1633 | timer->base = new_base; |
1da177e4 | 1634 | internal_add_timer(new_base, timer); |
1da177e4 | 1635 | } |
1da177e4 LT |
1636 | } |
1637 | ||
1638 | static void __devinit migrate_timers(int cpu) | |
1639 | { | |
1640 | tvec_base_t *old_base; | |
1641 | tvec_base_t *new_base; | |
1642 | int i; | |
1643 | ||
1644 | BUG_ON(cpu_online(cpu)); | |
a4a6198b JB |
1645 | old_base = per_cpu(tvec_bases, cpu); |
1646 | new_base = get_cpu_var(tvec_bases); | |
1da177e4 LT |
1647 | |
1648 | local_irq_disable(); | |
3691c519 ON |
1649 | spin_lock(&new_base->lock); |
1650 | spin_lock(&old_base->lock); | |
1651 | ||
1652 | BUG_ON(old_base->running_timer); | |
1da177e4 | 1653 | |
1da177e4 | 1654 | for (i = 0; i < TVR_SIZE; i++) |
55c888d6 ON |
1655 | migrate_timer_list(new_base, old_base->tv1.vec + i); |
1656 | for (i = 0; i < TVN_SIZE; i++) { | |
1657 | migrate_timer_list(new_base, old_base->tv2.vec + i); | |
1658 | migrate_timer_list(new_base, old_base->tv3.vec + i); | |
1659 | migrate_timer_list(new_base, old_base->tv4.vec + i); | |
1660 | migrate_timer_list(new_base, old_base->tv5.vec + i); | |
1661 | } | |
1662 | ||
3691c519 ON |
1663 | spin_unlock(&old_base->lock); |
1664 | spin_unlock(&new_base->lock); | |
1da177e4 LT |
1665 | local_irq_enable(); |
1666 | put_cpu_var(tvec_bases); | |
1da177e4 LT |
1667 | } |
1668 | #endif /* CONFIG_HOTPLUG_CPU */ | |
1669 | ||
8c78f307 | 1670 | static int __cpuinit timer_cpu_notify(struct notifier_block *self, |
1da177e4 LT |
1671 | unsigned long action, void *hcpu) |
1672 | { | |
1673 | long cpu = (long)hcpu; | |
1674 | switch(action) { | |
1675 | case CPU_UP_PREPARE: | |
a4a6198b JB |
1676 | if (init_timers_cpu(cpu) < 0) |
1677 | return NOTIFY_BAD; | |
1da177e4 LT |
1678 | break; |
1679 | #ifdef CONFIG_HOTPLUG_CPU | |
1680 | case CPU_DEAD: | |
1681 | migrate_timers(cpu); | |
1682 | break; | |
1683 | #endif | |
1684 | default: | |
1685 | break; | |
1686 | } | |
1687 | return NOTIFY_OK; | |
1688 | } | |
1689 | ||
8c78f307 | 1690 | static struct notifier_block __cpuinitdata timers_nb = { |
1da177e4 LT |
1691 | .notifier_call = timer_cpu_notify, |
1692 | }; | |
1693 | ||
1694 | ||
1695 | void __init init_timers(void) | |
1696 | { | |
07dccf33 | 1697 | int err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE, |
1da177e4 | 1698 | (void *)(long)smp_processor_id()); |
07dccf33 AM |
1699 | |
1700 | BUG_ON(err == NOTIFY_BAD); | |
1da177e4 LT |
1701 | register_cpu_notifier(&timers_nb); |
1702 | open_softirq(TIMER_SOFTIRQ, run_timer_softirq, NULL); | |
1703 | } | |
1704 | ||
1705 | #ifdef CONFIG_TIME_INTERPOLATION | |
1706 | ||
67890d70 CL |
1707 | struct time_interpolator *time_interpolator __read_mostly; |
1708 | static struct time_interpolator *time_interpolator_list __read_mostly; | |
1da177e4 LT |
1709 | static DEFINE_SPINLOCK(time_interpolator_lock); |
1710 | ||
1711 | static inline u64 time_interpolator_get_cycles(unsigned int src) | |
1712 | { | |
1713 | unsigned long (*x)(void); | |
1714 | ||
1715 | switch (src) | |
1716 | { | |
1717 | case TIME_SOURCE_FUNCTION: | |
1718 | x = time_interpolator->addr; | |
1719 | return x(); | |
1720 | ||
1721 | case TIME_SOURCE_MMIO64:
685db65e | 1722 | return readq_relaxed((void __iomem *)time_interpolator->addr); |
1da177e4 LT |
1723 | |
1724 | case TIME_SOURCE_MMIO32:
685db65e | 1725 | return readl_relaxed((void __iomem *)time_interpolator->addr); |
1da177e4 LT |
1726 | |
1727 | default: return get_cycles(); | |
1728 | } | |
1729 | } | |
1730 | ||
486d46ae | 1731 | static inline u64 time_interpolator_get_counter(int writelock) |
1da177e4 LT |
1732 | { |
1733 | unsigned int src = time_interpolator->source; | |
1734 | ||
1735 | if (time_interpolator->jitter) | |
1736 | { | |
1737 | u64 lcycle; | |
1738 | u64 now; | |
1739 | ||
1740 | do { | |
1741 | lcycle = time_interpolator->last_cycle; | |
1742 | now = time_interpolator_get_cycles(src); | |
1743 | if (lcycle && time_after(lcycle, now)) | |
1744 | return lcycle; | |
486d46ae AW |
1745 | |
1746 | /* When holding the xtime write lock, there's no need | |
1747 | * to add the overhead of the cmpxchg. Readers are | |
1748 | * forced to retry until the write lock is released.
1749 | */ | |
1750 | if (writelock) { | |
1751 | time_interpolator->last_cycle = now; | |
1752 | return now; | |
1753 | } | |
1da177e4 LT |
1754 | /* Keep track of the last timer value returned. The use of cmpxchg here |
1755 | * will cause contention in an SMP environment. | |
1756 | */ | |
1757 | } while (unlikely(cmpxchg(&time_interpolator->last_cycle, lcycle, now) != lcycle)); | |
1758 | return now; | |
1759 | } | |
1760 | else | |
1761 | return time_interpolator_get_cycles(src); | |
1762 | } | |
1763 | ||
1764 | void time_interpolator_reset(void) | |
1765 | { | |
1766 | time_interpolator->offset = 0; | |
486d46ae | 1767 | time_interpolator->last_counter = time_interpolator_get_counter(1); |
1da177e4 LT |
1768 | } |
1769 | ||
1770 | #define GET_TI_NSECS(count, i) (((((count) - (i)->last_counter) & (i)->mask) * (i)->nsec_per_cyc) >> (i)->shift)
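/*
 * Informative sketch of the macro's math: register_time_interpolator()
 * scales nsec_per_cyc to (NSEC_PER_SEC << shift) / frequency, so for a
 * 10 MHz counter with shift == 10 we get (10^9 << 10) / 10^7 == 102400,
 * and 1000 elapsed cycles yield (1000 * 102400) >> 10 == 100000 ns,
 * i.e. the expected 100 us.
 */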
1771 | ||
1772 | unsigned long time_interpolator_get_offset(void) | |
1773 | { | |
1774 | /* If we do not have a time interpolator set up then just return zero */ | |
1775 | if (!time_interpolator) | |
1776 | return 0; | |
1777 | ||
1778 | return time_interpolator->offset + | |
486d46ae | 1779 | GET_TI_NSECS(time_interpolator_get_counter(0), time_interpolator); |
1da177e4 LT |
1780 | } |
1781 | ||
1782 | #define INTERPOLATOR_ADJUST 65536 | |
1783 | #define INTERPOLATOR_MAX_SKIP (10*INTERPOLATOR_ADJUST)
1784 | ||
1785 | static void time_interpolator_update(long delta_nsec) | |
1786 | { | |
1787 | u64 counter; | |
1788 | unsigned long offset; | |
1789 | ||
1790 | /* If there is no time interpolator set up then do nothing */ | |
1791 | if (!time_interpolator) | |
1792 | return; | |
1793 | ||
a5a0d52c AM |
1794 | /* |
1795 | * The interpolator compensates for late ticks by accumulating the late | |
1796 | * time in time_interpolator->offset. A tick earlier than expected will | |
1797 | * lead to a reset of the offset and a corresponding jump of the clock | |
1798 | * forward. Again this only works if the interpolator clock is running | |
1799 | * slightly slower than the regular clock and the tuning logic ensures
1800 | * that. | |
1801 | */ | |
1da177e4 | 1802 | |
486d46ae | 1803 | counter = time_interpolator_get_counter(1); |
a5a0d52c AM |
1804 | offset = time_interpolator->offset + |
1805 | GET_TI_NSECS(counter, time_interpolator); | |
1da177e4 LT |
1806 | |
1807 | if (delta_nsec < 0 || (unsigned long) delta_nsec < offset) | |
1808 | time_interpolator->offset = offset - delta_nsec; | |
1809 | else { | |
1810 | time_interpolator->skips++; | |
1811 | time_interpolator->ns_skipped += delta_nsec - offset; | |
1812 | time_interpolator->offset = 0; | |
1813 | } | |
1814 | time_interpolator->last_counter = counter; | |
1815 | ||
1816 | /* Tuning logic for time interpolator invoked every minute or so. | |
1817 | * Decrease interpolator clock speed if no skips occurred and an offset is carried. | |
1818 | * Increase interpolator clock speed if we skip too much time. | |
1819 | */ | |
1820 | if (jiffies % INTERPOLATOR_ADJUST == 0) | |
1821 | { | |
b20367a6 | 1822 | if (time_interpolator->skips == 0 && time_interpolator->offset > tick_nsec) |
1da177e4 LT |
1823 | time_interpolator->nsec_per_cyc--; |
1824 | if (time_interpolator->ns_skipped > INTERPOLATOR_MAX_SKIP && time_interpolator->offset == 0) | |
1825 | time_interpolator->nsec_per_cyc++; | |
1826 | time_interpolator->skips = 0; | |
1827 | time_interpolator->ns_skipped = 0; | |
1828 | } | |
1829 | } | |
1830 | ||
1831 | static inline int | |
1832 | is_better_time_interpolator(struct time_interpolator *new) | |
1833 | { | |
1834 | if (!time_interpolator) | |
1835 | return 1; | |
1836 | return new->frequency > 2*time_interpolator->frequency || | |
1837 | (unsigned long)new->drift < (unsigned long)time_interpolator->drift; | |
1838 | } | |
1839 | ||
1840 | void | |
1841 | register_time_interpolator(struct time_interpolator *ti) | |
1842 | { | |
1843 | unsigned long flags; | |
1844 | ||
1845 | /* Sanity check */ | |
9f31252c | 1846 | BUG_ON(ti->frequency == 0 || ti->mask == 0); |
1da177e4 LT |
1847 | |
1848 | ti->nsec_per_cyc = ((u64)NSEC_PER_SEC << ti->shift) / ti->frequency; | |
1849 | spin_lock(&time_interpolator_lock); | |
1850 | write_seqlock_irqsave(&xtime_lock, flags); | |
1851 | if (is_better_time_interpolator(ti)) { | |
1852 | time_interpolator = ti; | |
1853 | time_interpolator_reset(); | |
1854 | } | |
1855 | write_sequnlock_irqrestore(&xtime_lock, flags); | |
1856 | ||
1857 | ti->next = time_interpolator_list; | |
1858 | time_interpolator_list = ti; | |
1859 | spin_unlock(&time_interpolator_lock); | |
1860 | } | |
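/*
 * Registration sketch (illustrative; the field names appear in this
 * file, but the values and the counter function are invented):
 *
 *	static struct time_interpolator my_interp = {
 *		.source    = TIME_SOURCE_MMIO32,
 *		.addr      = (unsigned long)my_counter_mmio,  (hypothetical)
 *		.mask      = 0xffffffffUL,
 *		.frequency = 10000000,	(10 MHz)
 *		.shift     = 10,
 *	};
 *	register_time_interpolator(&my_interp);
 *
 * The core then prefers it over a previously registered interpolator
 * only if is_better_time_interpolator() says so.
 */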
1861 | ||
1862 | void | |
1863 | unregister_time_interpolator(struct time_interpolator *ti) | |
1864 | { | |
1865 | struct time_interpolator *curr, **prev; | |
1866 | unsigned long flags; | |
1867 | ||
1868 | spin_lock(&time_interpolator_lock); | |
1869 | prev = &time_interpolator_list; | |
1870 | for (curr = *prev; curr; curr = curr->next) { | |
1871 | if (curr == ti) { | |
1872 | *prev = curr->next; | |
1873 | break; | |
1874 | } | |
1875 | prev = &curr->next; | |
1876 | } | |
1877 | ||
1878 | write_seqlock_irqsave(&xtime_lock, flags); | |
1879 | if (ti == time_interpolator) { | |
1880 | /* we lost the best time-interpolator: */ | |
1881 | time_interpolator = NULL; | |
1882 | /* find the next-best interpolator */ | |
1883 | for (curr = time_interpolator_list; curr; curr = curr->next) | |
1884 | if (is_better_time_interpolator(curr)) | |
1885 | time_interpolator = curr; | |
1886 | time_interpolator_reset(); | |
1887 | } | |
1888 | write_sequnlock_irqrestore(&xtime_lock, flags); | |
1889 | spin_unlock(&time_interpolator_lock); | |
1890 | } | |
1891 | #endif /* CONFIG_TIME_INTERPOLATION */ | |
1892 | ||
1893 | /** | |
1894 | * msleep - sleep safely even with waitqueue interruptions | |
1895 | * @msecs: Time in milliseconds to sleep for | |
1896 | */ | |
1897 | void msleep(unsigned int msecs) | |
1898 | { | |
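	/*
	 * The extra jiffy rounds the conversion up, guaranteeing that we
	 * sleep for at least @msecs even when the call lands just before
	 * a jiffies increment.
	 */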
1899 | unsigned long timeout = msecs_to_jiffies(msecs) + 1; | |
1900 | ||
75bcc8c5 NA |
1901 | while (timeout) |
1902 | timeout = schedule_timeout_uninterruptible(timeout); | |
1da177e4 LT |
1903 | } |
1904 | ||
1905 | EXPORT_SYMBOL(msleep); | |
1906 | ||
1907 | /** | |
96ec3efd | 1908 | * msleep_interruptible - sleep waiting for signals |
1da177e4 LT |
1909 | * @msecs: Time in milliseconds to sleep for |
1910 | */ | |
1911 | unsigned long msleep_interruptible(unsigned int msecs) | |
1912 | { | |
1913 | unsigned long timeout = msecs_to_jiffies(msecs) + 1; | |
1914 | ||
75bcc8c5 NA |
1915 | while (timeout && !signal_pending(current)) |
1916 | timeout = schedule_timeout_interruptible(timeout); | |
1da177e4 LT |
1917 | return jiffies_to_msecs(timeout); |
1918 | } | |
1919 | ||
1920 | EXPORT_SYMBOL(msleep_interruptible); |
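
/*
 * Usage sketch (illustrative): the return value is the time remaining
 * in milliseconds, non-zero only when a pending signal cut the sleep
 * short, so a caller can do e.g.
 *
 *	if (msleep_interruptible(500))
 *		return -EINTR;	(the error code choice is the caller's)
 */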