/*
 *  linux/kernel/hrtimer.c
 *
 *  Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 *  Copyright(C) 2006-2007  Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 *  High-resolution kernel timers
 *
 *  In contrast to the low-resolution timeout API implemented in
 *  kernel/timer.c, hrtimers provide finer resolution and accuracy
 *  depending on system configuration and capabilities.
 *
 *  These timers are currently used for:
 *   - precise in-kernel timing
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 *
 *  based on kernel/timer.c
 *
 *  Help, testing, suggestions, bugfixes, improvements were
 *  provided by:
 *
 *	George Anzinger, Andrew Morton, Steven Rostedt, Roman Zippel
 *
 *  For licencing details see kernel-base/COPYING
 */
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/notifier.h>
#include <linux/syscalls.h>
#include <linux/interrupt.h>
#include <linux/tick.h>

#include <asm/uaccess.h>
/**
 * ktime_get - get the monotonic time in ktime_t format
 *
 * returns the time in ktime_t format
 */
ktime_t ktime_get(void)
{
	struct timespec now;

	ktime_get_ts(&now);

	return timespec_to_ktime(now);
}
EXPORT_SYMBOL_GPL(ktime_get);
/**
 * ktime_get_real - get the real (wall-) time in ktime_t format
 *
 * returns the time in ktime_t format
 */
ktime_t ktime_get_real(void)
{
	struct timespec now;

	getnstimeofday(&now);

	return timespec_to_ktime(now);
}

EXPORT_SYMBOL_GPL(ktime_get_real);
/*
 * The timer bases:
 *
 * Note: If we want to add new timer bases, we have to skip the two
 * clock ids captured by the cpu-timers. We do this by holding empty
 * entries rather than doing math adjustment of the clock ids.
 * This ensures that we capture erroneous accesses to these clock ids
 * rather than moving them into the range of valid clock id's.
 */
static DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
{
	.clock_base =
	{
		{
			.index = CLOCK_REALTIME,
			.get_time = &ktime_get_real,
			.resolution = KTIME_REALTIME_RES,
		},
		{
			.index = CLOCK_MONOTONIC,
			.get_time = &ktime_get,
			.resolution = KTIME_MONOTONIC_RES,
		},
	}
};
/**
 * ktime_get_ts - get the monotonic clock in timespec format
 * @ts:		pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec format in the variable pointed to by @ts.
 */
void ktime_get_ts(struct timespec *ts)
{
	struct timespec tomono;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);
		getnstimeofday(ts);
		tomono = wall_to_monotonic;

	} while (read_seqretry(&xtime_lock, seq));

	set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
				ts->tv_nsec + tomono.tv_nsec);
}
EXPORT_SYMBOL_GPL(ktime_get_ts);
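
/*
 * Illustrative sketch (not part of the original file): the accessors above
 * return the current time in different bases/formats. The helper name
 * my_show_clocks is hypothetical.
 */
#if 0	/* example only */
static void my_show_clocks(void)
{
	ktime_t mono = ktime_get();		/* monotonic clock, ktime_t */
	ktime_t real = ktime_get_real();	/* wall clock, ktime_t */
	struct timespec ts;

	ktime_get_ts(&ts);			/* monotonic clock, timespec */

	printk(KERN_DEBUG "mono=%lld ns real=%lld ns mono_ts=%ld.%09ld\n",
	       (long long) ktime_to_ns(mono), (long long) ktime_to_ns(real),
	       ts.tv_sec, ts.tv_nsec);
}
#endif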
/*
 * Get the coarse grained time at the softirq based on xtime and
 * wall_to_monotonic.
 */
static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
{
	ktime_t xtim, tomono;
	struct timespec xts;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);
		getnstimeofday(&xts);
	} while (read_seqretry(&xtime_lock, seq));

	xtim = timespec_to_ktime(xts);
	tomono = timespec_to_ktime(wall_to_monotonic);
	base->clock_base[CLOCK_REALTIME].softirq_time = xtim;
	base->clock_base[CLOCK_MONOTONIC].softirq_time =
		ktime_add(xtim, tomono);
}
/*
 * Helper function to check, whether the timer is on one of the queues
 */
static inline int hrtimer_is_queued(struct hrtimer *timer)
{
	return timer->state & HRTIMER_STATE_ENQUEUED;
}

/*
 * Helper function to check, whether the timer is running the callback
 */
static inline int hrtimer_callback_running(struct hrtimer *timer)
{
	return timer->state & HRTIMER_STATE_CALLBACK;
}
/*
 * Functions and macros which are different for UP/SMP systems are kept in a
 * single place
 */
#ifdef CONFIG_SMP

/*
 * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on the lists/queues.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
static struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
						    unsigned long *flags)
{
	struct hrtimer_clock_base *base;

	for (;;) {
		base = timer->base;
		if (likely(base != NULL)) {
			spin_lock_irqsave(&base->cpu_base->lock, *flags);
			if (likely(base == timer->base))
				return base;
			/* The timer has migrated to another CPU: */
			spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
		}
		cpu_relax();
	}
}
/*
 * Switch the timer base to the current CPU when possible.
 */
static inline struct hrtimer_clock_base *
switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base)
{
	struct hrtimer_clock_base *new_base;
	struct hrtimer_cpu_base *new_cpu_base;

	new_cpu_base = &__get_cpu_var(hrtimer_bases);
	new_base = &new_cpu_base->clock_base[base->index];

	if (base != new_base) {
		/*
		 * We are trying to schedule the timer on the local CPU.
		 * However we can't change timer's base while it is running,
		 * so we keep it on the same CPU. No hassle vs. reprogramming
		 * the event source in the high resolution case. The softirq
		 * code will take care of this when the timer function has
		 * completed. There is no conflict as we hold the lock until
		 * the timer is enqueued.
		 */
		if (unlikely(timer->state & HRTIMER_STATE_CALLBACK))
			return base;

		/* See the comment in lock_timer_base() */
		timer->base = NULL;
		spin_unlock(&base->cpu_base->lock);
		spin_lock(&new_base->cpu_base->lock);
		timer->base = new_base;
	}
	return new_base;
}
#else /* CONFIG_SMP */

static inline struct hrtimer_clock_base *
lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	struct hrtimer_clock_base *base = timer->base;

	spin_lock_irqsave(&base->cpu_base->lock, *flags);

	return base;
}

#define switch_hrtimer_base(t, b)	(b)

#endif /* !CONFIG_SMP */
/*
 * Functions for the union type storage format of ktime_t which are
 * too large for inlining:
 */
#if BITS_PER_LONG < 64
# ifndef CONFIG_KTIME_SCALAR
/**
 * ktime_add_ns - Add a scalar nanoseconds value to a ktime_t variable
 * @kt:		addend
 * @nsec:	the scalar nsec value to add
 *
 * Returns the sum of kt and nsec in ktime_t format
 */
ktime_t ktime_add_ns(const ktime_t kt, u64 nsec)
{
	ktime_t tmp;

	if (likely(nsec < NSEC_PER_SEC)) {
		tmp.tv64 = nsec;
	} else {
		unsigned long rem = do_div(nsec, NSEC_PER_SEC);

		tmp = ktime_set((long)nsec, rem);
	}

	return ktime_add(kt, tmp);
}
#else /* CONFIG_KTIME_SCALAR */
# endif /* !CONFIG_KTIME_SCALAR */

/*
 * Divide a ktime value by a nanosecond value
 */
unsigned long ktime_divns(const ktime_t kt, s64 div)
{
	u64 dclc, dns;
	int sft = 0;

	dclc = dns = ktime_to_ns(kt);
	/* Make sure the divisor is less than 2^32: */
	while (div >> 32) {
		sft++;
		div >>= 1;
	}
	dclc >>= sft;
	do_div(dclc, (unsigned long) div);

	return (unsigned long) dclc;
}
#endif /* BITS_PER_LONG >= 64 */
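
/*
 * Illustrative sketch (not part of the original file): ktime_add_ns() is the
 * usual way to compute an expiry a scalar number of nanoseconds in the
 * future, regardless of whether ktime_t is scalar or two-field here. The
 * helper name my_expiry_in is hypothetical.
 */
#if 0	/* example only */
static ktime_t my_expiry_in(u64 delay_ns)
{
	return ktime_add_ns(ktime_get(), delay_ns);
}
#endif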
/*
 * Counterpart to lock_timer_base above:
 */
static inline
void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
}
/**
 * hrtimer_forward - forward the timer expiry
 * @timer:	hrtimer to forward
 * @now:	forward past this time
 * @interval:	the interval to forward
 *
 * Forward the timer expiry so it will expire in the future.
 * Returns the number of overruns.
 */
unsigned long
hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
{
	unsigned long orun = 1;
	ktime_t delta;

	delta = ktime_sub(now, timer->expires);

	if (delta.tv64 < 0)
		return 0;

	if (interval.tv64 < timer->base->resolution.tv64)
		interval.tv64 = timer->base->resolution.tv64;

	if (unlikely(delta.tv64 >= interval.tv64)) {
		s64 incr = ktime_to_ns(interval);

		orun = ktime_divns(delta, incr);
		timer->expires = ktime_add_ns(timer->expires, incr * orun);
		if (timer->expires.tv64 > now.tv64)
			return orun;
		/*
		 * This (and the ktime_add() below) is the
		 * correction for exact:
		 */
		orun++;
	}
	timer->expires = ktime_add(timer->expires, interval);

	return orun;
}
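
/*
 * Illustrative sketch (not part of the original file): a periodic callback
 * that pushes its own expiry forward by a fixed period with hrtimer_forward()
 * and re-arms itself by returning HRTIMER_RESTART. The name my_period_fn and
 * the 1 ms period are hypothetical.
 */
#if 0	/* example only */
static enum hrtimer_restart my_period_fn(struct hrtimer *timer)
{
	ktime_t period = ktime_set(0, NSEC_PER_MSEC);

	/* returns the number of elapsed periods; > 1 means overruns */
	hrtimer_forward(timer, timer->base->get_time(), period);

	return HRTIMER_RESTART;
}
#endif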
/*
 * enqueue_hrtimer - internal function to (re)start a timer
 *
 * The timer is inserted in expiry order. Insertion into the
 * red black tree is O(log(n)). Must hold the base lock.
 */
static void enqueue_hrtimer(struct hrtimer *timer,
			    struct hrtimer_clock_base *base)
{
	struct rb_node **link = &base->active.rb_node;
	struct rb_node *parent = NULL;
	struct hrtimer *entry;

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct hrtimer, node);
		/*
		 * We don't care about collisions. Nodes with
		 * the same expiry time stay together.
		 */
		if (timer->expires.tv64 < entry->expires.tv64)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	/*
	 * Insert the timer to the rbtree and check whether it
	 * replaces the first pending timer
	 */
	rb_link_node(&timer->node, parent, link);
	rb_insert_color(&timer->node, &base->active);

	/*
	 * HRTIMER_STATE_ENQUEUED is or'ed to the current state to preserve the
	 * state of a possibly running callback.
	 */
	timer->state |= HRTIMER_STATE_ENQUEUED;

	if (!base->first || timer->expires.tv64 <
	    rb_entry(base->first, struct hrtimer, node)->expires.tv64)
		base->first = &timer->node;
}
/*
 * __remove_hrtimer - internal function to remove a timer
 *
 * Caller must hold the base lock.
 */
static void __remove_hrtimer(struct hrtimer *timer,
			     struct hrtimer_clock_base *base,
			     unsigned long newstate)
{
	/*
	 * Remove the timer from the rbtree and replace the
	 * first entry pointer if necessary.
	 */
	if (base->first == &timer->node)
		base->first = rb_next(&timer->node);
	rb_erase(&timer->node, &base->active);
	timer->state = newstate;
}
/*
 * remove hrtimer, called with base lock held
 */
static inline int
remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
{
	if (hrtimer_is_queued(timer)) {
		__remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE);
		return 1;
	}
	return 0;
}
/**
 * hrtimer_start - (re)start a relative timer on the current CPU
 * @timer:	the timer to be added
 * @tim:	expiry time
 * @mode:	expiry mode: absolute (HRTIMER_ABS) or relative (HRTIMER_REL)
 *
 * Returns:
 *  0 on success
 *  1 when the timer was active
 */
int
hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
{
	struct hrtimer_clock_base *base, *new_base;
	unsigned long flags;
	int ret;

	base = lock_hrtimer_base(timer, &flags);

	/* Remove an active timer from the queue: */
	ret = remove_hrtimer(timer, base);

	/* Switch the timer base, if necessary: */
	new_base = switch_hrtimer_base(timer, base);

	if (mode == HRTIMER_MODE_REL) {
		tim = ktime_add(tim, new_base->get_time());
		/*
		 * CONFIG_TIME_LOW_RES is a temporary way for architectures
		 * to signal that they simply return xtime in
		 * do_gettimeoffset(). In this case we want to round up by
		 * resolution when starting a relative timer, to avoid short
		 * timeouts. This will go away with the GTOD framework.
		 */
#ifdef CONFIG_TIME_LOW_RES
		tim = ktime_add(tim, base->resolution);
#endif
	}
	timer->expires = tim;

	enqueue_hrtimer(timer, new_base);

	unlock_hrtimer_base(timer, &flags);

	return ret;
}
EXPORT_SYMBOL_GPL(hrtimer_start);
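
/*
 * Illustrative sketch (not part of the original file): arming the same timer
 * relative to now vs. at an absolute instant on its clock base. The helper
 * name my_arm_examples is hypothetical; the timer is assumed to have been set
 * up with hrtimer_init() and a callback.
 */
#if 0	/* example only */
static void my_arm_examples(struct hrtimer *timer)
{
	/* expire roughly 5 ms from now */
	hrtimer_start(timer, ktime_set(0, 5 * NSEC_PER_MSEC),
		      HRTIMER_MODE_REL);

	/* replace that with an absolute expiry one second from now */
	hrtimer_start(timer,
		      ktime_add_ns(timer->base->get_time(), NSEC_PER_SEC),
		      HRTIMER_MODE_ABS);
}
#endif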
/**
 * hrtimer_try_to_cancel - try to deactivate a timer
 * @timer:	hrtimer to stop
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 * -1 when the timer is currently executing the callback function and
 *    cannot be stopped
 */
int hrtimer_try_to_cancel(struct hrtimer *timer)
{
	struct hrtimer_clock_base *base;
	unsigned long flags;
	int ret = -1;

	base = lock_hrtimer_base(timer, &flags);

	if (!hrtimer_callback_running(timer))
		ret = remove_hrtimer(timer, base);

	unlock_hrtimer_base(timer, &flags);

	return ret;
}
EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel);
/**
 * hrtimer_cancel - cancel a timer and wait for the handler to finish.
 * @timer:	the timer to be cancelled
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 */
int hrtimer_cancel(struct hrtimer *timer)
{
	for (;;) {
		int ret = hrtimer_try_to_cancel(timer);

		if (ret >= 0)
			return ret;
		cpu_relax();
	}
}
EXPORT_SYMBOL_GPL(hrtimer_cancel);
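
/*
 * Illustrative sketch (not part of the original file): typical teardown.
 * hrtimer_cancel() is the safe choice on module exit or device removal since
 * it also waits for a callback that is currently running, while
 * hrtimer_try_to_cancel() is for contexts that must not wait. The helper name
 * my_teardown is hypothetical.
 */
#if 0	/* example only */
static void my_teardown(struct hrtimer *timer)
{
	/* returns 1 if a pending timer was removed, 0 if it was idle */
	hrtimer_cancel(timer);
}
#endif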
/**
 * hrtimer_get_remaining - get remaining time for the timer
 * @timer:	the timer to read
 */
ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
{
	struct hrtimer_clock_base *base;
	unsigned long flags;
	ktime_t rem;

	base = lock_hrtimer_base(timer, &flags);
	rem = ktime_sub(timer->expires, base->get_time());
	unlock_hrtimer_base(timer, &flags);

	return rem;
}
EXPORT_SYMBOL_GPL(hrtimer_get_remaining);
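
/*
 * Illustrative sketch (not part of the original file): converting the
 * remaining time of a pending timer to nanoseconds. The helper name
 * my_remaining_ns is hypothetical.
 */
#if 0	/* example only */
static s64 my_remaining_ns(const struct hrtimer *timer)
{
	return ktime_to_ns(hrtimer_get_remaining(timer));
}
#endif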
#if defined(CONFIG_NO_IDLE_HZ) || defined(CONFIG_NO_HZ)
/**
 * hrtimer_get_next_event - get the time until next expiry event
 *
 * Returns the delta to the next expiry event or KTIME_MAX if no timer
 * is pending.
 */
ktime_t hrtimer_get_next_event(void)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	struct hrtimer_clock_base *base = cpu_base->clock_base;
	ktime_t delta, mindelta = { .tv64 = KTIME_MAX };
	unsigned long flags;
	int i;

	spin_lock_irqsave(&cpu_base->lock, flags);

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
		struct hrtimer *timer;

		if (!base->first)
			continue;

		timer = rb_entry(base->first, struct hrtimer, node);
		delta.tv64 = timer->expires.tv64;
		delta = ktime_sub(delta, base->get_time());
		if (delta.tv64 < mindelta.tv64)
			mindelta.tv64 = delta.tv64;
	}

	spin_unlock_irqrestore(&cpu_base->lock, flags);

	if (mindelta.tv64 < 0)
		mindelta.tv64 = 0;

	return mindelta;
}
#endif
/**
 * hrtimer_init - initialize a timer to the given clock
 * @timer:	the timer to be initialized
 * @clock_id:	the clock to be used
 * @mode:	timer mode abs/rel
 */
void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
		  enum hrtimer_mode mode)
{
	struct hrtimer_cpu_base *cpu_base;

	memset(timer, 0, sizeof(struct hrtimer));

	cpu_base = &__raw_get_cpu_var(hrtimer_bases);

	if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS)
		clock_id = CLOCK_MONOTONIC;

	timer->base = &cpu_base->clock_base[clock_id];
}
EXPORT_SYMBOL_GPL(hrtimer_init);
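
/*
 * Illustrative sketch (not part of the original file): a driver-style
 * one-shot timer built on the API above. The names my_timer, my_timer_fn and
 * my_timer_setup, and the 10 ms delay, are hypothetical.
 */
#if 0	/* example only */
static struct hrtimer my_timer;

static enum hrtimer_restart my_timer_fn(struct hrtimer *timer)
{
	/* runs from the hrtimer softirq; keep the work short */
	return HRTIMER_NORESTART;
}

static void my_timer_setup(void)
{
	hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	my_timer.function = my_timer_fn;
	/* fire roughly 10 ms from now on the monotonic clock */
	hrtimer_start(&my_timer, ktime_set(0, 10 * NSEC_PER_MSEC),
		      HRTIMER_MODE_REL);
}
#endif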
/**
 * hrtimer_get_res - get the timer resolution for a clock
 * @which_clock: which clock to query
 * @tp:		 pointer to timespec variable to store the resolution
 *
 * Store the resolution of the clock selected by @which_clock in the
 * variable pointed to by @tp.
 */
int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
{
	struct hrtimer_cpu_base *cpu_base;

	cpu_base = &__raw_get_cpu_var(hrtimer_bases);
	*tp = ktime_to_timespec(cpu_base->clock_base[which_clock].resolution);

	return 0;
}
EXPORT_SYMBOL_GPL(hrtimer_get_res);
/*
 * Expire the per base hrtimer-queue:
 */
static inline void run_hrtimer_queue(struct hrtimer_cpu_base *cpu_base,
				     int index)
{
	struct rb_node *node;
	struct hrtimer_clock_base *base = &cpu_base->clock_base[index];

	if (!base->first)
		return;

	if (base->get_softirq_time)
		base->softirq_time = base->get_softirq_time();

	spin_lock_irq(&cpu_base->lock);

	while ((node = base->first)) {
		struct hrtimer *timer;
		enum hrtimer_restart (*fn)(struct hrtimer *);
		int restart;

		timer = rb_entry(node, struct hrtimer, node);
		if (base->softirq_time.tv64 <= timer->expires.tv64)
			break;

		fn = timer->function;
		__remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK);
		spin_unlock_irq(&cpu_base->lock);

		restart = fn(timer);

		spin_lock_irq(&cpu_base->lock);

		timer->state &= ~HRTIMER_STATE_CALLBACK;
		if (restart != HRTIMER_NORESTART) {
			BUG_ON(hrtimer_active(timer));
			enqueue_hrtimer(timer, base);
		}
	}
	spin_unlock_irq(&cpu_base->lock);
}
/*
 * Called from timer softirq every jiffy, expire hrtimers:
 */
void hrtimer_run_queues(void)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	int i;

	/*
	 * This _is_ ugly: We have to check in the softirq context,
	 * whether we can switch to highres and / or nohz mode. The
	 * clocksource switch happens in the timer interrupt with
	 * xtime_lock held. Notification from there only sets the
	 * check bit in the tick_oneshot code, otherwise we might
	 * deadlock vs. xtime_lock.
	 */
	tick_check_oneshot_change(1);

	hrtimer_get_softirq_time(cpu_base);

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
		run_hrtimer_queue(cpu_base, i);
}
/*
 * Sleep related functions:
 */
static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
{
	struct hrtimer_sleeper *t =
		container_of(timer, struct hrtimer_sleeper, timer);
	struct task_struct *task = t->task;

	t->task = NULL;
	if (task)
		wake_up_process(task);

	return HRTIMER_NORESTART;
}
void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
{
	sl->timer.function = hrtimer_wakeup;
	sl->task = task;
}
static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
{
	hrtimer_init_sleeper(t, current);

	do {
		set_current_state(TASK_INTERRUPTIBLE);
		hrtimer_start(&t->timer, t->timer.expires, mode);

		if (likely(t->task))
			schedule();

		hrtimer_cancel(&t->timer);
		mode = HRTIMER_MODE_ABS;

	} while (t->task && !signal_pending(current));

	return t->task == NULL;
}
long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
{
	struct hrtimer_sleeper t;
	struct timespec __user *rmtp;
	struct timespec tu;
	ktime_t time;

	restart->fn = do_no_restart_syscall;

	hrtimer_init(&t.timer, restart->arg0, HRTIMER_MODE_ABS);
	t.timer.expires.tv64 = ((u64)restart->arg3 << 32) | (u64) restart->arg2;

	if (do_nanosleep(&t, HRTIMER_MODE_ABS))
		return 0;

	rmtp = (struct timespec __user *) restart->arg1;
	if (rmtp) {
		time = ktime_sub(t.timer.expires, t.timer.base->get_time());
		if (time.tv64 <= 0)
			return 0;
		tu = ktime_to_timespec(time);
		if (copy_to_user(rmtp, &tu, sizeof(tu)))
			return -EFAULT;
	}

	restart->fn = hrtimer_nanosleep_restart;

	/* The other values in restart are already filled in */
	return -ERESTART_RESTARTBLOCK;
}
long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
		       const enum hrtimer_mode mode, const clockid_t clockid)
{
	struct restart_block *restart;
	struct hrtimer_sleeper t;
	struct timespec tu;
	ktime_t rem;

	hrtimer_init(&t.timer, clockid, mode);
	t.timer.expires = timespec_to_ktime(*rqtp);
	if (do_nanosleep(&t, mode))
		return 0;

	/* Absolute timers do not update the rmtp value and restart: */
	if (mode == HRTIMER_MODE_ABS)
		return -ERESTARTNOHAND;

	if (rmtp) {
		rem = ktime_sub(t.timer.expires, t.timer.base->get_time());
		if (rem.tv64 <= 0)
			return 0;
		tu = ktime_to_timespec(rem);
		if (copy_to_user(rmtp, &tu, sizeof(tu)))
			return -EFAULT;
	}

	restart = &current_thread_info()->restart_block;
	restart->fn = hrtimer_nanosleep_restart;
	restart->arg0 = (unsigned long) t.timer.base->index;
	restart->arg1 = (unsigned long) rmtp;
	restart->arg2 = t.timer.expires.tv64 & 0xFFFFFFFF;
	restart->arg3 = t.timer.expires.tv64 >> 32;

	return -ERESTART_RESTARTBLOCK;
}
asmlinkage long
sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp)
{
	struct timespec tu;

	if (copy_from_user(&tu, rqtp, sizeof(tu)))
		return -EFAULT;

	if (!timespec_valid(&tu))
		return -EINVAL;

	return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
}
/*
 * Functions related to boot-time initialization:
 */
static void __devinit init_hrtimers_cpu(int cpu)
{
	struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
	int i;

	spin_lock_init(&cpu_base->lock);
	lockdep_set_class(&cpu_base->lock, &cpu_base->lock_key);

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
		cpu_base->clock_base[i].cpu_base = cpu_base;
}
#ifdef CONFIG_HOTPLUG_CPU

static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
				 struct hrtimer_clock_base *new_base)
{
	struct hrtimer *timer;
	struct rb_node *node;

	while ((node = rb_first(&old_base->active))) {
		timer = rb_entry(node, struct hrtimer, node);
		BUG_ON(timer->state & HRTIMER_STATE_CALLBACK);
		__remove_hrtimer(timer, old_base, HRTIMER_STATE_INACTIVE);
		timer->base = new_base;
		enqueue_hrtimer(timer, new_base);
	}
}
static void migrate_hrtimers(int cpu)
{
	struct hrtimer_cpu_base *old_base, *new_base;
	int i;

	BUG_ON(cpu_online(cpu));
	old_base = &per_cpu(hrtimer_bases, cpu);
	new_base = &get_cpu_var(hrtimer_bases);

	local_irq_disable();

	spin_lock(&new_base->lock);
	spin_lock(&old_base->lock);

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		migrate_hrtimer_list(&old_base->clock_base[i],
				     &new_base->clock_base[i]);
	}
	spin_unlock(&old_base->lock);
	spin_unlock(&new_base->lock);

	local_irq_enable();
	put_cpu_var(hrtimer_bases);
}
#endif /* CONFIG_HOTPLUG_CPU */
static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
					unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {

	case CPU_UP_PREPARE:
		init_hrtimers_cpu(cpu);
		break;

#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
		clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &cpu);
		migrate_hrtimers(cpu);
		break;
#endif

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata hrtimers_nb = {
	.notifier_call = hrtimer_cpu_notify,
};
void __init hrtimers_init(void)
{
	hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
			   (void *)(long)smp_processor_id());
	register_cpu_notifier(&hrtimers_nb);
}