/*
 * linux/kernel/posix_timers.c
 *
 * 2002-10-15  Posix Clocks & timers
 *             by George Anzinger george@mvista.com
 *
 *             Copyright (C) 2002 2003 by MontaVista Software.
 *
 * 2004-06-01  Fix CLOCK_REALTIME clock/timer TIMER_ABSTIME bug.
 *             Copyright (C) 2004 Boris Hu
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * MontaVista Software | 1237 East Arques Avenue | Sunnyvale | CA 94085 | USA
 */
/* These are all the functions necessary to implement
 * POSIX clocks & timers
 */
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/calc64.h>

#include <asm/uaccess.h>
#include <asm/semaphore.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/idr.h>
#include <linux/posix-timers.h>
#include <linux/syscalls.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/module.h>
#define CLOCK_REALTIME_RES TICK_NSEC  /* In nanoseconds. */
static inline u64 mpy_l_X_l_ll(unsigned long mpy1, unsigned long mpy2)
{
        /* Widen to 64 bits before the multiply to avoid 32-bit overflow. */
        return (u64)mpy1 * mpy2;
}
/*
 * Management arrays for POSIX timers.  Timers are kept in slab memory.
 * Timer ids are allocated by an external routine that keeps track of the
 * id and the timer.  The external interface is:
 *
 * void *idr_find(struct idr *idp, int id);        to find timer_id <id>
 * int idr_get_new(struct idr *idp, void *ptr);    to get a new id and
 *                                                 relate it to <ptr>.
 * void idr_remove(struct idr *idp, int id);       to release <id>
 * void idr_init(struct idr *idp);                 to initialize <idp>
 *                                                 which we supply.
 * idr_get_new() *may* call slab for more memory so it must not be
 * called under a spin lock.  Likewise idr_remove() may release memory
 * (but it may be ok to do this under a lock...).
 * idr_find() is just a memory lookup and is quite fast.  A -1 return
 * indicates that the requested id does not exist.
 */
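/*
 * For example, the id life cycle used below is roughly (error handling
 * elided):
 *
 *      idr_pre_get(&posix_timers_id, GFP_KERNEL);
 *      spin_lock_irq(&idr_lock);
 *      idr_get_new(&posix_timers_id, (void *)tmr, &id);
 *      spin_unlock_irq(&idr_lock);
 *      ...
 *      tmr = idr_find(&posix_timers_id, id);
 *      ...
 *      idr_remove(&posix_timers_id, id);       (under idr_lock)
 */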
/*
 * Let's keep our timers in a slab cache :-)
 */
static kmem_cache_t *posix_timers_cache;
static struct idr posix_timers_id;
static DEFINE_SPINLOCK(idr_lock);
/*
 * We assume that the new SIGEV_THREAD_ID shares no bits with the other
 * SIGEV values.  Here we put out an error if this assumption fails.
 */
#if SIGEV_THREAD_ID != (SIGEV_THREAD_ID & \
                       ~(SIGEV_SIGNAL | SIGEV_NONE | SIGEV_THREAD))
#error "SIGEV_THREAD_ID must not share bits with other SIGEV values!"
#endif
/*
 * The timer ID is turned into a timer address by idr_find().
 * Verifying a valid ID consists of:
 *
 * a) checking that idr_find() returns other than -1.
 * b) checking that the timer id matches the one in the timer itself.
 * c) that the timer owner is in the caller's thread group.
 */
/*
 * CLOCKs: The POSIX standard calls for a couple of clocks and allows us
 *         to implement others.  This structure defines the various
 *         clocks and allows the possibility of adding others.  We
 *         provide an interface to add clocks to the table and expect
 *         the "arch" code to add at least one clock that is high
 *         resolution.  Here we define the standard CLOCK_REALTIME as a
 *         1/HZ resolution clock.
 *
 * RESOLUTION: Clock resolution is used to round up timer and interval
 *         times, NOT to report clock times, which are reported with as
 *         much resolution as the system can muster.  In some cases this
 *         resolution may depend on the underlying clock hardware and
 *         may not be quantifiable until run time, and only then is the
 *         necessary code written.  The standard says we should say
 *         something about this issue in the documentation...
 *
 * FUNCTIONS: The CLOCKs structure defines possible functions to handle
 *         various clock functions.  For clocks that use the standard
 *         system timer code these entries should be NULL.  This will
 *         allow dispatch without the overhead of indirect function
 *         calls.  CLOCKS that depend on other sources (e.g. WWV or GPS)
 *         must supply functions here, even if the function just returns
 *         ENOSYS.  The standard POSIX timer management code assumes the
 *         following: 1.) The k_itimer struct (sched.h) is used for the
 *         timer.  2.) The list, it_lock, it_clock, it_id and it_process
 *         fields are not modified by timer code.
 *
 *         At this time all functions EXCEPT clock_nanosleep can be
 *         redirected by the CLOCKS structure.  Clock_nanosleep is in
 *         there, but the code ignores it.
 *
 * Permissions: It is assumed that the clock_settime() function defined
 *         for each clock will take care of permission checks.  Some
 *         clocks may be settable by any user (i.e. local process
 *         clocks) others not.  Currently the only settable clock we
 *         have is CLOCK_REALTIME and its high res counterpart, both of
 *         which we beg off on and pass to do_sys_settimeofday().
 */
static struct k_clock posix_clocks[MAX_CLOCKS];
/*
 * We only have one real clock that can be set so we need only one abs list,
 * even if we should want to have several clocks with differing resolutions.
 */
static struct k_clock_abs abs_list = {.list = LIST_HEAD_INIT(abs_list.list),
                                      .lock = SPIN_LOCK_UNLOCKED};
static void posix_timer_fn(unsigned long);
static u64 do_posix_clock_monotonic_gettime_parts(
        struct timespec *tp, struct timespec *mo);
int do_posix_clock_monotonic_gettime(struct timespec *tp);
static int do_posix_clock_monotonic_get(const clockid_t, struct timespec *tp);

static struct k_itimer *lock_timer(timer_t timer_id, unsigned long *flags);

static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
{
        spin_unlock_irqrestore(&timr->it_lock, flags);
}
/*
 * Call the k_clock hook function if non-null, or the default function.
 */
#define CLOCK_DISPATCH(clock, call, arglist) \
        ((clock) < 0 ? posix_cpu_##call arglist : \
         (posix_clocks[clock].call != NULL \
          ? (*posix_clocks[clock].call) arglist : common_##call arglist))
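/*
 * For example, CLOCK_DISPATCH(which_clock, clock_get, (which_clock, &ts))
 * with which_clock >= 0 evaluates to:
 *
 *      posix_clocks[which_clock].clock_get != NULL
 *              ? (*posix_clocks[which_clock].clock_get)(which_clock, &ts)
 *              : common_clock_get(which_clock, &ts)
 *
 * while a negative (CPU) clock id dispatches to
 * posix_cpu_clock_get(which_clock, &ts).
 */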
/*
 * Default clock hook functions when the struct k_clock passed
 * to register_posix_clock leaves a function pointer null.
 *
 * The function common_CALL is the default implementation for
 * the function pointer CALL in struct k_clock.
 */

static inline int common_clock_getres(const clockid_t which_clock,
                                      struct timespec *tp)
{
        tp->tv_sec = 0;
        tp->tv_nsec = posix_clocks[which_clock].res;
        return 0;
}
static inline int common_clock_get(const clockid_t which_clock,
                                   struct timespec *tp)
{
        getnstimeofday(tp);
        return 0;
}
static inline int common_clock_set(const clockid_t which_clock,
                                   struct timespec *tp)
{
        return do_sys_settimeofday(tp, NULL);
}
static inline int common_timer_create(struct k_itimer *new_timer)
{
        INIT_LIST_HEAD(&new_timer->it.real.abs_timer_entry);
        init_timer(&new_timer->it.real.timer);
        new_timer->it.real.timer.data = (unsigned long) new_timer;
        new_timer->it.real.timer.function = posix_timer_fn;
        return 0;
}
/*
 * These ones are defined below.
 */
static int common_nsleep(const clockid_t, int flags, struct timespec *t,
                         struct timespec __user *rmtp);
static void common_timer_get(struct k_itimer *, struct itimerspec *);
static int common_timer_set(struct k_itimer *, int,
                            struct itimerspec *, struct itimerspec *);
static int common_timer_del(struct k_itimer *timer);
/*
 * Return nonzero iff we know a priori this clockid_t value is bogus.
 */
static inline int invalid_clockid(const clockid_t which_clock)
{
        if (which_clock < 0)    /* CPU clock, posix_cpu_* will check it */
                return 0;
        if ((unsigned) which_clock >= MAX_CLOCKS)
                return 1;
        if (posix_clocks[which_clock].clock_getres != NULL)
                return 0;
#ifndef CLOCK_DISPATCH_DIRECT
        if (posix_clocks[which_clock].res != 0)
                return 0;
#endif
        return 1;
}
/*
 * Initialize everything, well, just everything in Posix clocks/timers ;)
 */
static __init int init_posix_timers(void)
{
        struct k_clock clock_realtime = {.res = CLOCK_REALTIME_RES,
                                         .abs_struct = &abs_list
        };
        struct k_clock clock_monotonic = {.res = CLOCK_REALTIME_RES,
                .abs_struct = NULL,
                .clock_get = do_posix_clock_monotonic_get,
                .clock_set = do_posix_clock_nosettime
        };

        register_posix_clock(CLOCK_REALTIME, &clock_realtime);
        register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);

        posix_timers_cache = kmem_cache_create("posix_timers_cache",
                                sizeof (struct k_itimer), 0, 0, NULL, NULL);
        idr_init(&posix_timers_id);
        return 0;
}

__initcall(init_posix_timers);
static void tstojiffie(struct timespec *tp, int res, u64 *jiff)
{
        long sec = tp->tv_sec;
        long nsec = tp->tv_nsec + res - 1;

        if (nsec >= NSEC_PER_SEC) {
                sec++;
                nsec -= NSEC_PER_SEC;
        }

        /*
         * The scaling constants are defined in <linux/time.h>
         * The difference between there and here is that we do the
         * res rounding and compute a 64-bit result (well so does that
         * but it then throws away the high bits).
         */
        *jiff = (mpy_l_X_l_ll(sec, SEC_CONVERSION) +
                 (mpy_l_X_l_ll(nsec, NSEC_CONVERSION) >>
                  (NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
}
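/*
 * Rough worked example, assuming HZ = 1000 so res = TICK_NSEC is about
 * 1000000 ns: a 2.5 ms request gets res - 1 added to its nsec part,
 * rounding it up to the next whole tick, and scales to *jiff = 3
 * jiffies.  Keeping the full 64-bit result lets callers detect requests
 * beyond MAX_JIFFY_OFFSET instead of silently wrapping.
 */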
/*
 * This function adjusts the timer as needed as a result of the clock
 * being set.  It should only be called for absolute timers, and then
 * under the abs_list lock.  It computes the time difference and sets
 * the new jiffies value in the timer.  It also updates the timer's
 * reference wall_to_monotonic value.  It is complicated by the fact
 * that tstojiffies() only handles positive times and it needs to work
 * with both positive and negative times.  Also, for negative offsets,
 * we need to defeat the res round up.
 *
 * Return is true if there is a new time, else false.
 */
static long add_clockset_delta(struct k_itimer *timr,
                               struct timespec *new_wall_to)
{
        struct timespec delta;
        int sign = 0;
        u64 exp;

        set_normalized_timespec(&delta,
                                new_wall_to->tv_sec -
                                timr->it.real.wall_to_prev.tv_sec,
                                new_wall_to->tv_nsec -
                                timr->it.real.wall_to_prev.tv_nsec);
        if (likely(!(delta.tv_sec | delta.tv_nsec)))
                return 0;
        if (delta.tv_sec < 0) {
                set_normalized_timespec(&delta,
                                        -delta.tv_sec,
                                        1 - delta.tv_nsec -
                                        posix_clocks[timr->it_clock].res);
                sign++;
        }
        tstojiffie(&delta, posix_clocks[timr->it_clock].res, &exp);
        timr->it.real.wall_to_prev = *new_wall_to;
        timr->it.real.timer.expires += (sign ? -exp : exp);
        return 1;
}
static void remove_from_abslist(struct k_itimer *timr)
{
        if (!list_empty(&timr->it.real.abs_timer_entry)) {
                spin_lock(&abs_list.lock);
                list_del_init(&timr->it.real.abs_timer_entry);
                spin_unlock(&abs_list.lock);
        }
}
static void schedule_next_timer(struct k_itimer *timr)
{
        struct timespec new_wall_to;
        struct now_struct now;
        unsigned long seq;

        /*
         * Set up the timer for the next interval (if there is one).
         * Note: this code uses the abs_timer_lock to protect
         * it.real.wall_to_prev and must hold it until exp is set, not exactly
         * obvious...
         *
         * This function is used for CLOCK_REALTIME* and
         * CLOCK_MONOTONIC* timers.  If we ever want to handle other
         * CLOCKs, the calling code (do_schedule_next_timer) would need
         * to pull the "clock" info from the timer and dispatch the
         * "other" CLOCKs "next timer" code (which, I suppose should
         * also be added to the k_clock structure).
         */
        if (!timr->it.real.incr)
                return;

        do {
                seq = read_seqbegin(&xtime_lock);
                new_wall_to = wall_to_monotonic;
                posix_get_now(&now);
        } while (read_seqretry(&xtime_lock, seq));

        if (!list_empty(&timr->it.real.abs_timer_entry)) {
                spin_lock(&abs_list.lock);
                add_clockset_delta(timr, &new_wall_to);

                posix_bump_timer(timr, now);

                spin_unlock(&abs_list.lock);
        } else {
                posix_bump_timer(timr, now);
        }
        timr->it_overrun_last = timr->it_overrun;
        timr->it_overrun = -1;
        ++timr->it_requeue_pending;
        add_timer(&timr->it.real.timer);
}
/*
 * This function is exported for use by the signal deliver code.  It is
 * called just prior to the info block being released and passes that
 * block to us.  Its function is to update the overrun entry AND to
 * restart the timer.  It should only be called if the timer is to be
 * restarted (i.e. we have flagged this in the sys_private entry of the
 * info block).
 *
 * To protect against the timer going away while the interrupt is queued,
 * we require that the it_requeue_pending flag be set.
 */
void do_schedule_next_timer(struct siginfo *info)
{
        struct k_itimer *timr;
        unsigned long flags;

        timr = lock_timer(info->si_tid, &flags);

        if (!timr || timr->it_requeue_pending != info->si_sys_private)
                goto exit;

        if (timr->it_clock < 0) /* CPU clock */
                posix_cpu_timer_schedule(timr);
        else
                schedule_next_timer(timr);
        info->si_overrun = timr->it_overrun_last;
exit:
        if (timr)
                unlock_timer(timr, flags);
}
int posix_timer_event(struct k_itimer *timr, int si_private)
{
        memset(&timr->sigq->info, 0, sizeof(siginfo_t));
        timr->sigq->info.si_sys_private = si_private;
        /*
         * Send signal to the process that owns this timer.
         *
         * This code assumes that all the possible abs_lists share the
         * same lock (there is only one list at this time). If this is
         * not the case, the CLOCK info would need to be used to find
         * the proper abs list lock.
         */

        timr->sigq->info.si_signo = timr->it_sigev_signo;
        timr->sigq->info.si_errno = 0;
        timr->sigq->info.si_code = SI_TIMER;
        timr->sigq->info.si_tid = timr->it_id;
        timr->sigq->info.si_value = timr->it_sigev_value;

        if (timr->it_sigev_notify & SIGEV_THREAD_ID) {
                struct task_struct *leader;
                int ret = send_sigqueue(timr->it_sigev_signo, timr->sigq,
                                        timr->it_process);

                if (likely(ret >= 0))
                        return ret;

                timr->it_sigev_notify = SIGEV_SIGNAL;
                leader = timr->it_process->group_leader;
                put_task_struct(timr->it_process);
                timr->it_process = leader;
        }

        return send_group_sigqueue(timr->it_sigev_signo, timr->sigq,
                                   timr->it_process);
}
EXPORT_SYMBOL_GPL(posix_timer_event);
/*
 * This function gets called when a POSIX.1b interval timer expires.  It
 * is used as a callback from the kernel internal timer.  The
 * run_timer_list code ALWAYS calls with interrupts on.
 *
 * This code is for CLOCK_REALTIME* and CLOCK_MONOTONIC* timers.
 */
static void posix_timer_fn(unsigned long __data)
{
        struct k_itimer *timr = (struct k_itimer *) __data;
        unsigned long flags;
        unsigned long seq;
        struct timespec delta, new_wall_to;
        u64 exp = 0;
        int do_notify = 1;

        spin_lock_irqsave(&timr->it_lock, flags);
        if (!list_empty(&timr->it.real.abs_timer_entry)) {
                spin_lock(&abs_list.lock);
                do {
                        seq = read_seqbegin(&xtime_lock);
                        new_wall_to = wall_to_monotonic;
                } while (read_seqretry(&xtime_lock, seq));
                set_normalized_timespec(&delta,
                                        new_wall_to.tv_sec -
                                        timr->it.real.wall_to_prev.tv_sec,
                                        new_wall_to.tv_nsec -
                                        timr->it.real.wall_to_prev.tv_nsec);
                if (likely((delta.tv_sec | delta.tv_nsec) == 0)) {
                        /* do nothing, timer is on time */
                } else if (delta.tv_sec < 0) {
                        /* do nothing, timer is already late */
                } else {
                        /* timer is early due to a clock set */
                        tstojiffie(&delta,
                                   posix_clocks[timr->it_clock].res,
                                   &exp);
                        timr->it.real.wall_to_prev = new_wall_to;
                        timr->it.real.timer.expires += exp;
                        add_timer(&timr->it.real.timer);
                        do_notify = 0;
                }
                spin_unlock(&abs_list.lock);

        }
        if (do_notify) {
                int si_private = 0;

                if (timr->it.real.incr)
                        si_private = ++timr->it_requeue_pending;
                else {
                        remove_from_abslist(timr);
                }

                if (posix_timer_event(timr, si_private))
                        /*
                         * The signal was not sent because of sig_ignor;
                         * we will not get a call back to restart it AND
                         * it should be restarted.
                         */
                        schedule_next_timer(timr);
        }
        unlock_timer(timr, flags); /* hold thru abs lock to keep irq off */
}
static inline struct task_struct * good_sigevent(sigevent_t * event)
{
        struct task_struct *rtn = current->group_leader;

        if ((event->sigev_notify & SIGEV_THREAD_ID) &&
            (!(rtn = find_task_by_pid(event->sigev_notify_thread_id)) ||
             rtn->tgid != current->tgid ||
             (event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_SIGNAL))
                return NULL;

        if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) &&
            ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX)))
                return NULL;

        return rtn;
}
void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock)
{
        if ((unsigned) clock_id >= MAX_CLOCKS) {
                printk("POSIX clock register failed for clock_id %d\n",
                       clock_id);
                return;
        }

        posix_clocks[clock_id] = *new_clock;
}
EXPORT_SYMBOL_GPL(register_posix_clock);
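/*
 * Illustrative sketch of how a clock provider plugs in (CLOCK_FOO and
 * foo_clock_get are hypothetical names, not defined in this file):
 *
 *      static struct k_clock clock_foo = {
 *              .res = CLOCK_REALTIME_RES,
 *              .clock_get = foo_clock_get,
 *              .clock_set = do_posix_clock_nosettime,
 *      };
 *      register_posix_clock(CLOCK_FOO, &clock_foo);
 *
 * Hooks left NULL fall back to the common_* defaults via CLOCK_DISPATCH,
 * as init_posix_timers() relies on above for CLOCK_REALTIME.
 */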
static struct k_itimer * alloc_posix_timer(void)
{
        struct k_itimer *tmr;
        tmr = kmem_cache_alloc(posix_timers_cache, GFP_KERNEL);
        if (!tmr)
                return tmr;
        memset(tmr, 0, sizeof (struct k_itimer));
        if (unlikely(!(tmr->sigq = sigqueue_alloc()))) {
                kmem_cache_free(posix_timers_cache, tmr);
                tmr = NULL;
        }
        return tmr;
}
#define IT_ID_SET       1
#define IT_ID_NOT_SET   0
static void release_posix_timer(struct k_itimer *tmr, int it_id_set)
{
        if (it_id_set) {
                unsigned long flags;
                spin_lock_irqsave(&idr_lock, flags);
                idr_remove(&posix_timers_id, tmr->it_id);
                spin_unlock_irqrestore(&idr_lock, flags);
        }
        sigqueue_free(tmr->sigq);
        if (unlikely(tmr->it_process) &&
            tmr->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID))
                put_task_struct(tmr->it_process);
        kmem_cache_free(posix_timers_cache, tmr);
}
/* Create a POSIX.1b interval timer. */

asmlinkage long
sys_timer_create(const clockid_t which_clock,
                 struct sigevent __user *timer_event_spec,
                 timer_t __user * created_timer_id)
{
        int error = 0;
        struct k_itimer *new_timer = NULL;
        int new_timer_id;
        struct task_struct *process = NULL;
        unsigned long flags;
        sigevent_t event;
        int it_id_set = IT_ID_NOT_SET;

        if (invalid_clockid(which_clock))
                return -EINVAL;

        new_timer = alloc_posix_timer();
        if (unlikely(!new_timer))
                return -EAGAIN;

        spin_lock_init(&new_timer->it_lock);
retry:
        if (unlikely(!idr_pre_get(&posix_timers_id, GFP_KERNEL))) {
                error = -EAGAIN;
                goto out;
        }
        spin_lock_irq(&idr_lock);
        error = idr_get_new(&posix_timers_id,
                            (void *) new_timer,
                            &new_timer_id);
        spin_unlock_irq(&idr_lock);
        if (error == -EAGAIN)
                goto retry;
        else if (error) {
                /*
                 * Weird looking, but we return EAGAIN if the IDR is
                 * full (proper POSIX return value for this)
                 */
                error = -EAGAIN;
                goto out;
        }

        it_id_set = IT_ID_SET;
        new_timer->it_id = (timer_t) new_timer_id;
        new_timer->it_clock = which_clock;
        new_timer->it_overrun = -1;
        error = CLOCK_DISPATCH(which_clock, timer_create, (new_timer));
        if (error)
                goto out;

        /*
         * return the timer_id now.  The next step is hard to
         * back out if there is an error.
         */
        if (copy_to_user(created_timer_id,
                         &new_timer_id, sizeof (new_timer_id))) {
                error = -EFAULT;
                goto out;
        }
        if (timer_event_spec) {
                if (copy_from_user(&event, timer_event_spec, sizeof (event))) {
                        error = -EFAULT;
                        goto out;
                }
                new_timer->it_sigev_notify = event.sigev_notify;
                new_timer->it_sigev_signo = event.sigev_signo;
                new_timer->it_sigev_value = event.sigev_value;

                read_lock(&tasklist_lock);
                if ((process = good_sigevent(&event))) {
                        /*
                         * We may be setting up this process for another
                         * thread.  It may be exiting.  To catch this
                         * case we check the PF_EXITING flag.  If
                         * the flag is not set, the siglock will catch
                         * him before it is too late (in exit_itimers).
                         *
                         * The exec case is a bit more involved but easy
                         * to code.  If the process is in our thread
                         * group (and it must be or we would not allow
                         * it here) and is doing an exec, it will cause
                         * us to be killed.  In this case it will wait
                         * for us to die which means we can finish this
                         * linkage with our last gasp. I.e. no code :)
                         */
                        spin_lock_irqsave(&process->sighand->siglock, flags);
                        if (!(process->flags & PF_EXITING)) {
                                new_timer->it_process = process;
                                list_add(&new_timer->list,
                                         &process->signal->posix_timers);
                                spin_unlock_irqrestore(&process->sighand->siglock, flags);
                                if (new_timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID))
                                        get_task_struct(process);
                        } else {
                                spin_unlock_irqrestore(&process->sighand->siglock, flags);
                                process = NULL;
                        }
                }
                read_unlock(&tasklist_lock);
                if (!process) {
                        error = -EINVAL;
                        goto out;
                }
        } else {
                new_timer->it_sigev_notify = SIGEV_SIGNAL;
                new_timer->it_sigev_signo = SIGALRM;
                new_timer->it_sigev_value.sival_int = new_timer->it_id;
                process = current->group_leader;
                spin_lock_irqsave(&process->sighand->siglock, flags);
                new_timer->it_process = process;
                list_add(&new_timer->list, &process->signal->posix_timers);
                spin_unlock_irqrestore(&process->sighand->siglock, flags);
        }

        /*
         * In the case of the timer belonging to another task, after
         * the task is unlocked, the timer is owned by the other task
         * and may cease to exist at any time.  Don't use or modify
         * new_timer after the unlock call.
         */

out:
        if (error)
                release_posix_timer(new_timer, it_id_set);

        return error;
}
/*
 * This function checks the elements of a timespec structure.
 *
 * Arguments:
 * ts        : Pointer to the timespec structure to check
 *
 * Return value:
 * If a NULL pointer was passed in, or the tv_nsec field was less than 0
 * or greater than NSEC_PER_SEC, or the tv_sec field was less than 0,
 * this function returns 0. Otherwise it returns 1.
 */
static int good_timespec(const struct timespec *ts)
{
        if ((!ts) || !timespec_valid(ts))
                return 0;
        return 1;
}
/*
 * Locking issues: We need to protect the result of the id look up until
 * we get the timer locked down so it is not deleted under us.  The
 * removal is done under the idr spinlock so we use that here to bridge
 * the find to the timer lock.  To avoid a deadlock, the timer id MUST
 * be released without holding the timer lock.
 */
static struct k_itimer * lock_timer(timer_t timer_id, unsigned long *flags)
{
        struct k_itimer *timr;
        /*
         * Watch out here.  We do an irqsave on the idr_lock and pass the
         * flags part over to the timer lock.  Must not let interrupts in
         * while we are moving the lock.
         */

        spin_lock_irqsave(&idr_lock, *flags);
        timr = (struct k_itimer *) idr_find(&posix_timers_id, (int) timer_id);
        if (timr) {
                spin_lock(&timr->it_lock);
                spin_unlock(&idr_lock);

                if ((timr->it_id != timer_id) || !(timr->it_process) ||
                    timr->it_process->tgid != current->tgid) {
                        unlock_timer(timr, *flags);
                        timr = NULL;
                }
        } else
                spin_unlock_irqrestore(&idr_lock, *flags);

        return timr;
}
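/*
 * Typical pairing, as used by the sys_timer_* calls below:
 *
 *      unsigned long flags;
 *      struct k_itimer *timr = lock_timer(timer_id, &flags);
 *      if (!timr)
 *              return -EINVAL;
 *      ...
 *      unlock_timer(timr, flags);
 */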
/*
 * Get the time remaining on a POSIX.1b interval timer.  This function
 * is ALWAYS called with spin_lock_irq on the timer, thus it must not
 * mess with irq.
 *
 * We have a couple of messes to clean up here.  First there is the case
 * of a timer that has a requeue pending.  These timers should appear to
 * be in the timer list with an expiry as if we were to requeue them
 * now.
 *
 * The second issue is the SIGEV_NONE timer which may be active but is
 * not really ever put in the timer list (to save system resources).
 * This timer may be expired, and if so, we will do it here.  Otherwise
 * it is the same as a requeue pending timer WRT to what we should
 * report.
 */
static void
common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
{
        unsigned long expires;
        struct now_struct now;

        do
                expires = timr->it.real.timer.expires;
        while ((volatile long) (timr->it.real.timer.expires) != expires);

        posix_get_now(&now);

        if (expires &&
            ((timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) &&
            !timr->it.real.incr &&
            posix_time_before(&timr->it.real.timer, &now))
                timr->it.real.timer.expires = expires = 0;
        if (expires) {
                if (timr->it_requeue_pending & REQUEUE_PENDING ||
                    (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
                        posix_bump_timer(timr, now);
                        expires = timr->it.real.timer.expires;
                } else if (!timer_pending(&timr->it.real.timer))
                        expires = 0;
                if (expires)
                        expires -= now.jiffies;
        }
        jiffies_to_timespec(expires, &cur_setting->it_value);
        jiffies_to_timespec(timr->it.real.incr, &cur_setting->it_interval);

        if (cur_setting->it_value.tv_sec < 0) {
                cur_setting->it_value.tv_nsec = 1;
                cur_setting->it_value.tv_sec = 0;
        }
}
/* Get the time remaining on a POSIX.1b interval timer. */
asmlinkage long
sys_timer_gettime(timer_t timer_id, struct itimerspec __user *setting)
{
        struct k_itimer *timr;
        struct itimerspec cur_setting;
        unsigned long flags;

        timr = lock_timer(timer_id, &flags);
        if (!timr)
                return -EINVAL;

        CLOCK_DISPATCH(timr->it_clock, timer_get, (timr, &cur_setting));

        unlock_timer(timr, flags);

        if (copy_to_user(setting, &cur_setting, sizeof (cur_setting)))
                return -EFAULT;

        return 0;
}
/*
 * Get the number of overruns of a POSIX.1b interval timer.  This is to
 * be the overrun of the timer last delivered.  At the same time we are
 * accumulating overruns on the next timer.  The overrun is frozen when
 * the signal is delivered, either at the notify time (if the info block
 * is not queued) or at the actual delivery time (as we are informed by
 * the call back to do_schedule_next_timer()).  So all we need to do is
 * to pick up the frozen overrun.
 */
asmlinkage long
sys_timer_getoverrun(timer_t timer_id)
{
        struct k_itimer *timr;
        int overrun;
        unsigned long flags;

        timr = lock_timer(timer_id, &flags);
        if (!timr)
                return -EINVAL;

        overrun = timr->it_overrun_last;
        unlock_timer(timr, flags);

        return overrun;
}
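/*
 * Example of the intended semantics: if an interval timer expired three
 * times before its last signal was actually delivered, the two extra
 * expirations accumulate in it_overrun, are frozen into it_overrun_last
 * at delivery, and timer_getoverrun() then returns 2.
 */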
/*
 * Adjust for absolute time.
 *
 * If absolute time is given and it is not CLOCK_MONOTONIC, we need to
 * adjust for the offset between the timer clock (CLOCK_MONOTONIC) and
 * whatever clock is in use.
 *
 * If it is relative time, we need to add the current (CLOCK_MONOTONIC)
 * time to it to get the proper time for the timer.
 */
static int adjust_abs_time(struct k_clock *clock, struct timespec *tp,
                           int abs, u64 *exp, struct timespec *wall_to)
{
        struct timespec now;
        struct timespec oc = *tp;
        u64 jiffies_64_f;
        int rtn = 0;

        if (abs) {
                /*
                 * The mask picks up the 4 basic clocks
                 */
                if (!((clock - &posix_clocks[0]) & ~CLOCKS_MASK)) {
                        jiffies_64_f = do_posix_clock_monotonic_gettime_parts(
                                &now, wall_to);
                        /*
                         * If we are doing a MONOTONIC clock
                         */
                        if ((clock - &posix_clocks[0]) & CLOCKS_MONO) {
                                now.tv_sec += wall_to->tv_sec;
                                now.tv_nsec += wall_to->tv_nsec;
                        }
                } else {
                        /*
                         * Not one of the basic clocks
                         */
                        clock->clock_get(clock - posix_clocks, &now);
                        jiffies_64_f = get_jiffies_64();
                }
                /*
                 * Take away now to get delta and normalize
                 */
                set_normalized_timespec(&oc, oc.tv_sec - now.tv_sec,
                                        oc.tv_nsec - now.tv_nsec);
        } else {
                jiffies_64_f = get_jiffies_64();
        }
        /*
         * Check if the requested time is prior to now (if so set now)
         */
        if (oc.tv_sec < 0)
                oc.tv_sec = oc.tv_nsec = 0;

        if (oc.tv_sec | oc.tv_nsec)
                set_normalized_timespec(&oc, oc.tv_sec,
                                        oc.tv_nsec + clock->res);
        tstojiffie(&oc, clock->res, exp);

        /*
         * Check if the requested time is more than the timer code
         * can handle (if so we error out but return the value too).
         */
        if (*exp > ((u64)MAX_JIFFY_OFFSET)) {
                /*
                 * This is a considered response, not exactly in
                 * line with the standard (in fact it is silent on
                 * possible overflows).  We assume such a large
                 * value is ALMOST always a programming error and
                 * try not to compound it by setting a really dumb
                 * value.
                 */
                rtn = -EINVAL;
        }
        /*
         * return the actual jiffies expire time, full 64 bits
         */
        *exp += jiffies_64_f;
        return rtn;
}
/* Set a POSIX.1b interval timer. */
/* timr->it_lock is taken. */
static inline int
common_timer_set(struct k_itimer *timr, int flags,
                 struct itimerspec *new_setting, struct itimerspec *old_setting)
{
        struct k_clock *clock = &posix_clocks[timr->it_clock];
        u64 expire_64;

        if (old_setting)
                common_timer_get(timr, old_setting);

        /* disable the timer */
        timr->it.real.incr = 0;
        /*
         * careful here.  If smp we could be in the "fire" routine which will
         * be spinning as we hold the lock.  But this is ONLY an SMP issue.
         */
        if (try_to_del_timer_sync(&timr->it.real.timer) < 0) {
#ifdef CONFIG_SMP
                /*
                 * It can only be active if on another cpu.  Since
                 * we have cleared the interval stuff above, it should
                 * clear once we release the spin lock.  Of course once
                 * we do that anything could happen, including the
                 * complete meltdown of the timer.  So return with
                 * a "retry" exit status.
                 */
                return TIMER_RETRY;
#endif
        }

        remove_from_abslist(timr);

        timr->it_requeue_pending = (timr->it_requeue_pending + 2) &
                ~REQUEUE_PENDING;
        timr->it_overrun_last = 0;
        timr->it_overrun = -1;
        /*
         * switch off the timer when it_value is zero
         */
        if (!new_setting->it_value.tv_sec && !new_setting->it_value.tv_nsec) {
                timr->it.real.timer.expires = 0;
                return 0;
        }

        if (adjust_abs_time(clock,
                            &new_setting->it_value, flags & TIMER_ABSTIME,
                            &expire_64, &(timr->it.real.wall_to_prev))) {
                return -EINVAL;
        }
        timr->it.real.timer.expires = (unsigned long)expire_64;
        tstojiffie(&new_setting->it_interval, clock->res, &expire_64);
        timr->it.real.incr = (unsigned long)expire_64;

        /*
         * We do not even queue SIGEV_NONE timers!  But we do put them
         * in the abs list so we can do that right.
         */
        if (((timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE))
                add_timer(&timr->it.real.timer);

        if (flags & TIMER_ABSTIME && clock->abs_struct) {
                spin_lock(&clock->abs_struct->lock);
                list_add_tail(&(timr->it.real.abs_timer_entry),
                              &(clock->abs_struct->list));
                spin_unlock(&clock->abs_struct->lock);
        }
        return 0;
}
/* Set a POSIX.1b interval timer */
asmlinkage long
sys_timer_settime(timer_t timer_id, int flags,
                  const struct itimerspec __user *new_setting,
                  struct itimerspec __user *old_setting)
{
        struct k_itimer *timr;
        struct itimerspec new_spec, old_spec;
        int error = 0;
        unsigned long flag;
        struct itimerspec *rtn = old_setting ? &old_spec : NULL;

        if (!new_setting)
                return -EINVAL;

        if (copy_from_user(&new_spec, new_setting, sizeof (new_spec)))
                return -EFAULT;

        if ((!good_timespec(&new_spec.it_interval)) ||
            (!good_timespec(&new_spec.it_value)))
                return -EINVAL;
retry:
        timr = lock_timer(timer_id, &flag);
        if (!timr)
                return -EINVAL;

        error = CLOCK_DISPATCH(timr->it_clock, timer_set,
                               (timr, flags, &new_spec, rtn));

        unlock_timer(timr, flag);
        if (error == TIMER_RETRY) {
                rtn = NULL;     /* We already got the old time... */
                goto retry;
        }

        if (old_setting && !error && copy_to_user(old_setting,
                                                  &old_spec, sizeof (old_spec)))
                error = -EFAULT;

        return error;
}
static inline int common_timer_del(struct k_itimer *timer)
{
        timer->it.real.incr = 0;

        if (try_to_del_timer_sync(&timer->it.real.timer) < 0) {
#ifdef CONFIG_SMP
                /*
                 * It can only be active if on another cpu.  Since
                 * we have cleared the interval stuff above, it should
                 * clear once we release the spin lock.  Of course once
                 * we do that anything could happen, including the
                 * complete meltdown of the timer.  So return with
                 * a "retry" exit status.
                 */
                return TIMER_RETRY;
#endif
        }

        remove_from_abslist(timer);

        return 0;
}

static inline int timer_delete_hook(struct k_itimer *timer)
{
        return CLOCK_DISPATCH(timer->it_clock, timer_del, (timer));
}
/* Delete a POSIX.1b interval timer. */
asmlinkage long
sys_timer_delete(timer_t timer_id)
{
        struct k_itimer *timer;
        unsigned long flags;

#ifdef CONFIG_SMP
        int error;
retry_delete:
#else
retry_delete:
#endif
        timer = lock_timer(timer_id, &flags);
        if (!timer)
                return -EINVAL;

#ifdef CONFIG_SMP
        error = timer_delete_hook(timer);

        if (error == TIMER_RETRY) {
                unlock_timer(timer, flags);
                goto retry_delete;
        }
#else
        timer_delete_hook(timer);
#endif
        spin_lock(&current->sighand->siglock);
        list_del(&timer->list);
        spin_unlock(&current->sighand->siglock);
        /*
         * This keeps any tasks waiting on the spin lock from thinking
         * they got something (see the lock code above).
         */
        if (timer->it_process) {
                if (timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID))
                        put_task_struct(timer->it_process);
                timer->it_process = NULL;
        }
        unlock_timer(timer, flags);
        release_posix_timer(timer, IT_ID_SET);
        return 0;
}
/*
 * return timer owned by the process, used by exit_itimers
 */
static inline void itimer_delete(struct k_itimer *timer)
{
        unsigned long flags;

#ifdef CONFIG_SMP
        int error;
retry_delete:
#else
retry_delete:
#endif
        spin_lock_irqsave(&timer->it_lock, flags);

#ifdef CONFIG_SMP
        error = timer_delete_hook(timer);

        if (error == TIMER_RETRY) {
                unlock_timer(timer, flags);
                goto retry_delete;
        }
#else
        timer_delete_hook(timer);
#endif
        list_del(&timer->list);
        /*
         * This keeps any tasks waiting on the spin lock from thinking
         * they got something (see the lock code above).
         */
        if (timer->it_process) {
                if (timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID))
                        put_task_struct(timer->it_process);
                timer->it_process = NULL;
        }
        unlock_timer(timer, flags);
        release_posix_timer(timer, IT_ID_SET);
}
/*
 * This is called by do_exit or de_thread, only when there are no more
 * references to the shared signal_struct.
 */
void exit_itimers(struct signal_struct *sig)
{
        struct k_itimer *tmr;

        while (!list_empty(&sig->posix_timers)) {
                tmr = list_entry(sig->posix_timers.next, struct k_itimer, list);
                itimer_delete(tmr);
        }
}
/*
 * And now for the "clock" calls
 *
 * These functions are called both from timer functions (with the timer
 * spin_lock_irq() held) and from clock calls with no locking.  They must
 * use the save flags versions of locks.
 */

/*
 * We do ticks here to avoid the irq lock (they take sooo long).
 * The seqlock is great here.  Since we are a reader, we don't really care
 * if we are interrupted since we don't take any lock that will stall us or
 * any other cpu.  Voila, no irq lock is needed.
 */

static u64 do_posix_clock_monotonic_gettime_parts(
        struct timespec *tp, struct timespec *mo)
{
        u64 jiff;
        unsigned int seq;

        do {
                seq = read_seqbegin(&xtime_lock);
                getnstimeofday(tp);
                *mo = wall_to_monotonic;
                jiff = jiffies_64;

        } while (read_seqretry(&xtime_lock, seq));

        return jiff;
}
static int do_posix_clock_monotonic_get(const clockid_t clock,
                                        struct timespec *tp)
{
        struct timespec wall_to_mono;

        do_posix_clock_monotonic_gettime_parts(tp, &wall_to_mono);

        set_normalized_timespec(tp, tp->tv_sec + wall_to_mono.tv_sec,
                                tp->tv_nsec + wall_to_mono.tv_nsec);

        return 0;
}

int do_posix_clock_monotonic_gettime(struct timespec *tp)
{
        return do_posix_clock_monotonic_get(CLOCK_MONOTONIC, tp);
}
int do_posix_clock_nosettime(const clockid_t clockid, struct timespec *tp)
{
        return -EINVAL;
}
EXPORT_SYMBOL_GPL(do_posix_clock_nosettime);

int do_posix_clock_notimer_create(struct k_itimer *timer)
{
        return -EINVAL;
}
EXPORT_SYMBOL_GPL(do_posix_clock_notimer_create);

int do_posix_clock_nonanosleep(const clockid_t clock, int flags,
                               struct timespec *t, struct timespec __user *r)
{
#ifndef ENOTSUP
        return -EOPNOTSUPP;     /* aka ENOTSUP in userland for POSIX */
#else   /* parisc does define it separately. */
        return -ENOTSUP;
#endif
}
EXPORT_SYMBOL_GPL(do_posix_clock_nonanosleep);
asmlinkage long sys_clock_settime(const clockid_t which_clock,
                                  const struct timespec __user *tp)
{
        struct timespec new_tp;

        if (invalid_clockid(which_clock))
                return -EINVAL;
        if (copy_from_user(&new_tp, tp, sizeof (*tp)))
                return -EFAULT;

        return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
}
asmlinkage long
sys_clock_gettime(const clockid_t which_clock, struct timespec __user *tp)
{
        struct timespec kernel_tp;
        int error;

        if (invalid_clockid(which_clock))
                return -EINVAL;
        error = CLOCK_DISPATCH(which_clock, clock_get,
                               (which_clock, &kernel_tp));
        if (!error && copy_to_user(tp, &kernel_tp, sizeof (kernel_tp)))
                error = -EFAULT;

        return error;
}
asmlinkage long
sys_clock_getres(const clockid_t which_clock, struct timespec __user *tp)
{
        struct timespec rtn_tp;
        int error;

        if (invalid_clockid(which_clock))
                return -EINVAL;

        error = CLOCK_DISPATCH(which_clock, clock_getres,
                               (which_clock, &rtn_tp));

        if (!error && tp && copy_to_user(tp, &rtn_tp, sizeof (rtn_tp))) {
                error = -EFAULT;
        }

        return error;
}
/*
 * The standard says that an absolute nanosleep call MUST wake up at
 * the requested time in spite of clock settings.  Here is what we do:
 * For each nanosleep call that needs it (only absolute and not on
 * CLOCK_MONOTONIC* (as it cannot be set)) we thread a little structure
 * into the "nanosleep_abs_list".  All we need is the task_struct pointer.
 * Whenever the clock is set we just wake up all those tasks.  The rest
 * is done by the while loop in clock_nanosleep().
 *
 * On locking, clock_was_set() is called from update_wall_clock which
 * holds (or has held for it) a write_lock_irq(xtime_lock) and is
 * called from the timer bh code.  Thus we need the irq save locks.
 *
 * Also, on the call from update_wall_clock, that is done as part of a
 * softirq thing.  We don't want to delay the system that much (possibly
 * a long list of timers to fix), so we defer that work to keventd.
 */

static DECLARE_WAIT_QUEUE_HEAD(nanosleep_abs_wqueue);
static DECLARE_WORK(clock_was_set_work, (void(*)(void*))clock_was_set, NULL);

static DECLARE_MUTEX(clock_was_set_lock);
void clock_was_set(void)
{
        struct k_itimer *timr;
        struct timespec new_wall_to;
        LIST_HEAD(cws_list);
        unsigned long seq;


        if (unlikely(in_interrupt())) {
                schedule_work(&clock_was_set_work);
                return;
        }
        wake_up_all(&nanosleep_abs_wqueue);

        /*
         * Check if there exist TIMER_ABSTIME timers to correct.
         *
         * Notes on locking: This code is run in task context with irq
         * on.  We CAN be interrupted!  All other usage of the abs list
         * lock is under the timer lock which holds the irq lock as
         * well.  We REALLY don't want to scan the whole list with the
         * interrupt system off, AND we would like a sequence lock on
         * this code as well.  Since we assume that the clock will not
         * be set often, it seems ok to take and release the irq lock
         * for each timer.  In fact add_timer will do this, so this is
         * not an issue.  So we know when we are done, we will move the
         * whole list to a new location.  Then as we process each entry,
         * we will move it to the actual list again.  This way, when our
         * copy is empty, we are done.  We are not all that concerned
         * about preemption so we will use a semaphore lock to protect
         * against reentry.  This way we will not stall another
         * processor.  It is possible that this may delay some timers
         * that should have expired, given the new clock, but even this
         * will be minimal as we will always update to the current time,
         * even if it was set by a task that is waiting for entry to
         * this code.  Timers that expire too early will be caught by
         * the expire code and restarted.
         *
         * Absolute timers that repeat are left in the abs list while
         * waiting for the task to pick up the signal.  This means we
         * may find timers that are not in the "add_timer" list, but are
         * in the abs list.  We do the same thing for these, save
         * putting them back in the "add_timer" list.  (Note, these are
         * left in the abs list mainly to indicate that they are
         * ABSOLUTE timers, a fact that is used by the re-arm code, and
         * for which we have no other flag.)
         */

        down(&clock_was_set_lock);
        spin_lock_irq(&abs_list.lock);
        list_splice_init(&abs_list.list, &cws_list);
        spin_unlock_irq(&abs_list.lock);
        do {
                do {
                        seq = read_seqbegin(&xtime_lock);
                        new_wall_to = wall_to_monotonic;
                } while (read_seqretry(&xtime_lock, seq));

                spin_lock_irq(&abs_list.lock);
                if (list_empty(&cws_list)) {
                        spin_unlock_irq(&abs_list.lock);
                        break;
                }
                timr = list_entry(cws_list.next, struct k_itimer,
                                  it.real.abs_timer_entry);

                list_del_init(&timr->it.real.abs_timer_entry);
                if (add_clockset_delta(timr, &new_wall_to) &&
                    del_timer(&timr->it.real.timer))  /* timer run yet? */
                        add_timer(&timr->it.real.timer);
                list_add(&timr->it.real.abs_timer_entry, &abs_list.list);
                spin_unlock_irq(&abs_list.lock);
        } while (1);

        up(&clock_was_set_lock);
}
/*
 * nanosleep for monotonic and realtime clocks
 */
static int common_nsleep(const clockid_t which_clock, int flags,
                         struct timespec *tsave, struct timespec __user *rmtp)
{
        int mode = flags & TIMER_ABSTIME ? HRTIMER_ABS : HRTIMER_REL;
        int clockid = which_clock;

        switch (which_clock) {
        case CLOCK_REALTIME:
                /* Posix madness. Only absolute timers on clock realtime
                   are affected by clock set. */
                if (mode == HRTIMER_ABS)
                        clockid = CLOCK_MONOTONIC;
                /* fall through */
        case CLOCK_MONOTONIC:
                break;
        default:
                return -EINVAL;
        }
        return hrtimer_nanosleep(tsave, rmtp, mode, clockid);
}
asmlinkage long
sys_clock_nanosleep(const clockid_t which_clock, int flags,
                    const struct timespec __user *rqtp,
                    struct timespec __user *rmtp)
{
        struct timespec t;

        if (invalid_clockid(which_clock))
                return -EINVAL;

        if (copy_from_user(&t, rqtp, sizeof (struct timespec)))
                return -EFAULT;

        if (!timespec_valid(&t))
                return -EINVAL;

        return CLOCK_DISPATCH(which_clock, nsleep,
                              (which_clock, flags, &t, rmtp));
}