/*
 * Implement CPU time clocks for the POSIX clock interface.
 */
#include <linux/sched/signal.h>
#include <linux/posix-timers.h>
#include <linux/errno.h>
#include <linux/math64.h>
#include <linux/uaccess.h>
#include <linux/kernel_stat.h>
#include <trace/events/timer.h>
#include <linux/tick.h>
#include <linux/workqueue.h>
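
/*
 * A note on clock IDs (summary, not part of the original file): a CPU-clock
 * clockid_t encodes both a target task and a clock type.  CPUCLOCK_PID()
 * extracts the PID (0 meaning the caller), CPUCLOCK_PERTHREAD() tells a
 * thread clock from a process-wide one, and CPUCLOCK_WHICH() selects
 * CPUCLOCK_PROF, CPUCLOCK_VIRT or CPUCLOCK_SCHED.  The macros live in
 * <linux/posix-timers.h>.
 */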
/*
 * Called after updating RLIMIT_CPU to run cpu timer and update
 * tsk->signal->cputime_expires expiration cache if necessary. Needs
 * siglock protection since other code may update expiration cache as
 * well.
 */
void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
{
	u64 nsecs = rlim_new * NSEC_PER_SEC;

	spin_lock_irq(&task->sighand->siglock);
	set_process_cpu_timer(task, CPUCLOCK_PROF, &nsecs, NULL);
	spin_unlock_irq(&task->sighand->siglock);
}
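
/*
 * Presumably reached from the setrlimit()/prlimit() path once the new
 * RLIMIT_CPU value is in place, so the shared CPUCLOCK_PROF expiration
 * cache stays consistent with the rlimit.
 */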
static int check_clock(const clockid_t which_clock)
{
	int error = 0;
	struct task_struct *p;
	const pid_t pid = CPUCLOCK_PID(which_clock);

	if (CPUCLOCK_WHICH(which_clock) >= CPUCLOCK_MAX)
		return -EINVAL;

	if (pid == 0)
		return 0;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (!p || !(CPUCLOCK_PERTHREAD(which_clock) ?
		   same_thread_group(p, current) : has_group_leader_pid(p))) {
		error = -EINVAL;
	}
	rcu_read_unlock();

	return error;
}
/*
 * Update expiry time from increment, and increase overrun count,
 * given the current clock sample.
 */
static void bump_cpu_timer(struct k_itimer *timer, u64 now)
{
	int i;
	u64 delta, incr;

	if (timer->it.cpu.incr == 0)
		return;

	if (now < timer->it.cpu.expires)
		return;

	incr = timer->it.cpu.incr;
	delta = now + incr - timer->it.cpu.expires;

	/* Don't use (incr*2 < delta), incr*2 might overflow. */
	for (i = 0; incr < delta - incr; i++)
		incr = incr << 1;

	for (; i >= 0; incr >>= 1, i--) {
		if (delta < incr)
			continue;

		timer->it.cpu.expires += incr;
		timer->it_overrun += 1 << i;
		delta -= incr;
	}
}
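
/*
 * Worked example (illustrative, not from the original source): with
 * incr = 10 and now = expires + 35, delta = 45.  The first loop doubles
 * incr to 40 (i = 2); the second loop then subtracts 40 from delta,
 * advances expires by four intervals and adds 1 << 2 to it_overrun,
 * leaving expires at the old value + 40, i.e. the first expiry strictly
 * after "now".  The doubling trick tallies overruns in O(log n) steps.
 */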
/**
 * task_cputime_zero - Check a task_cputime struct for all zero fields.
 *
 * @cputime:	The struct to compare.
 *
 * Checks @cputime to see if all fields are zero.  Returns true if all fields
 * are zero, false if any field is nonzero.
 */
static inline int task_cputime_zero(const struct task_cputime *cputime)
{
	if (!cputime->utime && !cputime->stime && !cputime->sum_exec_runtime)
		return 1;
	return 0;
}
static inline u64 prof_ticks(struct task_struct *p)
{
	u64 utime, stime;

	task_cputime(p, &utime, &stime);

	return utime + stime;
}
static inline u64 virt_ticks(struct task_struct *p)
{
	u64 utime, stime;

	task_cputime(p, &utime, &stime);

	return utime;
}
static int
posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp)
{
	int error = check_clock(which_clock);
	if (!error) {
		tp->tv_sec = 0;
		tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
		if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
			/*
			 * If sched_clock is using a cycle counter, we
			 * don't have any idea of its true resolution
			 * exported, but it is much more than 1s/HZ.
			 */
			tp->tv_nsec = 1;
		}
	}
	return error;
}
static int
posix_cpu_clock_set(const clockid_t which_clock, const struct timespec *tp)
{
	/*
	 * You can never reset a CPU clock, but we check for other errors
	 * in the call before failing with EPERM.
	 */
	int error = check_clock(which_clock);
	if (error == 0) {
		error = -EPERM;
	}
	return error;
}
/*
 * Sample a per-thread clock for the given task.
 */
static int cpu_clock_sample(const clockid_t which_clock,
			    struct task_struct *p, u64 *sample)
{
	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		*sample = prof_ticks(p);
		break;
	case CPUCLOCK_VIRT:
		*sample = virt_ticks(p);
		break;
	case CPUCLOCK_SCHED:
		*sample = task_sched_runtime(p);
		break;
	}
	return 0;
}
/*
 * Set cputime to sum_cputime if sum_cputime > cputime. Use cmpxchg
 * to avoid race conditions with concurrent updates to cputime.
 */
static inline void __update_gt_cputime(atomic64_t *cputime, u64 sum_cputime)
{
	u64 curr_cputime;
retry:
	curr_cputime = atomic64_read(cputime);
	if (sum_cputime > curr_cputime) {
		if (atomic64_cmpxchg(cputime, curr_cputime, sum_cputime) != curr_cputime)
			goto retry;
	}
}
static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic, struct task_cputime *sum)
{
	__update_gt_cputime(&cputime_atomic->utime, sum->utime);
	__update_gt_cputime(&cputime_atomic->stime, sum->stime);
	__update_gt_cputime(&cputime_atomic->sum_exec_runtime, sum->sum_exec_runtime);
}
/* Sample task_cputime_atomic values in "atomic_timers", store results in "times". */
static inline void sample_cputime_atomic(struct task_cputime *times,
					 struct task_cputime_atomic *atomic_times)
{
	times->utime = atomic64_read(&atomic_times->utime);
	times->stime = atomic64_read(&atomic_times->stime);
	times->sum_exec_runtime = atomic64_read(&atomic_times->sum_exec_runtime);
}
void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
	struct task_cputime sum;

	/* Check if cputimer isn't running. This is accessed without locking. */
	if (!READ_ONCE(cputimer->running)) {
		/*
		 * The POSIX timer interface allows for absolute time expiry
		 * values through the TIMER_ABSTIME flag, therefore we have
		 * to synchronize the timer to the clock every time we start it.
		 */
		thread_group_cputime(tsk, &sum);
		update_gt_cputime(&cputimer->cputime_atomic, &sum);

		/*
		 * We're setting cputimer->running without a lock. Ensure
		 * this only gets written to in one operation. We set
		 * running after update_gt_cputime() as a small optimization,
		 * but barriers are not required because update_gt_cputime()
		 * can handle concurrent updates.
		 */
		WRITE_ONCE(cputimer->running, true);
	}
	sample_cputime_atomic(times, &cputimer->cputime_atomic);
}
/*
 * Sample a process (thread group) clock for the given group_leader task.
 * Must be called with task sighand lock held for safe while_each_thread()
 * traversal.
 */
static int cpu_clock_sample_group(const clockid_t which_clock,
				  struct task_struct *p,
				  u64 *sample)
{
	struct task_cputime cputime;

	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		thread_group_cputime(p, &cputime);
		*sample = cputime.utime + cputime.stime;
		break;
	case CPUCLOCK_VIRT:
		thread_group_cputime(p, &cputime);
		*sample = cputime.utime;
		break;
	case CPUCLOCK_SCHED:
		thread_group_cputime(p, &cputime);
		*sample = cputime.sum_exec_runtime;
		break;
	}
	return 0;
}
static int posix_cpu_clock_get_task(struct task_struct *tsk,
				    const clockid_t which_clock,
				    struct timespec *tp)
{
	int err = -EINVAL;
	u64 rtn;

	if (CPUCLOCK_PERTHREAD(which_clock)) {
		if (same_thread_group(tsk, current))
			err = cpu_clock_sample(which_clock, tsk, &rtn);
	} else {
		if (tsk == current || thread_group_leader(tsk))
			err = cpu_clock_sample_group(which_clock, tsk, &rtn);
	}

	if (!err)
		*tp = ns_to_timespec(rtn);

	return err;
}
static int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
{
	const pid_t pid = CPUCLOCK_PID(which_clock);
	int err = -EINVAL;

	if (pid == 0) {
		/*
		 * Special case constant value for our own clocks.
		 * We don't have to do any lookup to find ourselves.
		 */
		err = posix_cpu_clock_get_task(current, which_clock, tp);
	} else {
		/*
		 * Find the given PID, and validate that the caller
		 * should be able to see it.
		 */
		struct task_struct *p;
		rcu_read_lock();
		p = find_task_by_vpid(pid);
		if (p)
			err = posix_cpu_clock_get_task(p, which_clock, tp);
		rcu_read_unlock();
	}

	return err;
}
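
/*
 * In broad terms (summary, not part of the original file): a userspace
 * clock_gettime() on CLOCK_PROCESS_CPUTIME_ID, CLOCK_THREAD_CPUTIME_ID, or
 * a clockid obtained via clock_getcpuclockid()/pthread_getcpuclockid()
 * reaches this function through the generic posix-timers dispatch.
 */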
/*
 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
 * This is called from sys_timer_create() and do_cpu_nanosleep() with the
 * new timer already all-zeros initialized.
 */
static int posix_cpu_timer_create(struct k_itimer *new_timer)
{
	int ret = 0;
	const pid_t pid = CPUCLOCK_PID(new_timer->it_clock);
	struct task_struct *p;

	if (CPUCLOCK_WHICH(new_timer->it_clock) >= CPUCLOCK_MAX)
		return -EINVAL;

	INIT_LIST_HEAD(&new_timer->it.cpu.entry);

	rcu_read_lock();
	if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) {
		if (pid == 0) {
			p = current;
		} else {
			p = find_task_by_vpid(pid);
			if (p && !same_thread_group(p, current))
				p = NULL;
		}
	} else {
		if (pid == 0) {
			p = current->group_leader;
		} else {
			p = find_task_by_vpid(pid);
			if (p && !has_group_leader_pid(p))
				p = NULL;
		}
	}
	new_timer->it.cpu.task = p;
	if (p) {
		get_task_struct(p);
	} else {
		ret = -EINVAL;
	}
	rcu_read_unlock();

	return ret;
}
/*
 * Clean up a CPU-clock timer that is about to be destroyed.
 * This is called from timer deletion with the timer already locked.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_del(struct k_itimer *timer)
{
	int ret = 0;
	unsigned long flags;
	struct sighand_struct *sighand;
	struct task_struct *p = timer->it.cpu.task;

	WARN_ON_ONCE(p == NULL);

	/*
	 * Protect against sighand release/switch in exit/exec and process/
	 * thread timer list entry concurrent read/writes.
	 */
	sighand = lock_task_sighand(p, &flags);
	if (unlikely(sighand == NULL)) {
		/*
		 * We raced with the reaping of the task.
		 * The deletion should have cleared us off the list.
		 */
		WARN_ON_ONCE(!list_empty(&timer->it.cpu.entry));
	} else {
		if (timer->it.cpu.firing)
			ret = TIMER_RETRY;
		else
			list_del(&timer->it.cpu.entry);

		unlock_task_sighand(p, &flags);
	}

	if (!ret)
		put_task_struct(p);

	return ret;
}
static void cleanup_timers_list(struct list_head *head)
{
	struct cpu_timer_list *timer, *next;

	list_for_each_entry_safe(timer, next, head, entry)
		list_del_init(&timer->entry);
}

/*
 * Clean out CPU timers still ticking when a thread exited.  The task
 * pointer is cleared, and the expiry time is replaced with the residual
 * time for later timer_gettime calls to return.
 * This must be called with the siglock held.
 */
static void cleanup_timers(struct list_head *head)
{
	cleanup_timers_list(head);
	cleanup_timers_list(++head);
	cleanup_timers_list(++head);
}
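
/*
 * "head" points at an array of CPUCLOCK_MAX timer lists, one per clock
 * type; the ++head increments above walk PROF, then VIRT, then SCHED.
 */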
/*
 * These are both called with the siglock held, when the current thread
 * is being reaped.  When the final (leader) thread in the group is reaped,
 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
 */
void posix_cpu_timers_exit(struct task_struct *tsk)
{
	cleanup_timers(tsk->cpu_timers);
}
void posix_cpu_timers_exit_group(struct task_struct *tsk)
{
	cleanup_timers(tsk->signal->cpu_timers);
}
static inline int expires_gt(u64 expires, u64 new_exp)
{
	return expires == 0 || expires > new_exp;
}
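
/*
 * An expiration of 0 means "no timer armed", so any new expiry is treated
 * as earlier than an empty cache slot.
 */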
/*
 * Insert the timer on the appropriate list before any timers that
 * expire later.  This must be called with the sighand lock held.
 */
static void arm_timer(struct k_itimer *timer)
{
	struct task_struct *p = timer->it.cpu.task;
	struct list_head *head, *listpos;
	struct task_cputime *cputime_expires;
	struct cpu_timer_list *const nt = &timer->it.cpu;
	struct cpu_timer_list *next;

	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		head = p->cpu_timers;
		cputime_expires = &p->cputime_expires;
	} else {
		head = p->signal->cpu_timers;
		cputime_expires = &p->signal->cputime_expires;
	}
	head += CPUCLOCK_WHICH(timer->it_clock);

	listpos = head;
	list_for_each_entry(next, head, entry) {
		if (nt->expires < next->expires)
			break;
		listpos = &next->entry;
	}
	list_add(&nt->entry, listpos);

	if (listpos == head) {
		u64 exp = nt->expires;

		/*
		 * We are the new earliest-expiring POSIX 1.b timer, hence
		 * need to update expiration cache. Take into account that
		 * for process timers we share expiration cache with itimers
		 * and RLIMIT_CPU and for thread timers with RLIMIT_RTTIME.
		 */
		switch (CPUCLOCK_WHICH(timer->it_clock)) {
		case CPUCLOCK_PROF:
			if (expires_gt(cputime_expires->prof_exp, exp))
				cputime_expires->prof_exp = exp;
			break;
		case CPUCLOCK_VIRT:
			if (expires_gt(cputime_expires->virt_exp, exp))
				cputime_expires->virt_exp = exp;
			break;
		case CPUCLOCK_SCHED:
			if (expires_gt(cputime_expires->sched_exp, exp))
				cputime_expires->sched_exp = exp;
			break;
		}
		if (CPUCLOCK_PERTHREAD(timer->it_clock))
			tick_dep_set_task(p, TICK_DEP_BIT_POSIX_TIMER);
		else
			tick_dep_set_signal(p->signal, TICK_DEP_BIT_POSIX_TIMER);
	}
}
/*
 * The timer is locked, fire it and arrange for its reload.
 */
static void cpu_timer_fire(struct k_itimer *timer)
{
	if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
		/*
		 * The user doesn't want any signal.
		 */
		timer->it.cpu.expires = 0;
	} else if (unlikely(timer->sigq == NULL)) {
		/*
		 * This is a special case for clock_nanosleep,
		 * not a normal timer from sys_timer_create.
		 */
		wake_up_process(timer->it_process);
		timer->it.cpu.expires = 0;
	} else if (timer->it.cpu.incr == 0) {
		/*
		 * One-shot timer.  Clear it as soon as it's fired.
		 */
		posix_timer_event(timer, 0);
		timer->it.cpu.expires = 0;
	} else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
		/*
		 * The signal did not get queued because the signal
		 * was ignored, so we won't get any callback to
		 * reload the timer.  But we need to keep it
		 * ticking in case the signal is deliverable next time.
		 */
		posix_cpu_timer_schedule(timer);
	}
}
/*
 * Sample a process (thread group) timer for the given group_leader task.
 * Must be called with task sighand lock held for safe while_each_thread()
 * traversal.
 */
static int cpu_timer_sample_group(const clockid_t which_clock,
				  struct task_struct *p, u64 *sample)
{
	struct task_cputime cputime;

	thread_group_cputimer(p, &cputime);
	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		*sample = cputime.utime + cputime.stime;
		break;
	case CPUCLOCK_VIRT:
		*sample = cputime.utime;
		break;
	case CPUCLOCK_SCHED:
		*sample = cputime.sum_exec_runtime;
		break;
	}
	return 0;
}
/*
 * Guts of sys_timer_settime for CPU timers.
 * This is called with the timer locked and interrupts disabled.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
			       struct itimerspec *new, struct itimerspec *old)
{
	unsigned long flags;
	struct sighand_struct *sighand;
	struct task_struct *p = timer->it.cpu.task;
	u64 old_expires, new_expires, old_incr, val;
	int ret;

	WARN_ON_ONCE(p == NULL);

	new_expires = timespec_to_ns(&new->it_value);

	/*
	 * Protect against sighand release/switch in exit/exec and p->cpu_timers
	 * and p->signal->cpu_timers read/write in arm_timer()
	 */
	sighand = lock_task_sighand(p, &flags);
	/*
	 * If p has just been reaped, we can no
	 * longer get any information about it at all.
	 */
	if (unlikely(sighand == NULL)) {
		return -ESRCH;
	}

	/*
	 * Disarm any old timer after extracting its expiry time.
	 */
	WARN_ON_ONCE(!irqs_disabled());

	ret = 0;
	old_incr = timer->it.cpu.incr;
	old_expires = timer->it.cpu.expires;
	if (unlikely(timer->it.cpu.firing)) {
		timer->it.cpu.firing = -1;
		ret = TIMER_RETRY;
	} else
		list_del_init(&timer->it.cpu.entry);

	/*
	 * We need to sample the current value to convert the new
	 * value from relative to absolute, and to convert the
	 * old value from absolute to relative.  To set a process
	 * timer, we need a sample to balance the thread expiry
	 * times (in arm_timer).  With an absolute time, we must
	 * check if it's already passed.  In short, we need a sample.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &val);
	} else {
		cpu_timer_sample_group(timer->it_clock, p, &val);
	}

	if (old) {
		if (old_expires == 0) {
			old->it_value.tv_sec = 0;
			old->it_value.tv_nsec = 0;
		} else {
			/*
			 * Update the timer in case it has
			 * overrun already.  If it has,
			 * we'll report it as having overrun
			 * and with the next reloaded timer
			 * already ticking, though we are
			 * swallowing that pending
			 * notification here to install the
			 * new setting.
			 */
			bump_cpu_timer(timer, val);
			if (val < timer->it.cpu.expires) {
				old_expires = timer->it.cpu.expires - val;
				old->it_value = ns_to_timespec(old_expires);
			} else {
				old->it_value.tv_nsec = 1;
				old->it_value.tv_sec = 0;
			}
		}
	}

	if (unlikely(ret)) {
		/*
		 * We are colliding with the timer actually firing.
		 * Punt after filling in the timer's old value, and
		 * disable this firing since we are already reporting
		 * it as an overrun (thanks to bump_cpu_timer above).
		 */
		unlock_task_sighand(p, &flags);
		goto out;
	}

	if (new_expires != 0 && !(timer_flags & TIMER_ABSTIME)) {
		new_expires += val;
	}

	/*
	 * Install the new expiry time (or zero).
	 * For a timer with no notification action, we don't actually
	 * arm the timer (we'll just fake it for timer_gettime).
	 */
	timer->it.cpu.expires = new_expires;
	if (new_expires != 0 && val < new_expires) {
		arm_timer(timer);
	}

	unlock_task_sighand(p, &flags);
	/*
	 * Install the new reload setting, and
	 * set up the signal and overrun bookkeeping.
	 */
	timer->it.cpu.incr = timespec_to_ns(&new->it_interval);

	/*
	 * This acts as a modification timestamp for the timer,
	 * so any automatic reload attempt will punt on seeing
	 * that we have reset the timer manually.
	 */
	timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
		~REQUEUE_PENDING;
	timer->it_overrun_last = 0;
	timer->it_overrun = -1;

	if (new_expires != 0 && !(val < new_expires)) {
		/*
		 * The designated time already passed, so we notify
		 * immediately, even if the thread never runs to
		 * accumulate more time on this clock.
		 */
		cpu_timer_fire(timer);
	}

	ret = 0;
out:
	if (old)
		old->it_interval = ns_to_timespec(old_incr);

	return ret;
}
static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
{
	u64 now;
	struct task_struct *p = timer->it.cpu.task;

	WARN_ON_ONCE(p == NULL);

	/*
	 * Easy part: convert the reload time.
	 */
	itp->it_interval = ns_to_timespec(timer->it.cpu.incr);

	if (timer->it.cpu.expires == 0) {	/* Timer not armed at all.  */
		itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
		return;
	}

	/*
	 * Sample the clock to take the difference with the expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &now);
	} else {
		struct sighand_struct *sighand;
		unsigned long flags;

		/*
		 * Protect against sighand release/switch in exit/exec and
		 * also make timer sampling safe if it ends up calling
		 * thread_group_cputime().
		 */
		sighand = lock_task_sighand(p, &flags);
		if (unlikely(sighand == NULL)) {
			/*
			 * The process has been reaped.
			 * We can't even collect a sample any more.
			 * Call the timer disarmed, nothing else to do.
			 */
			timer->it.cpu.expires = 0;
			itp->it_value = ns_to_timespec(timer->it.cpu.expires);
			return;
		} else {
			cpu_timer_sample_group(timer->it_clock, p, &now);
			unlock_task_sighand(p, &flags);
		}
	}

	if (now < timer->it.cpu.expires) {
		itp->it_value = ns_to_timespec(timer->it.cpu.expires - now);
	} else {
		/*
		 * The timer should have expired already, but the firing
		 * hasn't taken place yet.  Say it's just about to expire.
		 */
		itp->it_value.tv_nsec = 1;
		itp->it_value.tv_sec = 0;
	}
}
static unsigned long long
check_timers_list(struct list_head *timers,
		  struct list_head *firing,
		  unsigned long long curr)
{
	int maxfire = 20;

	while (!list_empty(timers)) {
		struct cpu_timer_list *t;

		t = list_first_entry(timers, struct cpu_timer_list, entry);

		if (!--maxfire || curr < t->expires)
			return t->expires;

		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	return 0;
}
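
/*
 * maxfire caps how many expired timers are moved onto the firing list in
 * one pass; once it hits zero, the next pending expiry is simply returned
 * and the remaining timers are picked up on a later tick.
 */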
/*
 * Check for any per-thread CPU timers that have fired and move them off
 * the tsk->cpu_timers[N] list onto the firing list.  Here we update the
 * tsk->it_*_expires values to reflect the remaining thread CPU timers.
 */
static void check_thread_timers(struct task_struct *tsk,
				struct list_head *firing)
{
	struct list_head *timers = tsk->cpu_timers;
	struct signal_struct *const sig = tsk->signal;
	struct task_cputime *tsk_expires = &tsk->cputime_expires;
	u64 expires;
	unsigned long soft;

	/*
	 * If cputime_expires is zero, then there are no active
	 * per thread CPU timers.
	 */
	if (task_cputime_zero(&tsk->cputime_expires))
		return;

	expires = check_timers_list(timers, firing, prof_ticks(tsk));
	tsk_expires->prof_exp = expires;

	expires = check_timers_list(++timers, firing, virt_ticks(tsk));
	tsk_expires->virt_exp = expires;

	tsk_expires->sched_exp = check_timers_list(++timers, firing,
						   tsk->se.sum_exec_runtime);

	/*
	 * Check for the special case thread timers.
	 */
	soft = READ_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_cur);
	if (soft != RLIM_INFINITY) {
		unsigned long hard =
			READ_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_max);

		if (hard != RLIM_INFINITY &&
		    tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
			/*
			 * At the hard limit, we just die.
			 * No need to calculate anything else now.
			 */
			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
			return;
		}
		if (tsk->rt.timeout > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) {
			/*
			 * At the soft limit, send a SIGXCPU every second.
			 */
			if (soft < hard) {
				soft += USEC_PER_SEC;
				sig->rlim[RLIMIT_RTTIME].rlim_cur = soft;
			}
			printk(KERN_INFO
				"RT Watchdog Timeout: %s[%d]\n",
				tsk->comm, task_pid_nr(tsk));
			__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
		}
	}
	if (task_cputime_zero(tsk_expires))
		tick_dep_clear_task(tsk, TICK_DEP_BIT_POSIX_TIMER);
}
static inline void stop_process_timers(struct signal_struct *sig)
{
	struct thread_group_cputimer *cputimer = &sig->cputimer;

	/* Turn off cputimer->running. This is done without locking. */
	WRITE_ONCE(cputimer->running, false);
	tick_dep_clear_signal(sig, TICK_DEP_BIT_POSIX_TIMER);
}
static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
			     u64 *expires, u64 cur_time, int signo)
{
	if (!it->expires)
		return;

	if (cur_time >= it->expires) {
		if (it->incr)
			it->expires += it->incr;
		else
			it->expires = 0;

		trace_itimer_expire(signo == SIGPROF ?
				    ITIMER_PROF : ITIMER_VIRTUAL,
				    tsk->signal->leader_pid, cur_time);
		__group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
	}

	if (it->expires && (!*expires || it->expires < *expires))
		*expires = it->expires;
}
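
/*
 * This services setitimer()'s ITIMER_PROF and ITIMER_VIRTUAL, which share
 * the process-wide expiration cache with POSIX CPU timers; the earliest
 * surviving expiry is folded back into *expires for the caller.
 */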
/*
 * Check for any per-process CPU timers that have fired and move them
 * off the tsk->signal->cpu_timers[N] list onto the firing list.  Per-thread
 * timers have already been taken off.
 */
static void check_process_timers(struct task_struct *tsk,
				 struct list_head *firing)
{
	struct signal_struct *const sig = tsk->signal;
	u64 utime, ptime, virt_expires, prof_expires;
	u64 sum_sched_runtime, sched_expires;
	struct list_head *timers = sig->cpu_timers;
	struct task_cputime cputime;
	unsigned long soft;

	/*
	 * If cputimer is not running, then there are no active
	 * process wide timers (POSIX 1.b, itimers, RLIMIT_CPU).
	 */
	if (!READ_ONCE(tsk->signal->cputimer.running))
		return;

	/*
	 * Signify that a thread is checking for process timers.
	 * Write access to this field is protected by the sighand lock.
	 */
	sig->cputimer.checking_timer = true;

	/*
	 * Collect the current process totals.
	 */
	thread_group_cputimer(tsk, &cputime);
	utime = cputime.utime;
	ptime = utime + cputime.stime;
	sum_sched_runtime = cputime.sum_exec_runtime;

	prof_expires = check_timers_list(timers, firing, ptime);
	virt_expires = check_timers_list(++timers, firing, utime);
	sched_expires = check_timers_list(++timers, firing, sum_sched_runtime);

	/*
	 * Check for the special case process timers.
	 */
	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF], &prof_expires, ptime,
			 SIGPROF);
	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime,
			 SIGVTALRM);
	soft = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
	if (soft != RLIM_INFINITY) {
		unsigned long psecs = div_u64(ptime, NSEC_PER_SEC);
		unsigned long hard =
			READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_max);
		u64 x;
		if (psecs >= hard) {
			/*
			 * At the hard limit, we just die.
			 * No need to calculate anything else now.
			 */
			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
			return;
		}
		if (psecs >= soft) {
			/*
			 * At the soft limit, send a SIGXCPU every second.
			 */
			__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
			if (soft < hard) {
				soft++;
				sig->rlim[RLIMIT_CPU].rlim_cur = soft;
			}
		}
		x = soft * NSEC_PER_SEC;
		if (!prof_expires || x < prof_expires)
			prof_expires = x;
	}

	sig->cputime_expires.prof_exp = prof_expires;
	sig->cputime_expires.virt_exp = virt_expires;
	sig->cputime_expires.sched_exp = sched_expires;
	if (task_cputime_zero(&sig->cputime_expires))
		stop_process_timers(sig);

	sig->cputimer.checking_timer = false;
}
/*
 * This is called from the signal code (via do_schedule_next_timer)
 * when the last timer signal was delivered and we have to reload the timer.
 */
void posix_cpu_timer_schedule(struct k_itimer *timer)
{
	struct sighand_struct *sighand;
	unsigned long flags;
	struct task_struct *p = timer->it.cpu.task;
	u64 now;

	WARN_ON_ONCE(p == NULL);

	/*
	 * Fetch the current sample and update the timer's expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &now);
		bump_cpu_timer(timer, now);
		if (unlikely(p->exit_state))
			goto out;

		/* Protect timer list r/w in arm_timer() */
		sighand = lock_task_sighand(p, &flags);
		if (!sighand)
			goto out;
	} else {
		/*
		 * Protect arm_timer() and timer sampling in case of call to
		 * thread_group_cputime().
		 */
		sighand = lock_task_sighand(p, &flags);
		if (unlikely(sighand == NULL)) {
			/*
			 * The process has been reaped.
			 * We can't even collect a sample any more.
			 */
			timer->it.cpu.expires = 0;
			goto out;
		} else if (unlikely(p->exit_state) && thread_group_empty(p)) {
			unlock_task_sighand(p, &flags);
			/* Optimization: if the process is dying, no need to rearm */
			goto out;
		}
		cpu_timer_sample_group(timer->it_clock, p, &now);
		bump_cpu_timer(timer, now);
		/* Leave the sighand locked for the call below.  */
	}

	/*
	 * Now re-arm for the new expiry time.
	 */
	WARN_ON_ONCE(!irqs_disabled());
	arm_timer(timer);
	unlock_task_sighand(p, &flags);

out:
	timer->it_overrun_last = timer->it_overrun;
	timer->it_overrun = -1;
	++timer->it_requeue_pending;
}
/**
 * task_cputime_expired - Compare two task_cputime entities.
 *
 * @sample:	The task_cputime structure to be checked for expiration.
 * @expires:	Expiration times, against which @sample will be checked.
 *
 * Checks @sample against @expires to see if any field of @sample has expired.
 * Returns true if any field of the former is greater than the corresponding
 * field of the latter if the latter field is set.  Otherwise returns false.
 */
static inline int task_cputime_expired(const struct task_cputime *sample,
				       const struct task_cputime *expires)
{
	if (expires->utime && sample->utime >= expires->utime)
		return 1;
	if (expires->stime && sample->utime + sample->stime >= expires->stime)
		return 1;
	if (expires->sum_exec_runtime != 0 &&
	    sample->sum_exec_runtime >= expires->sum_exec_runtime)
		return 1;
	return 0;
}
/**
 * fastpath_timer_check - POSIX CPU timers fast path.
 *
 * @tsk:	The task (thread) being checked.
 *
 * Check the task and thread group timers.  If both are zero (there are no
 * timers set) return false.  Otherwise snapshot the task and thread group
 * timers and compare them with the corresponding expiration times.  Return
 * true if a timer has expired, else return false.
 */
static inline int fastpath_timer_check(struct task_struct *tsk)
{
	struct signal_struct *sig;

	if (!task_cputime_zero(&tsk->cputime_expires)) {
		struct task_cputime task_sample;

		task_cputime(tsk, &task_sample.utime, &task_sample.stime);
		task_sample.sum_exec_runtime = tsk->se.sum_exec_runtime;
		if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
			return 1;
	}

	sig = tsk->signal;
	/*
	 * Check if thread group timers expired when the cputimer is
	 * running and no other thread in the group is already checking
	 * for thread group cputimers. These fields are read without the
	 * sighand lock. However, this is fine because this is meant to
	 * be a fastpath heuristic to determine whether we should try to
	 * acquire the sighand lock to check/handle timers.
	 *
	 * In the worst case scenario, if 'running' or 'checking_timer' gets
	 * set but the current thread doesn't see the change yet, we'll wait
	 * until the next thread in the group gets a scheduler interrupt to
	 * handle the timer. This isn't an issue in practice because these
	 * types of delays with signals actually getting sent are expected.
	 */
	if (READ_ONCE(sig->cputimer.running) &&
	    !READ_ONCE(sig->cputimer.checking_timer)) {
		struct task_cputime group_sample;

		sample_cputime_atomic(&group_sample, &sig->cputimer.cputime_atomic);

		if (task_cputime_expired(&group_sample, &sig->cputime_expires))
			return 1;
	}

	return 0;
}
/*
 * This is called from the timer interrupt handler.  The irq handler has
 * already updated our counts.  We need to check if any timers fire now.
 * Interrupts are disabled.
 */
void run_posix_cpu_timers(struct task_struct *tsk)
{
	LIST_HEAD(firing);
	struct k_itimer *timer, *next;
	unsigned long flags;

	WARN_ON_ONCE(!irqs_disabled());

	/*
	 * The fast path checks that there are no expired thread or thread
	 * group timers.  If that's so, just return.
	 */
	if (!fastpath_timer_check(tsk))
		return;

	if (!lock_task_sighand(tsk, &flags))
		return;
	/*
	 * Here we take off tsk->signal->cpu_timers[N] and
	 * tsk->cpu_timers[N] all the timers that are firing, and
	 * put them on the firing list.
	 */
	check_thread_timers(tsk, &firing);

	check_process_timers(tsk, &firing);

	/*
	 * We must release these locks before taking any timer's lock.
	 * There is a potential race with timer deletion here, as the
	 * siglock now protects our private firing list.  We have set
	 * the firing flag in each timer, so that a deletion attempt
	 * that gets the timer lock before we do will give it up and
	 * spin until we've taken care of that timer below.
	 */
	unlock_task_sighand(tsk, &flags);

	/*
	 * Now that all the timers on our list have the firing flag,
	 * no one will touch their list entries but us.  We'll take
	 * each timer's lock before clearing its firing flag, so no
	 * timer call will interfere.
	 */
	list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
		int cpu_firing;

		spin_lock(&timer->it_lock);
		list_del_init(&timer->it.cpu.entry);
		cpu_firing = timer->it.cpu.firing;
		timer->it.cpu.firing = 0;
		/*
		 * The firing flag is -1 if we collided with a reset
		 * of the timer, which already reported this
		 * almost-firing as an overrun.  So don't generate an event.
		 */
		if (likely(cpu_firing >= 0))
			cpu_timer_fire(timer);
		spin_unlock(&timer->it_lock);
	}
}
/*
 * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
 * The tsk->sighand->siglock must be held by the caller.
 */
void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
			   u64 *newval, u64 *oldval)
{
	u64 now;

	WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED);
	cpu_timer_sample_group(clock_idx, tsk, &now);

	if (oldval) {
		/*
		 * We are setting itimer. The *oldval is absolute and we update
		 * it to be relative, *newval argument is relative and we update
		 * it to be absolute.
		 */
		if (*oldval) {
			if (*oldval <= now) {
				/* Just about to fire. */
				*oldval = TICK_NSEC;
			} else {
				*oldval -= now;
			}
		}

		if (!*newval)
			return;
		*newval += now;
	}

	/*
	 * Update the expiration cache if we are the earliest timer, or if
	 * the RLIMIT_CPU limit expires earlier than the cached prof_exp.
	 */
	switch (clock_idx) {
	case CPUCLOCK_PROF:
		if (expires_gt(tsk->signal->cputime_expires.prof_exp, *newval))
			tsk->signal->cputime_expires.prof_exp = *newval;
		break;
	case CPUCLOCK_VIRT:
		if (expires_gt(tsk->signal->cputime_expires.virt_exp, *newval))
			tsk->signal->cputime_expires.virt_exp = *newval;
		break;
	}

	tick_dep_set_signal(tsk->signal, TICK_DEP_BIT_POSIX_TIMER);
}
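
/*
 * Both setitimer() (ITIMER_PROF/ITIMER_VIRTUAL) and update_rlimit_cpu()
 * above funnel into this helper; oldval is non-NULL only in the itimer
 * case, which is why the relative/absolute conversion sits under the
 * if (oldval) branch.
 */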
static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
			    struct timespec *rqtp, struct itimerspec *it)
{
	struct k_itimer timer;
	int error;

	/*
	 * Set up a temporary timer and then wait for it to go off.
	 */
	memset(&timer, 0, sizeof timer);
	spin_lock_init(&timer.it_lock);
	timer.it_clock = which_clock;
	timer.it_overrun = -1;
	error = posix_cpu_timer_create(&timer);
	timer.it_process = current;
	if (!error) {
		static struct itimerspec zero_it;

		memset(it, 0, sizeof *it);
		it->it_value = *rqtp;

		spin_lock_irq(&timer.it_lock);
		error = posix_cpu_timer_set(&timer, flags, it, NULL);
		if (error) {
			spin_unlock_irq(&timer.it_lock);
			return error;
		}

		while (!signal_pending(current)) {
			if (timer.it.cpu.expires == 0) {
				/*
				 * Our timer fired and was reset, below
				 * deletion can not fail.
				 */
				posix_cpu_timer_del(&timer);
				spin_unlock_irq(&timer.it_lock);
				return 0;
			}

			/*
			 * Block until cpu_timer_fire (or a signal) wakes us.
			 */
			__set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(&timer.it_lock);
			schedule();
			spin_lock_irq(&timer.it_lock);
		}

		/*
		 * We were interrupted by a signal.
		 */
		*rqtp = ns_to_timespec(timer.it.cpu.expires);
		error = posix_cpu_timer_set(&timer, 0, &zero_it, it);
		if (!error) {
			/*
			 * Timer is now unarmed, deletion can not fail.
			 */
			posix_cpu_timer_del(&timer);
		}
		spin_unlock_irq(&timer.it_lock);

		while (error == TIMER_RETRY) {
			/*
			 * We need to handle case when timer was or is in the
			 * middle of firing. In other cases we already freed
			 * resources.
			 */
			spin_lock_irq(&timer.it_lock);
			error = posix_cpu_timer_del(&timer);
			spin_unlock_irq(&timer.it_lock);
		}

		if ((it->it_value.tv_sec | it->it_value.tv_nsec) == 0) {
			/*
			 * It actually did fire already.
			 */
			return 0;
		}

		error = -ERESTART_RESTARTBLOCK;
	}

	return error;
}
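
/*
 * This is the sleeping primitive behind clock_nanosleep() on CPU-time
 * clocks: a throwaway k_itimer is armed on the stack and the task blocks
 * until cpu_timer_fire() wakes it (the timer->sigq == NULL special case
 * in cpu_timer_fire() above).
 */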
static long posix_cpu_nsleep_restart(struct restart_block *restart_block);
static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
			    struct timespec *rqtp, struct timespec __user *rmtp)
{
	struct restart_block *restart_block = &current->restart_block;
	struct itimerspec it;
	int error;

	/*
	 * Diagnose required errors first.
	 */
	if (CPUCLOCK_PERTHREAD(which_clock) &&
	    (CPUCLOCK_PID(which_clock) == 0 ||
	     CPUCLOCK_PID(which_clock) == current->pid))
		return -EINVAL;

	error = do_cpu_nanosleep(which_clock, flags, rqtp, &it);

	if (error == -ERESTART_RESTARTBLOCK) {

		if (flags & TIMER_ABSTIME)
			return -ERESTARTNOHAND;
		/*
		 * Report back to the user the time still remaining.
		 */
		if (rmtp && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
			return -EFAULT;

		restart_block->fn = posix_cpu_nsleep_restart;
		restart_block->nanosleep.clockid = which_clock;
		restart_block->nanosleep.rmtp = rmtp;
		restart_block->nanosleep.expires = timespec_to_ns(rqtp);
	}
	return error;
}
static long posix_cpu_nsleep_restart(struct restart_block *restart_block)
{
	clockid_t which_clock = restart_block->nanosleep.clockid;
	struct timespec t;
	struct itimerspec it;
	int error;

	t = ns_to_timespec(restart_block->nanosleep.expires);

	error = do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t, &it);

	if (error == -ERESTART_RESTARTBLOCK) {
		struct timespec __user *rmtp = restart_block->nanosleep.rmtp;
		/*
		 * Report back to the user the time still remaining.
		 */
		if (rmtp && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
			return -EFAULT;

		restart_block->nanosleep.expires = timespec_to_ns(&t);
	}
	return error;
}
#define PROCESS_CLOCK	MAKE_PROCESS_CPUCLOCK(0, CPUCLOCK_SCHED)
#define THREAD_CLOCK	MAKE_THREAD_CPUCLOCK(0, CPUCLOCK_SCHED)
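
/*
 * PID 0 in these encodings means "the calling task", so PROCESS_CLOCK and
 * THREAD_CLOCK are the fixed clockids behind CLOCK_PROCESS_CPUTIME_ID and
 * CLOCK_THREAD_CPUTIME_ID, both using the scheduler clock (CPUCLOCK_SCHED).
 */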
static int process_cpu_clock_getres(const clockid_t which_clock,
				    struct timespec *tp)
{
	return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
}
static int process_cpu_clock_get(const clockid_t which_clock,
				 struct timespec *tp)
{
	return posix_cpu_clock_get(PROCESS_CLOCK, tp);
}
static int process_cpu_timer_create(struct k_itimer *timer)
{
	timer->it_clock = PROCESS_CLOCK;
	return posix_cpu_timer_create(timer);
}
static int process_cpu_nsleep(const clockid_t which_clock, int flags,
			      struct timespec *rqtp,
			      struct timespec __user *rmtp)
{
	return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp, rmtp);
}
static long process_cpu_nsleep_restart(struct restart_block *restart_block)
{
	return -EINVAL;
}
static int thread_cpu_clock_getres(const clockid_t which_clock,
				   struct timespec *tp)
{
	return posix_cpu_clock_getres(THREAD_CLOCK, tp);
}
static int thread_cpu_clock_get(const clockid_t which_clock,
				struct timespec *tp)
{
	return posix_cpu_clock_get(THREAD_CLOCK, tp);
}
static int thread_cpu_timer_create(struct k_itimer *timer)
{
	timer->it_clock = THREAD_CLOCK;
	return posix_cpu_timer_create(timer);
}
struct k_clock clock_posix_cpu = {
	.clock_getres	= posix_cpu_clock_getres,
	.clock_set	= posix_cpu_clock_set,
	.clock_get	= posix_cpu_clock_get,
	.timer_create	= posix_cpu_timer_create,
	.nsleep		= posix_cpu_nsleep,
	.nsleep_restart	= posix_cpu_nsleep_restart,
	.timer_set	= posix_cpu_timer_set,
	.timer_del	= posix_cpu_timer_del,
	.timer_get	= posix_cpu_timer_get,
};
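
/*
 * clock_posix_cpu is, as far as the generic posix-timers dispatch goes,
 * the k_clock used for dynamically encoded CPU clock IDs (negative
 * clockid_t values carrying a PID); the two k_clock instances registered
 * below handle only the caller's own fixed IDs.
 */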
static __init int init_posix_cpu_timers(void)
{
	struct k_clock process = {
		.clock_getres	= process_cpu_clock_getres,
		.clock_get	= process_cpu_clock_get,
		.timer_create	= process_cpu_timer_create,
		.nsleep		= process_cpu_nsleep,
		.nsleep_restart	= process_cpu_nsleep_restart,
	};
	struct k_clock thread = {
		.clock_getres	= thread_cpu_clock_getres,
		.clock_get	= thread_cpu_clock_get,
		.timer_create	= thread_cpu_timer_create,
	};

	posix_timers_register_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
	posix_timers_register_clock(CLOCK_THREAD_CPUTIME_ID, &thread);

	return 0;
}
__initcall(init_posix_cpu_timers);