/*
 * Implement CPU time clocks for the POSIX clock interface.
 */

#include <linux/sched.h>
#include <linux/posix-timers.h>
#include <linux/errno.h>
#include <linux/math64.h>
#include <asm/uaccess.h>
#include <linux/kernel_stat.h>
#include <trace/events/timer.h>
#include <linux/random.h>
#include <linux/tick.h>
#include <linux/workqueue.h>

/*
 * Called after updating RLIMIT_CPU to run cpu timer and update
 * tsk->signal->cputime_expires expiration cache if necessary. Needs
 * siglock protection since other code may update expiration cache as
 * well.
 */
void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
{
        cputime_t cputime = secs_to_cputime(rlim_new);

        spin_lock_irq(&task->sighand->siglock);
        set_process_cpu_timer(task, CPUCLOCK_PROF, &cputime, NULL);
        spin_unlock_irq(&task->sighand->siglock);
}
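
/*
 * For reference, the user-space path that ends up here is setrlimit(2)
 * on RLIMIT_CPU.  A minimal sketch (illustrative only, not part of this
 * file):
 *
 *	#include <sys/resource.h>
 *
 *	struct rlimit rl = { .rlim_cur = 10, .rlim_max = 20 };
 *	setrlimit(RLIMIT_CPU, &rl);	// SIGXCPU after 10s of CPU time,
 *					// SIGKILL at the 20s hard limit
 */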
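
/*
 * Validate a CPU-clock clockid_t: the clock type must be in range and,
 * if a PID is encoded, it must name a live task that the caller may
 * sample (a thread of the current group for per-thread clocks, a group
 * leader for process clocks).
 */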
static int check_clock(const clockid_t which_clock)
{
        int error = 0;
        struct task_struct *p;
        const pid_t pid = CPUCLOCK_PID(which_clock);

        if (CPUCLOCK_WHICH(which_clock) >= CPUCLOCK_MAX)
                return -EINVAL;

        if (pid == 0)
                return 0;

        rcu_read_lock();
        p = find_task_by_vpid(pid);
        if (!p || !(CPUCLOCK_PERTHREAD(which_clock) ?
                    same_thread_group(p, current) : has_group_leader_pid(p))) {
                error = -EINVAL;
        }
        rcu_read_unlock();

        return error;
}

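/*
 * Convert between the external struct timespec representation and the
 * internal unsigned long long sample format: nanoseconds for the SCHED
 * clock, cputime units for the PROF and VIRT clocks.
 */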
static inline unsigned long long
timespec_to_sample(const clockid_t which_clock, const struct timespec *tp)
{
        unsigned long long ret;

        ret = 0;		/* high half always zero when .cpu used */
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                ret = (unsigned long long)tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec;
        } else {
                ret = cputime_to_expires(timespec_to_cputime(tp));
        }
        return ret;
}

static void sample_to_timespec(const clockid_t which_clock,
                               unsigned long long expires,
                               struct timespec *tp)
{
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED)
                *tp = ns_to_timespec(expires);
        else
                cputime_to_timespec((__force cputime_t)expires, tp);
}

/*
 * Update expiry time from increment, and increase overrun count,
 * given the current clock sample.
 */
static void bump_cpu_timer(struct k_itimer *timer,
                           unsigned long long now)
{
        int i;
        unsigned long long delta, incr;

        if (timer->it.cpu.incr == 0)
                return;

        if (now < timer->it.cpu.expires)
                return;

        incr = timer->it.cpu.incr;
        delta = now + incr - timer->it.cpu.expires;

        /* Don't use (incr*2 < delta), incr*2 might overflow. */
        for (i = 0; incr < delta - incr; i++)
                incr = incr << 1;

        for (; i >= 0; incr >>= 1, i--) {
                if (delta < incr)
                        continue;

                timer->it.cpu.expires += incr;
                timer->it_overrun += 1 << i;
                delta -= incr;
        }
}
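
/*
 * Worked example for the loops above: with expires = 100, incr = 10 and
 * a sample now = 175, delta = 175 + 10 - 100 = 85.  The first loop
 * doubles incr up to 80 (i = 3); the second loop adds 80 to expires
 * (overrun += 1 << 3), and the remaining delta of 5 is smaller than
 * every halved increment, so we end with expires = 180 and 8 overruns --
 * the expiries at 100, 110, ..., 170 that have already passed.
 */
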
/**
 * task_cputime_zero - Check a task_cputime struct for all zero fields.
 *
 * @cputime:	The struct to compare.
 *
 * Checks @cputime to see if all fields are zero.  Returns true if all fields
 * are zero, false if any field is nonzero.
 */
static inline int task_cputime_zero(const struct task_cputime *cputime)
{
        if (!cputime->utime && !cputime->stime && !cputime->sum_exec_runtime)
                return 1;
        return 0;
}

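/*
 * Sample the PROF (user + system time) and VIRT (user time only) clocks
 * of a single thread, in the internal expiry-sample unit.
 */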
static inline unsigned long long prof_ticks(struct task_struct *p)
{
        cputime_t utime, stime;

        task_cputime(p, &utime, &stime);

        return cputime_to_expires(utime + stime);
}

static inline unsigned long long virt_ticks(struct task_struct *p)
{
        cputime_t utime;

        task_cputime(p, &utime, NULL);

        return cputime_to_expires(utime);
}

static int
posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp)
{
        int error = check_clock(which_clock);
        if (!error) {
                tp->tv_sec = 0;
                tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
                if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                        /*
                         * If sched_clock is using a cycle counter, we
                         * don't export its true resolution, but it is
                         * much finer than 1s/HZ.
                         */
                        tp->tv_nsec = 1;
                }
        }
        return error;
}

static int
posix_cpu_clock_set(const clockid_t which_clock, const struct timespec *tp)
{
        /*
         * You can never reset a CPU clock, but we check for other errors
         * in the call before failing with EPERM.
         */
        int error = check_clock(which_clock);
        if (error == 0) {
                error = -EPERM;
        }
        return error;
}

/*
 * Sample a per-thread clock for the given task.
 */
static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
                            unsigned long long *sample)
{
        switch (CPUCLOCK_WHICH(which_clock)) {
        default:
                return -EINVAL;
        case CPUCLOCK_PROF:
                *sample = prof_ticks(p);
                break;
        case CPUCLOCK_VIRT:
                *sample = virt_ticks(p);
                break;
        case CPUCLOCK_SCHED:
                *sample = task_sched_runtime(p);
                break;
        }
        return 0;
}

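/*
 * Merge two cputime snapshots, keeping the larger value of each field,
 * so that the group cputime cache never jumps backwards when it is
 * restarted from a fresh thread_group_cputime() sum.
 */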
static void update_gt_cputime(struct task_cputime *a, struct task_cputime *b)
{
        if (b->utime > a->utime)
                a->utime = b->utime;

        if (b->stime > a->stime)
                a->stime = b->stime;

        if (b->sum_exec_runtime > a->sum_exec_runtime)
                a->sum_exec_runtime = b->sum_exec_runtime;
}

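/*
 * Return the cached thread group cputime, starting the group-wide
 * accounting (cputimer->running) on first use.
 */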
void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
{
        struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
        struct task_cputime sum;
        unsigned long flags;

        if (!cputimer->running) {
                /*
                 * The POSIX timer interface allows for absolute time expiry
                 * values through the TIMER_ABSTIME flag, therefore we have
                 * to synchronize the timer to the clock every time we start
                 * it.
                 */
                thread_group_cputime(tsk, &sum);
                raw_spin_lock_irqsave(&cputimer->lock, flags);
                cputimer->running = 1;
                update_gt_cputime(&cputimer->cputime, &sum);
        } else
                raw_spin_lock_irqsave(&cputimer->lock, flags);
        *times = cputimer->cputime;
        raw_spin_unlock_irqrestore(&cputimer->lock, flags);
}

/*
 * Sample a process (thread group) clock for the given group_leader task.
 * Must be called with tasklist_lock held for reading.
 */
static int cpu_clock_sample_group(const clockid_t which_clock,
                                  struct task_struct *p,
                                  unsigned long long *sample)
{
        struct task_cputime cputime;

        switch (CPUCLOCK_WHICH(which_clock)) {
        default:
                return -EINVAL;
        case CPUCLOCK_PROF:
                thread_group_cputime(p, &cputime);
                *sample = cputime_to_expires(cputime.utime + cputime.stime);
                break;
        case CPUCLOCK_VIRT:
                thread_group_cputime(p, &cputime);
                *sample = cputime_to_expires(cputime.utime);
                break;
        case CPUCLOCK_SCHED:
                thread_group_cputime(p, &cputime);
                *sample = cputime.sum_exec_runtime;
                break;
        }
        return 0;
}

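/*
 * Sample the clock named by which_clock: the caller's own clock with no
 * lookup needed, or a looked-up task's clock after validating that the
 * caller is allowed to see it.
 */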
static int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
{
        const pid_t pid = CPUCLOCK_PID(which_clock);
        int error = -EINVAL;
        unsigned long long rtn;

        if (pid == 0) {
                /*
                 * Special case constant value for our own clocks.
                 * We don't have to do any lookup to find ourselves.
                 */
                if (CPUCLOCK_PERTHREAD(which_clock)) {
                        /*
                         * Sampling just ourselves we can do with no locking.
                         */
                        error = cpu_clock_sample(which_clock,
                                                 current, &rtn);
                } else {
                        read_lock(&tasklist_lock);
                        error = cpu_clock_sample_group(which_clock,
                                                       current, &rtn);
                        read_unlock(&tasklist_lock);
                }
        } else {
                /*
                 * Find the given PID, and validate that the caller
                 * should be able to see it.
                 */
                struct task_struct *p;
                rcu_read_lock();
                p = find_task_by_vpid(pid);
                if (p) {
                        if (CPUCLOCK_PERTHREAD(which_clock)) {
                                if (same_thread_group(p, current)) {
                                        error = cpu_clock_sample(which_clock,
                                                                 p, &rtn);
                                }
                        } else {
                                read_lock(&tasklist_lock);
                                if (thread_group_leader(p) && p->sighand) {
                                        error = cpu_clock_sample_group(which_clock,
                                                                       p, &rtn);
                                }
                                read_unlock(&tasklist_lock);
                        }
                }
                rcu_read_unlock();
        }

        if (error)
                return error;
        sample_to_timespec(which_clock, rtn, tp);
        return 0;
}
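
/*
 * From user space this is reached through clock_gettime(2).  A minimal
 * sketch of reading another process's CPU clock via the POSIX helper
 * (illustrative only; some_pid is a placeholder):
 *
 *	#include <time.h>
 *
 *	clockid_t cid;
 *	struct timespec ts;
 *
 *	if (clock_getcpuclockid(some_pid, &cid) == 0)
 *		clock_gettime(cid, &ts);  // total CPU time of that process
 */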

/*
 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
 * This is called from sys_timer_create() and do_cpu_nanosleep() with the
 * new timer already all-zeros initialized.
 */
static int posix_cpu_timer_create(struct k_itimer *new_timer)
{
        int ret = 0;
        const pid_t pid = CPUCLOCK_PID(new_timer->it_clock);
        struct task_struct *p;

        if (CPUCLOCK_WHICH(new_timer->it_clock) >= CPUCLOCK_MAX)
                return -EINVAL;

        INIT_LIST_HEAD(&new_timer->it.cpu.entry);

        rcu_read_lock();
        if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) {
                if (pid == 0) {
                        p = current;
                } else {
                        p = find_task_by_vpid(pid);
                        if (p && !same_thread_group(p, current))
                                p = NULL;
                }
        } else {
                if (pid == 0) {
                        p = current->group_leader;
                } else {
                        p = find_task_by_vpid(pid);
                        if (p && !has_group_leader_pid(p))
                                p = NULL;
                }
        }
        new_timer->it.cpu.task = p;
        if (p) {
                get_task_struct(p);
        } else {
                ret = -EINVAL;
        }
        rcu_read_unlock();

        return ret;
}

/*
 * Clean up a CPU-clock timer that is about to be destroyed.
 * This is called from timer deletion with the timer already locked.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_del(struct k_itimer *timer)
{
        struct task_struct *p = timer->it.cpu.task;
        int ret = 0;

        if (likely(p != NULL)) {
                read_lock(&tasklist_lock);
                if (unlikely(p->sighand == NULL)) {
                        /*
                         * We raced with the reaping of the task.
                         * The deletion should have cleared us off the list.
                         */
                        BUG_ON(!list_empty(&timer->it.cpu.entry));
                } else {
                        spin_lock(&p->sighand->siglock);
                        if (timer->it.cpu.firing)
                                ret = TIMER_RETRY;
                        else
                                list_del(&timer->it.cpu.entry);
                        spin_unlock(&p->sighand->siglock);
                }
                read_unlock(&tasklist_lock);

                if (!ret)
                        put_task_struct(p);
        }

        return ret;
}

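/*
 * Unlink every timer on one per-clock expiry list.  The curr sample
 * argument is unused here: exiting timers are only taken off the list,
 * and any residual time is computed when the timer is next read.
 */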
static void cleanup_timers_list(struct list_head *head,
                                unsigned long long curr)
{
        struct cpu_timer_list *timer, *next;

        list_for_each_entry_safe(timer, next, head, entry)
                list_del_init(&timer->entry);
}

/*
 * Clean out CPU timers still ticking when a thread exits.  The timers
 * are simply unlinked here; the task pointer is cleared and the residual
 * time reported later, when the timer is next accessed (clear_dead_task).
 * This must be called with the siglock held.
 */
static void cleanup_timers(struct list_head *head,
                           cputime_t utime, cputime_t stime,
                           unsigned long long sum_exec_runtime)
{
        cputime_t ptime = utime + stime;

        cleanup_timers_list(head, cputime_to_expires(ptime));
        cleanup_timers_list(++head, cputime_to_expires(utime));
        cleanup_timers_list(++head, sum_exec_runtime);
}

/*
 * These are both called with the siglock held, when the current thread
 * is being reaped.  When the final (leader) thread in the group is reaped,
 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
 */
void posix_cpu_timers_exit(struct task_struct *tsk)
{
        cputime_t utime, stime;

        add_device_randomness((const void *) &tsk->se.sum_exec_runtime,
                              sizeof(unsigned long long));
        task_cputime(tsk, &utime, &stime);
        cleanup_timers(tsk->cpu_timers,
                       utime, stime, tsk->se.sum_exec_runtime);
}

void posix_cpu_timers_exit_group(struct task_struct *tsk)
{
        struct signal_struct *const sig = tsk->signal;
        cputime_t utime, stime;

        task_cputime(tsk, &utime, &stime);
        cleanup_timers(tsk->signal->cpu_timers,
                       utime + sig->utime, stime + sig->stime,
                       tsk->se.sum_exec_runtime + sig->sum_sched_runtime);
}

static void clear_dead_task(struct k_itimer *itimer, unsigned long long now)
{
        struct cpu_timer_list *timer = &itimer->it.cpu;

        /*
         * That's all for this thread or process.
         * We leave our residual in expires to be reported.
         */
        put_task_struct(timer->task);
        timer->task = NULL;
        if (timer->expires < now) {
                timer->expires = 0;
        } else {
                timer->expires -= now;
        }
}

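/*
 * Return true if the cached expiry time is unset (0) or later than the
 * new one, i.e. the cache needs updating.
 */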
static inline int expires_gt(cputime_t expires, cputime_t new_exp)
{
        return expires == 0 || expires > new_exp;
}

/*
 * Insert the timer on the appropriate list before any timers that
 * expire later.  This must be called with the tasklist_lock held
 * for reading, interrupts disabled and p->sighand->siglock taken.
 */
static void arm_timer(struct k_itimer *timer)
{
        struct task_struct *p = timer->it.cpu.task;
        struct list_head *head, *listpos;
        struct task_cputime *cputime_expires;
        struct cpu_timer_list *const nt = &timer->it.cpu;
        struct cpu_timer_list *next;

        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                head = p->cpu_timers;
                cputime_expires = &p->cputime_expires;
        } else {
                head = p->signal->cpu_timers;
                cputime_expires = &p->signal->cputime_expires;
        }
        head += CPUCLOCK_WHICH(timer->it_clock);

        listpos = head;
        list_for_each_entry(next, head, entry) {
                if (nt->expires < next->expires)
                        break;
                listpos = &next->entry;
        }
        list_add(&nt->entry, listpos);

        if (listpos == head) {
                unsigned long long exp = nt->expires;

                /*
                 * We are the new earliest-expiring POSIX 1.b timer, hence
                 * need to update expiration cache. Take into account that
                 * for process timers we share expiration cache with itimers
                 * and RLIMIT_CPU and for thread timers with RLIMIT_RTTIME.
                 */

                switch (CPUCLOCK_WHICH(timer->it_clock)) {
                case CPUCLOCK_PROF:
                        if (expires_gt(cputime_expires->prof_exp, expires_to_cputime(exp)))
                                cputime_expires->prof_exp = expires_to_cputime(exp);
                        break;
                case CPUCLOCK_VIRT:
                        if (expires_gt(cputime_expires->virt_exp, expires_to_cputime(exp)))
                                cputime_expires->virt_exp = expires_to_cputime(exp);
                        break;
                case CPUCLOCK_SCHED:
                        if (cputime_expires->sched_exp == 0 ||
                            cputime_expires->sched_exp > exp)
                                cputime_expires->sched_exp = exp;
                        break;
                }
        }
}

/*
 * The timer is locked, fire it and arrange for its reload.
 */
static void cpu_timer_fire(struct k_itimer *timer)
{
        if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
                /*
                 * The user doesn't want any signal.
                 */
                timer->it.cpu.expires = 0;
        } else if (unlikely(timer->sigq == NULL)) {
                /*
                 * This is a special case for clock_nanosleep,
                 * not a normal timer from sys_timer_create.
                 */
                wake_up_process(timer->it_process);
                timer->it.cpu.expires = 0;
        } else if (timer->it.cpu.incr == 0) {
                /*
                 * One-shot timer.  Clear it as soon as it's fired.
                 */
                posix_timer_event(timer, 0);
                timer->it.cpu.expires = 0;
        } else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
                /*
                 * The signal did not get queued because the signal
                 * was ignored, so we won't get any callback to
                 * reload the timer.  But we need to keep it
                 * ticking in case the signal is deliverable next time.
                 */
                posix_cpu_timer_schedule(timer);
        }
}

/*
 * Sample a process (thread group) timer for the given group_leader task.
 * Must be called with tasklist_lock held for reading.
 */
static int cpu_timer_sample_group(const clockid_t which_clock,
                                  struct task_struct *p,
                                  unsigned long long *sample)
{
        struct task_cputime cputime;

        thread_group_cputimer(p, &cputime);
        switch (CPUCLOCK_WHICH(which_clock)) {
        default:
                return -EINVAL;
        case CPUCLOCK_PROF:
                *sample = cputime_to_expires(cputime.utime + cputime.stime);
                break;
        case CPUCLOCK_VIRT:
                *sample = cputime_to_expires(cputime.utime);
                break;
        case CPUCLOCK_SCHED:
                *sample = cputime.sum_exec_runtime + task_delta_exec(p);
                break;
        }
        return 0;
}

#ifdef CONFIG_NO_HZ_FULL
static void nohz_kick_work_fn(struct work_struct *work)
{
        tick_nohz_full_kick_all();
}

static DECLARE_WORK(nohz_kick_work, nohz_kick_work_fn);

/*
 * We need the IPIs to be sent from sane process context.
 * The posix cpu timers are always set with irqs disabled.
 */
static void posix_cpu_timer_kick_nohz(void)
{
        schedule_work(&nohz_kick_work);
}

bool posix_cpu_timers_can_stop_tick(struct task_struct *tsk)
{
        if (!task_cputime_zero(&tsk->cputime_expires))
                return false;

        if (tsk->signal->cputimer.running)
                return false;

        return true;
}
#else
static inline void posix_cpu_timer_kick_nohz(void) { }
#endif

/*
 * Guts of sys_timer_settime for CPU timers.
 * This is called with the timer locked and interrupts disabled.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
                               struct itimerspec *new, struct itimerspec *old)
{
        struct task_struct *p = timer->it.cpu.task;
        unsigned long long old_expires, new_expires, old_incr, val;
        int ret;

        if (unlikely(p == NULL)) {
                /*
                 * Timer refers to a dead task's clock.
                 */
                return -ESRCH;
        }

        new_expires = timespec_to_sample(timer->it_clock, &new->it_value);

        read_lock(&tasklist_lock);
        /*
         * We need the tasklist_lock to protect against reaping that
         * clears p->sighand.  If p has just been reaped, we can no
         * longer get any information about it at all.
         */
        if (unlikely(p->sighand == NULL)) {
                read_unlock(&tasklist_lock);
                put_task_struct(p);
                timer->it.cpu.task = NULL;
                return -ESRCH;
        }

        /*
         * Disarm any old timer after extracting its expiry time.
         */
        BUG_ON(!irqs_disabled());

        ret = 0;
        old_incr = timer->it.cpu.incr;
        spin_lock(&p->sighand->siglock);
        old_expires = timer->it.cpu.expires;
        if (unlikely(timer->it.cpu.firing)) {
                timer->it.cpu.firing = -1;
                ret = TIMER_RETRY;
        } else
                list_del_init(&timer->it.cpu.entry);

        /*
         * We need to sample the current value to convert the new
         * value from relative to absolute, and to convert the old
         * value from absolute to relative.  To set a process timer,
         * we need a sample to balance the thread expiry times (in
         * arm_timer).  With an absolute time, we must check if it's
         * already passed.  In short, we need a sample.
         */
        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                cpu_clock_sample(timer->it_clock, p, &val);
        } else {
                cpu_timer_sample_group(timer->it_clock, p, &val);
        }

        if (old) {
                if (old_expires == 0) {
                        old->it_value.tv_sec = 0;
                        old->it_value.tv_nsec = 0;
                } else {
                        /*
                         * Update the timer in case it has
                         * overrun already.  If it has,
                         * we'll report it as having overrun
                         * and with the next reloaded timer
                         * already ticking, though we are
                         * swallowing that pending
                         * notification here to install the
                         * new setting.
                         */
                        bump_cpu_timer(timer, val);
                        if (val < timer->it.cpu.expires) {
                                old_expires = timer->it.cpu.expires - val;
                                sample_to_timespec(timer->it_clock,
                                                   old_expires,
                                                   &old->it_value);
                        } else {
                                old->it_value.tv_nsec = 1;
                                old->it_value.tv_sec = 0;
                        }
                }
        }

        if (unlikely(ret)) {
                /*
                 * We are colliding with the timer actually firing.
                 * Punt after filling in the timer's old value, and
                 * disable this firing since we are already reporting
                 * it as an overrun (thanks to bump_cpu_timer above).
                 */
                spin_unlock(&p->sighand->siglock);
                read_unlock(&tasklist_lock);
                goto out;
        }

        if (new_expires != 0 && !(flags & TIMER_ABSTIME)) {
                new_expires += val;
        }

        /*
         * Install the new expiry time (or zero).
         * For a timer with no notification action, we don't actually
         * arm the timer (we'll just fake it for timer_gettime).
         */
        timer->it.cpu.expires = new_expires;
        if (new_expires != 0 && val < new_expires) {
                arm_timer(timer);
        }

        spin_unlock(&p->sighand->siglock);
        read_unlock(&tasklist_lock);

        /*
         * Install the new reload setting, and
         * set up the signal and overrun bookkeeping.
         */
        timer->it.cpu.incr = timespec_to_sample(timer->it_clock,
                                                &new->it_interval);

        /*
         * This acts as a modification timestamp for the timer,
         * so any automatic reload attempt will punt on seeing
         * that we have reset the timer manually.
         */
        timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
                ~REQUEUE_PENDING;
        timer->it_overrun_last = 0;
        timer->it_overrun = -1;

        if (new_expires != 0 && !(val < new_expires)) {
                /*
                 * The designated time already passed, so we notify
                 * immediately, even if the thread never runs to
                 * accumulate more time on this clock.
                 */
                cpu_timer_fire(timer);
        }

        ret = 0;
out:
        if (old) {
                sample_to_timespec(timer->it_clock,
                                   old_incr, &old->it_interval);
        }
        if (!ret)
                posix_cpu_timer_kick_nohz();
        return ret;
}

static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
{
        unsigned long long now;
        struct task_struct *p = timer->it.cpu.task;
        int clear_dead;

        /*
         * Easy part: convert the reload time.
         */
        sample_to_timespec(timer->it_clock,
                           timer->it.cpu.incr, &itp->it_interval);

        if (timer->it.cpu.expires == 0) {	/* Timer not armed at all.  */
                itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
                return;
        }

        if (unlikely(p == NULL)) {
                /*
                 * This task already died and the timer will never fire.
                 * In this case, expires is actually the dead value.
                 */
        dead:
                sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
                                   &itp->it_value);
                return;
        }

        /*
         * Sample the clock to take the difference with the expiry time.
         */
        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                cpu_clock_sample(timer->it_clock, p, &now);
                clear_dead = p->exit_state;
        } else {
                read_lock(&tasklist_lock);
                if (unlikely(p->sighand == NULL)) {
                        /*
                         * The process has been reaped.
                         * We can't even collect a sample any more.
                         * Call the timer disarmed, nothing else to do.
                         */
                        put_task_struct(p);
                        timer->it.cpu.task = NULL;
                        timer->it.cpu.expires = 0;
                        read_unlock(&tasklist_lock);
                        goto dead;
                } else {
                        cpu_timer_sample_group(timer->it_clock, p, &now);
                        clear_dead = (unlikely(p->exit_state) &&
                                      thread_group_empty(p));
                }
                read_unlock(&tasklist_lock);
        }

        if (unlikely(clear_dead)) {
                /*
                 * We've noticed that the thread is dead, but
                 * not yet reaped.  Take this opportunity to
                 * drop our task ref.
                 */
                clear_dead_task(timer, now);
                goto dead;
        }

        if (now < timer->it.cpu.expires) {
                sample_to_timespec(timer->it_clock,
                                   timer->it.cpu.expires - now,
                                   &itp->it_value);
        } else {
                /*
                 * The timer should have expired already, but the firing
                 * hasn't taken place yet.  Say it's just about to expire.
                 */
                itp->it_value.tv_nsec = 1;
                itp->it_value.tv_sec = 0;
        }
}

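/*
 * Walk one expiry list (kept sorted by expiry time), moving every timer
 * that has expired at sample time curr onto the firing list, and return
 * the expiry time of the first timer left on the list (0 if none remain).
 * The maxfire cap bounds how many timers may fire per invocation.
 */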
static unsigned long long
check_timers_list(struct list_head *timers,
                  struct list_head *firing,
                  unsigned long long curr)
{
        int maxfire = 20;

        while (!list_empty(timers)) {
                struct cpu_timer_list *t;

                t = list_first_entry(timers, struct cpu_timer_list, entry);

                if (!--maxfire || curr < t->expires)
                        return t->expires;

                t->firing = 1;
                list_move_tail(&t->entry, firing);
        }

        return 0;
}

/*
 * Check for any per-thread CPU timers that have fired and move them off
 * the tsk->cpu_timers[N] list onto the firing list.  Here we update the
 * tsk->it_*_expires values to reflect the remaining thread CPU timers.
 */
static void check_thread_timers(struct task_struct *tsk,
                                struct list_head *firing)
{
        struct list_head *timers = tsk->cpu_timers;
        struct signal_struct *const sig = tsk->signal;
        struct task_cputime *tsk_expires = &tsk->cputime_expires;
        unsigned long long expires;
        unsigned long soft;

        expires = check_timers_list(timers, firing, prof_ticks(tsk));
        tsk_expires->prof_exp = expires_to_cputime(expires);

        expires = check_timers_list(++timers, firing, virt_ticks(tsk));
        tsk_expires->virt_exp = expires_to_cputime(expires);

        tsk_expires->sched_exp = check_timers_list(++timers, firing,
                                                   tsk->se.sum_exec_runtime);

        /*
         * Check for the special case thread timers.
         */
        soft = ACCESS_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_cur);
        if (soft != RLIM_INFINITY) {
                unsigned long hard =
                        ACCESS_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_max);

                if (hard != RLIM_INFINITY &&
                    tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
                        /*
                         * At the hard limit, we just die.
                         * No need to calculate anything else now.
                         */
                        __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
                        return;
                }
                if (tsk->rt.timeout > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) {
                        /*
                         * At the soft limit, send a SIGXCPU every second.
                         */
                        if (soft < hard) {
                                soft += USEC_PER_SEC;
                                sig->rlim[RLIMIT_RTTIME].rlim_cur = soft;
                        }
                        printk(KERN_INFO
                               "RT Watchdog Timeout: %s[%d]\n",
                               tsk->comm, task_pid_nr(tsk));
                        __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
                }
        }
}

static void stop_process_timers(struct signal_struct *sig)
{
        struct thread_group_cputimer *cputimer = &sig->cputimer;
        unsigned long flags;

        raw_spin_lock_irqsave(&cputimer->lock, flags);
        cputimer->running = 0;
        raw_spin_unlock_irqrestore(&cputimer->lock, flags);
}

static u32 onecputick;

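/*
 * Check one of the classic CPU itimers (ITIMER_PROF/ITIMER_VIRTUAL): if
 * it has expired, rearm it (accumulating the sub-tick rounding error and
 * shortening a later period by one jiffy once a full tick of error has
 * built up), send the signal, and fold the new expiry into the caller's
 * expiration cache candidate.
 */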
static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
                             unsigned long long *expires,
                             unsigned long long cur_time, int signo)
{
        if (!it->expires)
                return;

        if (cur_time >= it->expires) {
                if (it->incr) {
                        it->expires += it->incr;
                        it->error += it->incr_error;
                        if (it->error >= onecputick) {
                                it->expires -= cputime_one_jiffy;
                                it->error -= onecputick;
                        }
                } else {
                        it->expires = 0;
                }

                trace_itimer_expire(signo == SIGPROF ?
                                    ITIMER_PROF : ITIMER_VIRTUAL,
                                    tsk->signal->leader_pid, cur_time);
                __group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
        }

        if (it->expires && (!*expires || it->expires < *expires)) {
                *expires = it->expires;
        }
}

/*
 * Check for any process-wide CPU timers that have fired and move them
 * off the sig->cpu_timers[N] lists onto the firing list.  Per-thread
 * timers have already been taken off.
 */
static void check_process_timers(struct task_struct *tsk,
                                 struct list_head *firing)
{
        struct signal_struct *const sig = tsk->signal;
        unsigned long long utime, ptime, virt_expires, prof_expires;
        unsigned long long sum_sched_runtime, sched_expires;
        struct list_head *timers = sig->cpu_timers;
        struct task_cputime cputime;
        unsigned long soft;

        /*
         * Collect the current process totals.
         */
        thread_group_cputimer(tsk, &cputime);
        utime = cputime_to_expires(cputime.utime);
        ptime = utime + cputime_to_expires(cputime.stime);
        sum_sched_runtime = cputime.sum_exec_runtime;

        prof_expires = check_timers_list(timers, firing, ptime);
        virt_expires = check_timers_list(++timers, firing, utime);
        sched_expires = check_timers_list(++timers, firing, sum_sched_runtime);

        /*
         * Check for the special case process timers.
         */
        check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF], &prof_expires, ptime,
                         SIGPROF);
        check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime,
                         SIGVTALRM);
        soft = ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
        if (soft != RLIM_INFINITY) {
                unsigned long psecs = cputime_to_secs(ptime);
                unsigned long hard =
                        ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_max);
                cputime_t x;
                if (psecs >= hard) {
                        /*
                         * At the hard limit, we just die.
                         * No need to calculate anything else now.
                         */
                        __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
                        return;
                }
                if (psecs >= soft) {
                        /*
                         * At the soft limit, send a SIGXCPU every second.
                         */
                        __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
                        if (soft < hard) {
                                soft++;
                                sig->rlim[RLIMIT_CPU].rlim_cur = soft;
                        }
                }
                x = secs_to_cputime(soft);
                if (!prof_expires || x < prof_expires) {
                        prof_expires = x;
                }
        }

        sig->cputime_expires.prof_exp = expires_to_cputime(prof_expires);
        sig->cputime_expires.virt_exp = expires_to_cputime(virt_expires);
        sig->cputime_expires.sched_exp = sched_expires;
        if (task_cputime_zero(&sig->cputime_expires))
                stop_process_timers(sig);
}

/*
 * This is called from the signal code (via do_schedule_next_timer)
 * when the last timer signal was delivered and we have to reload the timer.
 */
void posix_cpu_timer_schedule(struct k_itimer *timer)
{
        struct task_struct *p = timer->it.cpu.task;
        unsigned long long now;

        if (unlikely(p == NULL))
                /*
                 * The task was cleaned up already, no future firings.
                 */
                goto out;

        /*
         * Fetch the current sample and update the timer's expiry time.
         */
        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                cpu_clock_sample(timer->it_clock, p, &now);
                bump_cpu_timer(timer, now);
                if (unlikely(p->exit_state)) {
                        clear_dead_task(timer, now);
                        goto out;
                }
                read_lock(&tasklist_lock); /* arm_timer needs it.  */
                spin_lock(&p->sighand->siglock);
        } else {
                read_lock(&tasklist_lock);
                if (unlikely(p->sighand == NULL)) {
                        /*
                         * The process has been reaped.
                         * We can't even collect a sample any more.
                         */
                        put_task_struct(p);
                        timer->it.cpu.task = p = NULL;
                        timer->it.cpu.expires = 0;
                        goto out_unlock;
                } else if (unlikely(p->exit_state) && thread_group_empty(p)) {
                        /*
                         * We've noticed that the thread is dead, but
                         * not yet reaped.  Take this opportunity to
                         * drop our task ref.
                         */
                        cpu_timer_sample_group(timer->it_clock, p, &now);
                        clear_dead_task(timer, now);
                        goto out_unlock;
                }
                spin_lock(&p->sighand->siglock);
                cpu_timer_sample_group(timer->it_clock, p, &now);
                bump_cpu_timer(timer, now);
                /* Leave the tasklist_lock locked for the call below.  */
        }

        /*
         * Now re-arm for the new expiry time.
         */
        BUG_ON(!irqs_disabled());
        arm_timer(timer);
        spin_unlock(&p->sighand->siglock);

out_unlock:
        read_unlock(&tasklist_lock);

out:
        timer->it_overrun_last = timer->it_overrun;
        timer->it_overrun = -1;
        ++timer->it_requeue_pending;
}

/**
 * task_cputime_expired - Compare two task_cputime entities.
 *
 * @sample:	The task_cputime structure to be checked for expiration.
 * @expires:	Expiration times, against which @sample will be checked.
 *
 * Checks @sample against @expires to see if any field of @sample has expired.
 * Returns true if any set field of @expires has been reached or passed by
 * the corresponding field of @sample.  Otherwise returns false.
 */
static inline int task_cputime_expired(const struct task_cputime *sample,
                                       const struct task_cputime *expires)
{
        if (expires->utime && sample->utime >= expires->utime)
                return 1;
        if (expires->stime && sample->utime + sample->stime >= expires->stime)
                return 1;
        if (expires->sum_exec_runtime != 0 &&
            sample->sum_exec_runtime >= expires->sum_exec_runtime)
                return 1;
        return 0;
}

/**
 * fastpath_timer_check - POSIX CPU timers fast path.
 *
 * @tsk:	The task (thread) being checked.
 *
 * Check the task and thread group timers.  If both are zero (there are no
 * timers set) return false.  Otherwise snapshot the task and thread group
 * timers and compare them with the corresponding expiration times.  Return
 * true if a timer has expired, else return false.
 */
static inline int fastpath_timer_check(struct task_struct *tsk)
{
        struct signal_struct *sig;
        cputime_t utime, stime;

        task_cputime(tsk, &utime, &stime);

        if (!task_cputime_zero(&tsk->cputime_expires)) {
                struct task_cputime task_sample = {
                        .utime = utime,
                        .stime = stime,
                        .sum_exec_runtime = tsk->se.sum_exec_runtime
                };

                if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
                        return 1;
        }

        sig = tsk->signal;
        if (sig->cputimer.running) {
                struct task_cputime group_sample;

                raw_spin_lock(&sig->cputimer.lock);
                group_sample = sig->cputimer.cputime;
                raw_spin_unlock(&sig->cputimer.lock);

                if (task_cputime_expired(&group_sample, &sig->cputime_expires))
                        return 1;
        }

        return 0;
}

/*
 * This is called from the timer interrupt handler.  The irq handler has
 * already updated our counts.  We need to check if any timers fire now.
 * Interrupts are disabled.
 */
void run_posix_cpu_timers(struct task_struct *tsk)
{
        LIST_HEAD(firing);
        struct k_itimer *timer, *next;
        unsigned long flags;

        BUG_ON(!irqs_disabled());

        /*
         * The fast path checks that there are no expired thread or thread
         * group timers.  If that's so, just return.
         */
        if (!fastpath_timer_check(tsk))
                return;

        if (!lock_task_sighand(tsk, &flags))
                return;
        /*
         * Here we take off tsk->signal->cpu_timers[N] and
         * tsk->cpu_timers[N] all the timers that are firing, and
         * put them on the firing list.
         */
        check_thread_timers(tsk, &firing);
        /*
         * If there are any active process wide timers (POSIX 1.b, itimers,
         * RLIMIT_CPU) cputimer must be running.
         */
        if (tsk->signal->cputimer.running)
                check_process_timers(tsk, &firing);

        /*
         * We must release these locks before taking any timer's lock.
         * There is a potential race with timer deletion here, as the
         * siglock now protects our private firing list.  We have set
         * the firing flag in each timer, so that a deletion attempt
         * that gets the timer lock before we do will give it up and
         * spin until we've taken care of that timer below.
         */
        unlock_task_sighand(tsk, &flags);

        /*
         * Now that all the timers on our list have the firing flag,
         * no one will touch their list entries but us.  We'll take
         * each timer's lock before clearing its firing flag, so no
         * timer call will interfere.
         */
        list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
                int cpu_firing;

                spin_lock(&timer->it_lock);
                list_del_init(&timer->it.cpu.entry);
                cpu_firing = timer->it.cpu.firing;
                timer->it.cpu.firing = 0;
                /*
                 * The firing flag is -1 if we collided with a reset
                 * of the timer, which already reported this
                 * almost-firing as an overrun.  So don't generate an event.
                 */
                if (likely(cpu_firing >= 0))
                        cpu_timer_fire(timer);
                spin_unlock(&timer->it_lock);
        }

        /*
         * In case some timers were rescheduled after the queue got emptied,
         * wake up full dynticks CPUs.
         */
        if (tsk->signal->cputimer.running)
                posix_cpu_timer_kick_nohz();
}

/*
 * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
 * The tsk->sighand->siglock must be held by the caller.
 */
void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
                           cputime_t *newval, cputime_t *oldval)
{
        unsigned long long now;

        BUG_ON(clock_idx == CPUCLOCK_SCHED);
        cpu_timer_sample_group(clock_idx, tsk, &now);

        if (oldval) {
                /*
                 * We are setting itimer.  The *oldval is absolute and we
                 * update it to be relative; the *newval argument is
                 * relative and we update it to be absolute.
                 */
                if (*oldval) {
                        if (*oldval <= now) {
                                /* Just about to fire.  */
                                *oldval = cputime_one_jiffy;
                        } else {
                                *oldval -= now;
                        }
                }

                if (!*newval)
                        goto out;
                *newval += now;
        }

        /*
         * Update the expiration cache if the new timer is now the earliest
         * one; this also covers a new RLIMIT_CPU limit that expires before
         * the cached prof_exp CPU timer.
         */
        switch (clock_idx) {
        case CPUCLOCK_PROF:
                if (expires_gt(tsk->signal->cputime_expires.prof_exp, *newval))
                        tsk->signal->cputime_expires.prof_exp = *newval;
                break;
        case CPUCLOCK_VIRT:
                if (expires_gt(tsk->signal->cputime_expires.virt_exp, *newval))
                        tsk->signal->cputime_expires.virt_exp = *newval;
                break;
        }
out:
        posix_cpu_timer_kick_nohz();
}
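
/*
 * The classic itimer path ends up here.  A minimal user-space sketch
 * (illustrative only, not part of this file):
 *
 *	#include <sys/time.h>
 *
 *	struct itimerval itv = {
 *		.it_interval = { .tv_sec = 1 },	// rearm every CPU second
 *		.it_value    = { .tv_sec = 1 },
 *	};
 *	setitimer(ITIMER_PROF, &itv, NULL);	// delivers SIGPROF
 */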

static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
                            struct timespec *rqtp, struct itimerspec *it)
{
        struct k_itimer timer;
        int error;

        /*
         * Set up a temporary timer and then wait for it to go off.
         */
        memset(&timer, 0, sizeof timer);
        spin_lock_init(&timer.it_lock);
        timer.it_clock = which_clock;
        timer.it_overrun = -1;
        error = posix_cpu_timer_create(&timer);
        timer.it_process = current;
        if (!error) {
                static struct itimerspec zero_it;

                memset(it, 0, sizeof *it);
                it->it_value = *rqtp;

                spin_lock_irq(&timer.it_lock);
                error = posix_cpu_timer_set(&timer, flags, it, NULL);
                if (error) {
                        spin_unlock_irq(&timer.it_lock);
                        return error;
                }

                while (!signal_pending(current)) {
                        if (timer.it.cpu.expires == 0) {
                                /*
                                 * Our timer fired and was reset; the
                                 * deletion below cannot fail.
                                 */
                                posix_cpu_timer_del(&timer);
                                spin_unlock_irq(&timer.it_lock);
                                return 0;
                        }

                        /*
                         * Block until cpu_timer_fire (or a signal) wakes us.
                         */
                        __set_current_state(TASK_INTERRUPTIBLE);
                        spin_unlock_irq(&timer.it_lock);
                        schedule();
                        spin_lock_irq(&timer.it_lock);
                }

                /*
                 * We were interrupted by a signal.
                 */
                sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp);
                error = posix_cpu_timer_set(&timer, 0, &zero_it, it);
                if (!error) {
                        /*
                         * Timer is now unarmed, deletion cannot fail.
                         */
                        posix_cpu_timer_del(&timer);
                }
                spin_unlock_irq(&timer.it_lock);

                while (error == TIMER_RETRY) {
                        /*
                         * We need to handle the case where the timer is or
                         * was in the middle of firing.  In all other cases
                         * the resources were already freed above.
                         */
                        spin_lock_irq(&timer.it_lock);
                        error = posix_cpu_timer_del(&timer);
                        spin_unlock_irq(&timer.it_lock);
                }

                if ((it->it_value.tv_sec | it->it_value.tv_nsec) == 0) {
                        /*
                         * It actually did fire already.
                         */
                        return 0;
                }

                error = -ERESTART_RESTARTBLOCK;
        }

        return error;
}

static long posix_cpu_nsleep_restart(struct restart_block *restart_block);

static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
                            struct timespec *rqtp, struct timespec __user *rmtp)
{
        struct restart_block *restart_block =
                &current_thread_info()->restart_block;
        struct itimerspec it;
        int error;

        /*
         * Diagnose required errors first.
         */
        if (CPUCLOCK_PERTHREAD(which_clock) &&
            (CPUCLOCK_PID(which_clock) == 0 ||
             CPUCLOCK_PID(which_clock) == current->pid))
                return -EINVAL;

        error = do_cpu_nanosleep(which_clock, flags, rqtp, &it);

        if (error == -ERESTART_RESTARTBLOCK) {

                if (flags & TIMER_ABSTIME)
                        return -ERESTARTNOHAND;
                /*
                 * Report back to the user the time still remaining.
                 */
                if (rmtp && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
                        return -EFAULT;

                restart_block->fn = posix_cpu_nsleep_restart;
                restart_block->nanosleep.clockid = which_clock;
                restart_block->nanosleep.rmtp = rmtp;
                restart_block->nanosleep.expires = timespec_to_ns(rqtp);
        }
        return error;
}
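
/*
 * From user space this is clock_nanosleep(2) on a CPU-time clock.  A
 * minimal sketch (illustrative only, not part of this file):
 *
 *	#include <time.h>
 *
 *	struct timespec ts = { .tv_sec = 2 };
 *
 *	// Sleep until this process has consumed two more seconds of
 *	// CPU time.
 *	clock_nanosleep(CLOCK_PROCESS_CPUTIME_ID, 0, &ts, NULL);
 */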

static long posix_cpu_nsleep_restart(struct restart_block *restart_block)
{
        clockid_t which_clock = restart_block->nanosleep.clockid;
        struct timespec t;
        struct itimerspec it;
        int error;

        t = ns_to_timespec(restart_block->nanosleep.expires);

        error = do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t, &it);

        if (error == -ERESTART_RESTARTBLOCK) {
                struct timespec __user *rmtp = restart_block->nanosleep.rmtp;
                /*
                 * Report back to the user the time still remaining.
                 */
                if (rmtp && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
                        return -EFAULT;

                restart_block->nanosleep.expires = timespec_to_ns(&t);
        }
        return error;
}

#define PROCESS_CLOCK	MAKE_PROCESS_CPUCLOCK(0, CPUCLOCK_SCHED)
#define THREAD_CLOCK	MAKE_THREAD_CPUCLOCK(0, CPUCLOCK_SCHED)

static int process_cpu_clock_getres(const clockid_t which_clock,
                                    struct timespec *tp)
{
        return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
}
static int process_cpu_clock_get(const clockid_t which_clock,
                                 struct timespec *tp)
{
        return posix_cpu_clock_get(PROCESS_CLOCK, tp);
}
static int process_cpu_timer_create(struct k_itimer *timer)
{
        timer->it_clock = PROCESS_CLOCK;
        return posix_cpu_timer_create(timer);
}
static int process_cpu_nsleep(const clockid_t which_clock, int flags,
                              struct timespec *rqtp,
                              struct timespec __user *rmtp)
{
        return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp, rmtp);
}
static long process_cpu_nsleep_restart(struct restart_block *restart_block)
{
        return -EINVAL;
}
static int thread_cpu_clock_getres(const clockid_t which_clock,
                                   struct timespec *tp)
{
        return posix_cpu_clock_getres(THREAD_CLOCK, tp);
}
static int thread_cpu_clock_get(const clockid_t which_clock,
                                struct timespec *tp)
{
        return posix_cpu_clock_get(THREAD_CLOCK, tp);
}
static int thread_cpu_timer_create(struct k_itimer *timer)
{
        timer->it_clock = THREAD_CLOCK;
        return posix_cpu_timer_create(timer);
}

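/*
 * Clock operations for the dynamically resolved CPU clocks, i.e. the
 * negative clockid_t values that encode a PID or TID; the posix-timers
 * core dispatches those clockids here.
 */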
struct k_clock clock_posix_cpu = {
        .clock_getres	= posix_cpu_clock_getres,
        .clock_set	= posix_cpu_clock_set,
        .clock_get	= posix_cpu_clock_get,
        .timer_create	= posix_cpu_timer_create,
        .nsleep		= posix_cpu_nsleep,
        .nsleep_restart	= posix_cpu_nsleep_restart,
        .timer_set	= posix_cpu_timer_set,
        .timer_del	= posix_cpu_timer_del,
        .timer_get	= posix_cpu_timer_get,
};

static __init int init_posix_cpu_timers(void)
{
        struct k_clock process = {
                .clock_getres	= process_cpu_clock_getres,
                .clock_get	= process_cpu_clock_get,
                .timer_create	= process_cpu_timer_create,
                .nsleep		= process_cpu_nsleep,
                .nsleep_restart	= process_cpu_nsleep_restart,
        };
        struct k_clock thread = {
                .clock_getres	= thread_cpu_clock_getres,
                .clock_get	= thread_cpu_clock_get,
                .timer_create	= thread_cpu_timer_create,
        };
        struct timespec ts;

        posix_timers_register_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
        posix_timers_register_clock(CLOCK_THREAD_CPUTIME_ID, &thread);

        cputime_to_timespec(cputime_one_jiffy, &ts);
        onecputick = ts.tv_nsec;
        WARN_ON(ts.tv_sec != 0);

        return 0;
}
__initcall(init_posix_cpu_timers);