/*
 * Implement CPU time clocks for the POSIX clock interface.
 */

#include <linux/sched/signal.h>
#include <linux/sched/cputime.h>
#include <linux/posix-timers.h>
#include <linux/errno.h>
#include <linux/math64.h>
#include <linux/uaccess.h>
#include <linux/kernel_stat.h>
#include <trace/events/timer.h>
#include <linux/tick.h>
#include <linux/workqueue.h>

/*
 * Called after updating RLIMIT_CPU to run cpu timer and update
 * tsk->signal->cputime_expires expiration cache if necessary. Needs
 * siglock protection since other code may update expiration cache as
 * well.
 */
void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
{
	u64 nsecs = rlim_new * NSEC_PER_SEC;

	spin_lock_irq(&task->sighand->siglock);
	set_process_cpu_timer(task, CPUCLOCK_PROF, &nsecs, NULL);
	spin_unlock_irq(&task->sighand->siglock);
}

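/*
 * Note on the clockid encoding (see the CPUCLOCK_* helpers in
 * <linux/posix-timers.h>): a CPU clock id packs the target PID and the
 * clock type into a single clockid_t, roughly as
 *
 *	bits 0-1: clock type (CPUCLOCK_PROF, CPUCLOCK_VIRT, CPUCLOCK_SCHED)
 *	bit 2:    set for a per-thread clock, clear for a process clock
 *	bits 3..: the PID, stored inverted, so that pid 0 means "self"
 *
 * which is what CPUCLOCK_PID(), CPUCLOCK_PERTHREAD() and CPUCLOCK_WHICH()
 * below decode.
 */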
static int check_clock(const clockid_t which_clock)
{
	int error = 0;
	struct task_struct *p;
	const pid_t pid = CPUCLOCK_PID(which_clock);

	if (CPUCLOCK_WHICH(which_clock) >= CPUCLOCK_MAX)
		return -EINVAL;

	if (pid == 0)
		return 0;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (!p || !(CPUCLOCK_PERTHREAD(which_clock) ?
		    same_thread_group(p, current) : has_group_leader_pid(p))) {
		error = -EINVAL;
	}
	rcu_read_unlock();

	return error;
}

/*
 * Update expiry time from increment, and increase overrun count,
 * given the current clock sample.
 */
static void bump_cpu_timer(struct k_itimer *timer, u64 now)
{
	int i;
	u64 delta, incr;

	if (timer->it.cpu.incr == 0)
		return;

	if (now < timer->it.cpu.expires)
		return;

	incr = timer->it.cpu.incr;
	delta = now + incr - timer->it.cpu.expires;

	/* Don't use (incr*2 < delta), incr*2 might overflow. */
	for (i = 0; incr < delta - incr; i++)
		incr = incr << 1;

	for (; i >= 0; incr >>= 1, i--) {
		if (delta < incr)
			continue;

		timer->it.cpu.expires += incr;
		timer->it_overrun += 1 << i;
		delta -= incr;
	}
}

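/*
 * Worked example (illustrative numbers, not from the code): with
 * expires = 100, incr = 10 and now = 137, delta = 137 + 10 - 100 = 47.
 * The first loop doubles incr up to 40 (i = 2); walking back down, the
 * i = 2 step adds 40 to expires (overrun += 4), leaving delta = 7,
 * which is below both 20 and 10, so the timer ends up armed at 140,
 * the first period boundary past "now", in O(log n) steps rather than
 * one addition per missed period.
 */
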
/**
 * task_cputime_zero - Check a task_cputime struct for all zero fields.
 *
 * @cputime:	The struct to compare.
 *
 * Checks @cputime to see if all fields are zero.  Returns true if all fields
 * are zero, false if any field is nonzero.
 */
static inline int task_cputime_zero(const struct task_cputime *cputime)
{
	if (!cputime->utime && !cputime->stime && !cputime->sum_exec_runtime)
		return 1;
	return 0;
}

static inline u64 prof_ticks(struct task_struct *p)
{
	u64 utime, stime;

	task_cputime(p, &utime, &stime);

	return utime + stime;
}
static inline u64 virt_ticks(struct task_struct *p)
{
	u64 utime, stime;

	task_cputime(p, &utime, &stime);

	return utime;
}

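/*
 * Together with task_sched_runtime(), the samplers above implement the
 * three clock types: CPUCLOCK_PROF counts user + system time,
 * CPUCLOCK_VIRT user time only, and CPUCLOCK_SCHED the ns-granularity
 * time the scheduler has charged to the task.
 */
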
static int
posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp)
{
	int error = check_clock(which_clock);
	if (!error) {
		tp->tv_sec = 0;
		tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
		if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
			/*
			 * If sched_clock is using a cycle counter, we
			 * don't have any idea of its true resolution
			 * exported, but it is much more than 1s/HZ.
			 */
			tp->tv_nsec = 1;
		}
	}
	return error;
}

static int
posix_cpu_clock_set(const clockid_t which_clock, const struct timespec *tp)
{
	/*
	 * You can never reset a CPU clock, but we check for other errors
	 * in the call before failing with EPERM.
	 */
	int error = check_clock(which_clock);
	if (error == 0) {
		error = -EPERM;
	}
	return error;
}


/*
 * Sample a per-thread clock for the given task.
 */
static int cpu_clock_sample(const clockid_t which_clock,
			    struct task_struct *p, u64 *sample)
{
	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		*sample = prof_ticks(p);
		break;
	case CPUCLOCK_VIRT:
		*sample = virt_ticks(p);
		break;
	case CPUCLOCK_SCHED:
		*sample = task_sched_runtime(p);
		break;
	}
	return 0;
}

/*
 * Set cputime to sum_cputime if sum_cputime > cputime. Use cmpxchg
 * to avoid race conditions with concurrent updates to cputime.
 */
static inline void __update_gt_cputime(atomic64_t *cputime, u64 sum_cputime)
{
	u64 curr_cputime;
retry:
	curr_cputime = atomic64_read(cputime);
	if (sum_cputime > curr_cputime) {
		if (atomic64_cmpxchg(cputime, curr_cputime, sum_cputime) != curr_cputime)
			goto retry;
	}
}

static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic, struct task_cputime *sum)
{
	__update_gt_cputime(&cputime_atomic->utime, sum->utime);
	__update_gt_cputime(&cputime_atomic->stime, sum->stime);
	__update_gt_cputime(&cputime_atomic->sum_exec_runtime, sum->sum_exec_runtime);
}

/* Sample task_cputime_atomic values in "atomic_timers", store results in "times". */
static inline void sample_cputime_atomic(struct task_cputime *times,
					 struct task_cputime_atomic *atomic_times)
{
	times->utime = atomic64_read(&atomic_times->utime);
	times->stime = atomic64_read(&atomic_times->stime);
	times->sum_exec_runtime = atomic64_read(&atomic_times->sum_exec_runtime);
}

void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
	struct task_cputime sum;

	/* Check if cputimer isn't running. This is accessed without locking. */
	if (!READ_ONCE(cputimer->running)) {
		/*
		 * The POSIX timer interface allows for absolute time expiry
		 * values through the TIMER_ABSTIME flag, therefore we have
		 * to synchronize the timer to the clock every time we start it.
		 */
		thread_group_cputime(tsk, &sum);
		update_gt_cputime(&cputimer->cputime_atomic, &sum);

		/*
		 * We're setting cputimer->running without a lock. Ensure
		 * this only gets written to in one operation. We set
		 * running after update_gt_cputime() as a small optimization,
		 * but barriers are not required because update_gt_cputime()
		 * can handle concurrent updates.
		 */
		WRITE_ONCE(cputimer->running, true);
	}
	sample_cputime_atomic(times, &cputimer->cputime_atomic);
}

/*
 * Sample a process (thread group) clock for the given group_leader task.
 * Must be called with task sighand lock held for safe while_each_thread()
 * traversal.
 */
static int cpu_clock_sample_group(const clockid_t which_clock,
				  struct task_struct *p,
				  u64 *sample)
{
	struct task_cputime cputime;

	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		thread_group_cputime(p, &cputime);
		*sample = cputime.utime + cputime.stime;
		break;
	case CPUCLOCK_VIRT:
		thread_group_cputime(p, &cputime);
		*sample = cputime.utime;
		break;
	case CPUCLOCK_SCHED:
		thread_group_cputime(p, &cputime);
		*sample = cputime.sum_exec_runtime;
		break;
	}
	return 0;
}

static int posix_cpu_clock_get_task(struct task_struct *tsk,
				    const clockid_t which_clock,
				    struct timespec *tp)
{
	int err = -EINVAL;
	u64 rtn;

	if (CPUCLOCK_PERTHREAD(which_clock)) {
		if (same_thread_group(tsk, current))
			err = cpu_clock_sample(which_clock, tsk, &rtn);
	} else {
		if (tsk == current || thread_group_leader(tsk))
			err = cpu_clock_sample_group(which_clock, tsk, &rtn);
	}

	if (!err)
		*tp = ns_to_timespec(rtn);

	return err;
}


static int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
{
	const pid_t pid = CPUCLOCK_PID(which_clock);
	int err = -EINVAL;

	if (pid == 0) {
		/*
		 * Special case constant value for our own clocks.
		 * We don't have to do any lookup to find ourselves.
		 */
		err = posix_cpu_clock_get_task(current, which_clock, tp);
	} else {
		/*
		 * Find the given PID, and validate that the caller
		 * should be able to see it.
		 */
		struct task_struct *p;
		rcu_read_lock();
		p = find_task_by_vpid(pid);
		if (p)
			err = posix_cpu_clock_get_task(p, which_clock, tp);
		rcu_read_unlock();
	}

	return err;
}

/*
 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
 * This is called from sys_timer_create() and do_cpu_nanosleep() with the
 * new timer already all-zeros initialized.
 */
static int posix_cpu_timer_create(struct k_itimer *new_timer)
{
	int ret = 0;
	const pid_t pid = CPUCLOCK_PID(new_timer->it_clock);
	struct task_struct *p;

	if (CPUCLOCK_WHICH(new_timer->it_clock) >= CPUCLOCK_MAX)
		return -EINVAL;

	INIT_LIST_HEAD(&new_timer->it.cpu.entry);

	rcu_read_lock();
	if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) {
		if (pid == 0) {
			p = current;
		} else {
			p = find_task_by_vpid(pid);
			if (p && !same_thread_group(p, current))
				p = NULL;
		}
	} else {
		if (pid == 0) {
			p = current->group_leader;
		} else {
			p = find_task_by_vpid(pid);
			if (p && !has_group_leader_pid(p))
				p = NULL;
		}
	}
	new_timer->it.cpu.task = p;
	if (p) {
		get_task_struct(p);
	} else {
		ret = -EINVAL;
	}
	rcu_read_unlock();

	return ret;
}

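/*
 * The reference taken on the target task above is what keeps it pinned
 * for the timer's lifetime; the matching put_task_struct() is done in
 * posix_cpu_timer_del() once the timer is safely off the lists.
 */
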
/*
 * Clean up a CPU-clock timer that is about to be destroyed.
 * This is called from timer deletion with the timer already locked.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_del(struct k_itimer *timer)
{
	int ret = 0;
	unsigned long flags;
	struct sighand_struct *sighand;
	struct task_struct *p = timer->it.cpu.task;

	WARN_ON_ONCE(p == NULL);

	/*
	 * Protect against sighand release/switch in exit/exec and process/
	 * thread timer list entry concurrent read/writes.
	 */
	sighand = lock_task_sighand(p, &flags);
	if (unlikely(sighand == NULL)) {
		/*
		 * We raced with the reaping of the task.
		 * The deletion should have cleared us off the list.
		 */
		WARN_ON_ONCE(!list_empty(&timer->it.cpu.entry));
	} else {
		if (timer->it.cpu.firing)
			ret = TIMER_RETRY;
		else
			list_del(&timer->it.cpu.entry);

		unlock_task_sighand(p, &flags);
	}

	if (!ret)
		put_task_struct(p);

	return ret;
}

static void cleanup_timers_list(struct list_head *head)
{
	struct cpu_timer_list *timer, *next;

	list_for_each_entry_safe(timer, next, head, entry)
		list_del_init(&timer->entry);
}

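/*
 * Both task_struct and signal_struct embed an array of three such
 * lists, indexed by CPUCLOCK_PROF/VIRT/SCHED, which is why
 * cleanup_timers() below simply advances the list head twice.
 */
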
/*
 * Clean out CPU timers still ticking when a thread exited.  The task
 * pointer is cleared, and the expiry time is replaced with the residual
 * time for later timer_gettime calls to return.
 * This must be called with the siglock held.
 */
static void cleanup_timers(struct list_head *head)
{
	cleanup_timers_list(head);
	cleanup_timers_list(++head);
	cleanup_timers_list(++head);
}

/*
 * These are both called with the siglock held, when the current thread
 * is being reaped.  When the final (leader) thread in the group is reaped,
 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
 */
void posix_cpu_timers_exit(struct task_struct *tsk)
{
	cleanup_timers(tsk->cpu_timers);
}
void posix_cpu_timers_exit_group(struct task_struct *tsk)
{
	cleanup_timers(tsk->signal->cpu_timers);
}

static inline int expires_gt(u64 expires, u64 new_exp)
{
	return expires == 0 || expires > new_exp;
}

/*
 * Insert the timer on the appropriate list before any timers that
 * expire later.  This must be called with the sighand lock held.
 */
static void arm_timer(struct k_itimer *timer)
{
	struct task_struct *p = timer->it.cpu.task;
	struct list_head *head, *listpos;
	struct task_cputime *cputime_expires;
	struct cpu_timer_list *const nt = &timer->it.cpu;
	struct cpu_timer_list *next;

	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		head = p->cpu_timers;
		cputime_expires = &p->cputime_expires;
	} else {
		head = p->signal->cpu_timers;
		cputime_expires = &p->signal->cputime_expires;
	}
	head += CPUCLOCK_WHICH(timer->it_clock);

	listpos = head;
	list_for_each_entry(next, head, entry) {
		if (nt->expires < next->expires)
			break;
		listpos = &next->entry;
	}
	list_add(&nt->entry, listpos);

	if (listpos == head) {
		u64 exp = nt->expires;

		/*
		 * We are the new earliest-expiring POSIX 1.b timer, hence
		 * need to update expiration cache. Take into account that
		 * for process timers we share expiration cache with itimers
		 * and RLIMIT_CPU and for thread timers with RLIMIT_RTTIME.
		 */

		switch (CPUCLOCK_WHICH(timer->it_clock)) {
		case CPUCLOCK_PROF:
			if (expires_gt(cputime_expires->prof_exp, exp))
				cputime_expires->prof_exp = exp;
			break;
		case CPUCLOCK_VIRT:
			if (expires_gt(cputime_expires->virt_exp, exp))
				cputime_expires->virt_exp = exp;
			break;
		case CPUCLOCK_SCHED:
			if (expires_gt(cputime_expires->sched_exp, exp))
				cputime_expires->sched_exp = exp;
			break;
		}
		if (CPUCLOCK_PERTHREAD(timer->it_clock))
			tick_dep_set_task(p, TICK_DEP_BIT_POSIX_TIMER);
		else
			tick_dep_set_signal(p->signal, TICK_DEP_BIT_POSIX_TIMER);
	}
}

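/*
 * The cputime_expires caches updated above are what
 * fastpath_timer_check() reads locklessly on every tick; keeping them
 * at the earliest pending expiry is what keeps the common
 * "no timer about to fire" case cheap.
 */
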
/*
 * The timer is locked, fire it and arrange for its reload.
 */
static void cpu_timer_fire(struct k_itimer *timer)
{
	if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
		/*
		 * User doesn't want any signal.
		 */
		timer->it.cpu.expires = 0;
	} else if (unlikely(timer->sigq == NULL)) {
		/*
		 * This is a special case for clock_nanosleep,
		 * not a normal timer from sys_timer_create.
		 */
		wake_up_process(timer->it_process);
		timer->it.cpu.expires = 0;
	} else if (timer->it.cpu.incr == 0) {
		/*
		 * One-shot timer.  Clear it as soon as it's fired.
		 */
		posix_timer_event(timer, 0);
		timer->it.cpu.expires = 0;
	} else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
		/*
		 * The signal did not get queued because the signal
		 * was ignored, so we won't get any callback to
		 * reload the timer.  But we need to keep it
		 * ticking in case the signal is deliverable next time.
		 */
		posix_cpu_timer_schedule(timer);
	}
}

/*
 * Sample a process (thread group) timer for the given group_leader task.
 * Must be called with task sighand lock held for safe while_each_thread()
 * traversal.
 */
static int cpu_timer_sample_group(const clockid_t which_clock,
				  struct task_struct *p, u64 *sample)
{
	struct task_cputime cputime;

	thread_group_cputimer(p, &cputime);
	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		*sample = cputime.utime + cputime.stime;
		break;
	case CPUCLOCK_VIRT:
		*sample = cputime.utime;
		break;
	case CPUCLOCK_SCHED:
		*sample = cputime.sum_exec_runtime;
		break;
	}
	return 0;
}

/*
 * Guts of sys_timer_settime for CPU timers.
 * This is called with the timer locked and interrupts disabled.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
			       struct itimerspec *new, struct itimerspec *old)
{
	unsigned long flags;
	struct sighand_struct *sighand;
	struct task_struct *p = timer->it.cpu.task;
	u64 old_expires, new_expires, old_incr, val;
	int ret;

	WARN_ON_ONCE(p == NULL);

	new_expires = timespec_to_ns(&new->it_value);

	/*
	 * Protect against sighand release/switch in exit/exec and p->cpu_timers
	 * and p->signal->cpu_timers read/write in arm_timer()
	 */
	sighand = lock_task_sighand(p, &flags);
	/*
	 * If p has just been reaped, we can no
	 * longer get any information about it at all.
	 */
	if (unlikely(sighand == NULL)) {
		return -ESRCH;
	}

	/*
	 * Disarm any old timer after extracting its expiry time.
	 */
	WARN_ON_ONCE(!irqs_disabled());

	ret = 0;
	old_incr = timer->it.cpu.incr;
	old_expires = timer->it.cpu.expires;
	if (unlikely(timer->it.cpu.firing)) {
		timer->it.cpu.firing = -1;
		ret = TIMER_RETRY;
	} else
		list_del_init(&timer->it.cpu.entry);

	/*
	 * We need to sample the current value to convert the new
	 * value from relative to absolute, and to convert the
	 * old value from absolute to relative.  To set a process
	 * timer, we need a sample to balance the thread expiry
	 * times (in arm_timer).  With an absolute time, we must
	 * check if it's already passed.  In short, we need a sample.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &val);
	} else {
		cpu_timer_sample_group(timer->it_clock, p, &val);
	}

	if (old) {
		if (old_expires == 0) {
			old->it_value.tv_sec = 0;
			old->it_value.tv_nsec = 0;
		} else {
			/*
			 * Update the timer in case it has
			 * overrun already.  If it has,
			 * we'll report it as having overrun
			 * and with the next reloaded timer
			 * already ticking, though we are
			 * swallowing that pending
			 * notification here to install the
			 * new setting.
			 */
			bump_cpu_timer(timer, val);
			if (val < timer->it.cpu.expires) {
				old_expires = timer->it.cpu.expires - val;
				old->it_value = ns_to_timespec(old_expires);
			} else {
				old->it_value.tv_nsec = 1;
				old->it_value.tv_sec = 0;
			}
		}
	}

	if (unlikely(ret)) {
		/*
		 * We are colliding with the timer actually firing.
		 * Punt after filling in the timer's old value, and
		 * disable this firing since we are already reporting
		 * it as an overrun (thanks to bump_cpu_timer above).
		 */
		unlock_task_sighand(p, &flags);
		goto out;
	}

	if (new_expires != 0 && !(timer_flags & TIMER_ABSTIME)) {
		new_expires += val;
	}

	/*
	 * Install the new expiry time (or zero).
	 * For a timer with no notification action, we don't actually
	 * arm the timer (we'll just fake it for timer_gettime).
	 */
	timer->it.cpu.expires = new_expires;
	if (new_expires != 0 && val < new_expires) {
		arm_timer(timer);
	}

	unlock_task_sighand(p, &flags);
	/*
	 * Install the new reload setting, and
	 * set up the signal and overrun bookkeeping.
	 */
	timer->it.cpu.incr = timespec_to_ns(&new->it_interval);

	/*
	 * This acts as a modification timestamp for the timer,
	 * so any automatic reload attempt will punt on seeing
	 * that we have reset the timer manually.
	 */
	timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
		~REQUEUE_PENDING;
	timer->it_overrun_last = 0;
	timer->it_overrun = -1;

	if (new_expires != 0 && !(val < new_expires)) {
		/*
		 * The designated time already passed, so we notify
		 * immediately, even if the thread never runs to
		 * accumulate more time on this clock.
		 */
		cpu_timer_fire(timer);
	}

	ret = 0;
out:
	if (old)
		old->it_interval = ns_to_timespec(old_incr);

	return ret;
}

static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
{
	u64 now;
	struct task_struct *p = timer->it.cpu.task;

	WARN_ON_ONCE(p == NULL);

	/*
	 * Easy part: convert the reload time.
	 */
	itp->it_interval = ns_to_timespec(timer->it.cpu.incr);

	if (timer->it.cpu.expires == 0) {	/* Timer not armed at all.  */
		itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
		return;
	}

	/*
	 * Sample the clock to take the difference with the expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &now);
	} else {
		struct sighand_struct *sighand;
		unsigned long flags;

		/*
		 * Protect against sighand release/switch in exit/exec and
		 * also make timer sampling safe if it ends up calling
		 * thread_group_cputime().
		 */
		sighand = lock_task_sighand(p, &flags);
		if (unlikely(sighand == NULL)) {
			/*
			 * The process has been reaped.
			 * We can't even collect a sample any more.
			 * Call the timer disarmed, nothing else to do.
			 */
			timer->it.cpu.expires = 0;
			itp->it_value = ns_to_timespec(timer->it.cpu.expires);
			return;
		} else {
			cpu_timer_sample_group(timer->it_clock, p, &now);
			unlock_task_sighand(p, &flags);
		}
	}

	if (now < timer->it.cpu.expires) {
		itp->it_value = ns_to_timespec(timer->it.cpu.expires - now);
	} else {
		/*
		 * The timer should have expired already, but the firing
		 * hasn't taken place yet.  Say it's just about to expire.
		 */
		itp->it_value.tv_nsec = 1;
		itp->it_value.tv_sec = 0;
	}
}

static unsigned long long
check_timers_list(struct list_head *timers,
		  struct list_head *firing,
		  unsigned long long curr)
{
	int maxfire = 20;

	while (!list_empty(timers)) {
		struct cpu_timer_list *t;

		t = list_first_entry(timers, struct cpu_timer_list, entry);

		if (!--maxfire || curr < t->expires)
			return t->expires;

		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	return 0;
}

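/*
 * The maxfire cap above bounds the work done in a single interrupt: if
 * more than 20 timers on one list expired at once, the 21st entry's
 * expiry time is returned as the new expiration-cache value, so the
 * fast path triggers again and the remaining timers are collected on a
 * later tick.
 */
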
/*
 * Check for any per-thread CPU timers that have fired and move them off
 * the tsk->cpu_timers[N] list onto the firing list.  Here we update the
 * tsk->it_*_expires values to reflect the remaining thread CPU timers.
 */
static void check_thread_timers(struct task_struct *tsk,
				struct list_head *firing)
{
	struct list_head *timers = tsk->cpu_timers;
	struct signal_struct *const sig = tsk->signal;
	struct task_cputime *tsk_expires = &tsk->cputime_expires;
	u64 expires;
	unsigned long soft;

	/*
	 * If cputime_expires is zero, then there are no active
	 * per thread CPU timers.
	 */
	if (task_cputime_zero(&tsk->cputime_expires))
		return;

	expires = check_timers_list(timers, firing, prof_ticks(tsk));
	tsk_expires->prof_exp = expires;

	expires = check_timers_list(++timers, firing, virt_ticks(tsk));
	tsk_expires->virt_exp = expires;

	tsk_expires->sched_exp = check_timers_list(++timers, firing,
						   tsk->se.sum_exec_runtime);

	/*
	 * Check for the special case thread timers.
	 */
	soft = READ_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_cur);
	if (soft != RLIM_INFINITY) {
		unsigned long hard =
			READ_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_max);

		if (hard != RLIM_INFINITY &&
		    tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
			/*
			 * At the hard limit, we just die.
			 * No need to calculate anything else now.
			 */
			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
			return;
		}
		if (tsk->rt.timeout > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) {
			/*
			 * At the soft limit, send a SIGXCPU every second.
			 */
			if (soft < hard) {
				soft += USEC_PER_SEC;
				sig->rlim[RLIMIT_RTTIME].rlim_cur = soft;
			}
			printk(KERN_INFO
			       "RT Watchdog Timeout: %s[%d]\n",
			       tsk->comm, task_pid_nr(tsk));
			__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
		}
	}
	if (task_cputime_zero(tsk_expires))
		tick_dep_clear_task(tsk, TICK_DEP_BIT_POSIX_TIMER);
}

static inline void stop_process_timers(struct signal_struct *sig)
{
	struct thread_group_cputimer *cputimer = &sig->cputimer;

	/* Turn off cputimer->running. This is done without locking. */
	WRITE_ONCE(cputimer->running, false);
	tick_dep_clear_signal(sig, TICK_DEP_BIT_POSIX_TIMER);
}

static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
			     u64 *expires, u64 cur_time, int signo)
{
	if (!it->expires)
		return;

	if (cur_time >= it->expires) {
		if (it->incr)
			it->expires += it->incr;
		else
			it->expires = 0;

		trace_itimer_expire(signo == SIGPROF ?
				    ITIMER_PROF : ITIMER_VIRTUAL,
				    tsk->signal->leader_pid, cur_time);
		__group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
	}

	if (it->expires && (!*expires || it->expires < *expires))
		*expires = it->expires;
}

/*
 * Check for any per-process CPU timers that have fired and move them
 * off the tsk->*_timers list onto the firing list.  Per-thread timers
 * have already been taken off.
 */
static void check_process_timers(struct task_struct *tsk,
				 struct list_head *firing)
{
	struct signal_struct *const sig = tsk->signal;
	u64 utime, ptime, virt_expires, prof_expires;
	u64 sum_sched_runtime, sched_expires;
	struct list_head *timers = sig->cpu_timers;
	struct task_cputime cputime;
	unsigned long soft;

	/*
	 * If cputimer is not running, then there are no active
	 * process wide timers (POSIX 1.b, itimers, RLIMIT_CPU).
	 */
	if (!READ_ONCE(tsk->signal->cputimer.running))
		return;

	/*
	 * Signify that a thread is checking for process timers.
	 * Write access to this field is protected by the sighand lock.
	 */
	sig->cputimer.checking_timer = true;

	/*
	 * Collect the current process totals.
	 */
	thread_group_cputimer(tsk, &cputime);
	utime = cputime.utime;
	ptime = utime + cputime.stime;
	sum_sched_runtime = cputime.sum_exec_runtime;

	prof_expires = check_timers_list(timers, firing, ptime);
	virt_expires = check_timers_list(++timers, firing, utime);
	sched_expires = check_timers_list(++timers, firing, sum_sched_runtime);

	/*
	 * Check for the special case process timers.
	 */
	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF], &prof_expires, ptime,
			 SIGPROF);
	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime,
			 SIGVTALRM);
	soft = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
	if (soft != RLIM_INFINITY) {
		unsigned long psecs = div_u64(ptime, NSEC_PER_SEC);
		unsigned long hard =
			READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_max);
		u64 x;
		if (psecs >= hard) {
			/*
			 * At the hard limit, we just die.
			 * No need to calculate anything else now.
			 */
			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
			return;
		}
		if (psecs >= soft) {
			/*
			 * At the soft limit, send a SIGXCPU every second.
			 */
			__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
			if (soft < hard) {
				soft++;
				sig->rlim[RLIMIT_CPU].rlim_cur = soft;
			}
		}
		x = soft * NSEC_PER_SEC;
		if (!prof_expires || x < prof_expires)
			prof_expires = x;
	}

	sig->cputime_expires.prof_exp = prof_expires;
	sig->cputime_expires.virt_exp = virt_expires;
	sig->cputime_expires.sched_exp = sched_expires;
	if (task_cputime_zero(&sig->cputime_expires))
		stop_process_timers(sig);

	sig->cputimer.checking_timer = false;
}

/*
 * This is called from the signal code (via do_schedule_next_timer)
 * when the last timer signal was delivered and we have to reload the timer.
 */
void posix_cpu_timer_schedule(struct k_itimer *timer)
{
	struct sighand_struct *sighand;
	unsigned long flags;
	struct task_struct *p = timer->it.cpu.task;
	u64 now;

	WARN_ON_ONCE(p == NULL);

	/*
	 * Fetch the current sample and update the timer's expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &now);
		bump_cpu_timer(timer, now);
		if (unlikely(p->exit_state))
			goto out;

		/* Protect timer list r/w in arm_timer() */
		sighand = lock_task_sighand(p, &flags);
		if (!sighand)
			goto out;
	} else {
		/*
		 * Protect arm_timer() and timer sampling in case of call to
		 * thread_group_cputime().
		 */
		sighand = lock_task_sighand(p, &flags);
		if (unlikely(sighand == NULL)) {
			/*
			 * The process has been reaped.
			 * We can't even collect a sample any more.
			 */
			timer->it.cpu.expires = 0;
			goto out;
		} else if (unlikely(p->exit_state) && thread_group_empty(p)) {
			unlock_task_sighand(p, &flags);
			/* Optimization: if the process is dying, no need to rearm */
			goto out;
		}
		cpu_timer_sample_group(timer->it_clock, p, &now);
		bump_cpu_timer(timer, now);
		/* Leave the sighand locked for the call below.  */
	}

	/*
	 * Now re-arm for the new expiry time.
	 */
	WARN_ON_ONCE(!irqs_disabled());
	arm_timer(timer);
	unlock_task_sighand(p, &flags);

out:
	timer->it_overrun_last = timer->it_overrun;
	timer->it_overrun = -1;
	++timer->it_requeue_pending;
}

/**
 * task_cputime_expired - Compare two task_cputime entities.
 *
 * @sample:	The task_cputime structure to be checked for expiration.
 * @expires:	Expiration times, against which @sample will be checked.
 *
 * Checks @sample against @expires to see if any field of @sample has expired.
 * Returns true if any field of the former is greater than or equal to the
 * corresponding field of the latter if the latter field is set.  Otherwise
 * returns false.
 */
static inline int task_cputime_expired(const struct task_cputime *sample,
				       const struct task_cputime *expires)
{
	if (expires->utime && sample->utime >= expires->utime)
		return 1;
	if (expires->stime && sample->utime + sample->stime >= expires->stime)
		return 1;
	if (expires->sum_exec_runtime != 0 &&
	    sample->sum_exec_runtime >= expires->sum_exec_runtime)
		return 1;
	return 0;
}

/**
 * fastpath_timer_check - POSIX CPU timers fast path.
 *
 * @tsk:	The task (thread) being checked.
 *
 * Check the task and thread group timers.  If both are zero (there are no
 * timers set) return false.  Otherwise snapshot the task and thread group
 * timers and compare them with the corresponding expiration times.  Return
 * true if a timer has expired, else return false.
 */
static inline int fastpath_timer_check(struct task_struct *tsk)
{
	struct signal_struct *sig;

	if (!task_cputime_zero(&tsk->cputime_expires)) {
		struct task_cputime task_sample;

		task_cputime(tsk, &task_sample.utime, &task_sample.stime);
		task_sample.sum_exec_runtime = tsk->se.sum_exec_runtime;
		if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
			return 1;
	}

	sig = tsk->signal;
	/*
	 * Check if thread group timers expired when the cputimer is
	 * running and no other thread in the group is already checking
	 * for thread group cputimers.  These fields are read without the
	 * sighand lock.  However, this is fine because this is meant to
	 * be a fastpath heuristic to determine whether we should try to
	 * acquire the sighand lock to check/handle timers.
	 *
	 * In the worst case scenario, if 'running' or 'checking_timer' gets
	 * set but the current thread doesn't see the change yet, we'll wait
	 * until the next thread in the group gets a scheduler interrupt to
	 * handle the timer. This isn't an issue in practice because these
	 * types of delays with signals actually getting sent are expected.
	 */
	if (READ_ONCE(sig->cputimer.running) &&
	    !READ_ONCE(sig->cputimer.checking_timer)) {
		struct task_cputime group_sample;

		sample_cputime_atomic(&group_sample, &sig->cputimer.cputime_atomic);

		if (task_cputime_expired(&group_sample, &sig->cputime_expires))
			return 1;
	}

	return 0;
}

/*
 * This is called from the timer interrupt handler.  The irq handler has
 * already updated our counts.  We need to check if any timers fire now.
 * Interrupts are disabled.
 */
void run_posix_cpu_timers(struct task_struct *tsk)
{
	LIST_HEAD(firing);
	struct k_itimer *timer, *next;
	unsigned long flags;

	WARN_ON_ONCE(!irqs_disabled());

	/*
	 * The fast path checks that there are no expired thread or thread
	 * group timers.  If that's so, just return.
	 */
	if (!fastpath_timer_check(tsk))
		return;

	if (!lock_task_sighand(tsk, &flags))
		return;
	/*
	 * Here we take off tsk->signal->cpu_timers[N] and
	 * tsk->cpu_timers[N] all the timers that are firing, and
	 * put them on the firing list.
	 */
	check_thread_timers(tsk, &firing);

	check_process_timers(tsk, &firing);

	/*
	 * We must release these locks before taking any timer's lock.
	 * There is a potential race with timer deletion here, as the
	 * siglock now protects our private firing list.  We have set
	 * the firing flag in each timer, so that a deletion attempt
	 * that gets the timer lock before we do will give it up and
	 * spin until we've taken care of that timer below.
	 */
	unlock_task_sighand(tsk, &flags);

	/*
	 * Now that all the timers on our list have the firing flag,
	 * no one will touch their list entries but us.  We'll take
	 * each timer's lock before clearing its firing flag, so no
	 * timer call will interfere.
	 */
	list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
		int cpu_firing;

		spin_lock(&timer->it_lock);
		list_del_init(&timer->it.cpu.entry);
		cpu_firing = timer->it.cpu.firing;
		timer->it.cpu.firing = 0;
		/*
		 * The firing flag is -1 if we collided with a reset
		 * of the timer, which already reported this
		 * almost-firing as an overrun.  So don't generate an event.
		 */
		if (likely(cpu_firing >= 0))
			cpu_timer_fire(timer);
		spin_unlock(&timer->it_lock);
	}
}

/*
 * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
 * The tsk->sighand->siglock must be held by the caller.
 */
void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
			   u64 *newval, u64 *oldval)
{
	u64 now;

	WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED);
	cpu_timer_sample_group(clock_idx, tsk, &now);

	if (oldval) {
		/*
		 * We are setting itimer.  The *oldval is absolute and we update
		 * it to be relative, *newval argument is relative and we update
		 * it to be absolute.
		 */
		if (*oldval) {
			if (*oldval <= now) {
				/* Just about to fire. */
				*oldval = TICK_NSEC;
			} else {
				*oldval -= now;
			}
		}

		if (!*newval)
			return;
		*newval += now;
	}

	/*
	 * Update expiration cache if we are the earliest timer, or eventually
	 * RLIMIT_CPU limit is earlier than prof_exp cpu timer expire.
	 */
	switch (clock_idx) {
	case CPUCLOCK_PROF:
		if (expires_gt(tsk->signal->cputime_expires.prof_exp, *newval))
			tsk->signal->cputime_expires.prof_exp = *newval;
		break;
	case CPUCLOCK_VIRT:
		if (expires_gt(tsk->signal->cputime_expires.virt_exp, *newval))
			tsk->signal->cputime_expires.virt_exp = *newval;
		break;
	}

	tick_dep_set_signal(tsk->signal, TICK_DEP_BIT_POSIX_TIMER);
}

static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
			    struct timespec *rqtp, struct itimerspec *it)
{
	struct k_itimer timer;
	int error;

	/*
	 * Set up a temporary timer and then wait for it to go off.
	 */
	memset(&timer, 0, sizeof timer);
	spin_lock_init(&timer.it_lock);
	timer.it_clock = which_clock;
	timer.it_overrun = -1;
	error = posix_cpu_timer_create(&timer);
	timer.it_process = current;
	if (!error) {
		static struct itimerspec zero_it;

		memset(it, 0, sizeof *it);
		it->it_value = *rqtp;

		spin_lock_irq(&timer.it_lock);
		error = posix_cpu_timer_set(&timer, flags, it, NULL);
		if (error) {
			spin_unlock_irq(&timer.it_lock);
			return error;
		}

		while (!signal_pending(current)) {
			if (timer.it.cpu.expires == 0) {
				/*
				 * Our timer fired and was reset, below
				 * deletion can not fail.
				 */
				posix_cpu_timer_del(&timer);
				spin_unlock_irq(&timer.it_lock);
				return 0;
			}

			/*
			 * Block until cpu_timer_fire (or a signal) wakes us.
			 */
			__set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(&timer.it_lock);
			schedule();
			spin_lock_irq(&timer.it_lock);
		}

		/*
		 * We were interrupted by a signal.
		 */
		*rqtp = ns_to_timespec(timer.it.cpu.expires);
		error = posix_cpu_timer_set(&timer, 0, &zero_it, it);
		if (!error) {
			/*
			 * Timer is now unarmed, deletion can not fail.
			 */
			posix_cpu_timer_del(&timer);
		}
		spin_unlock_irq(&timer.it_lock);

		while (error == TIMER_RETRY) {
			/*
			 * We need to handle case when timer was or is in the
			 * middle of firing.  In other cases we already freed
			 * resources.
			 */
			spin_lock_irq(&timer.it_lock);
			error = posix_cpu_timer_del(&timer);
			spin_unlock_irq(&timer.it_lock);
		}

		if ((it->it_value.tv_sec | it->it_value.tv_nsec) == 0) {
			/*
			 * It actually did fire already.
			 */
			return 0;
		}

		error = -ERESTART_RESTARTBLOCK;
	}

	return error;
}

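/*
 * do_cpu_nanosleep() above is the backend for clock_nanosleep() on CPU
 * clocks, e.g. (illustrative userspace call, not code from this file)
 *
 *	clock_nanosleep(CLOCK_PROCESS_CPUTIME_ID, TIMER_ABSTIME, &ts, NULL);
 *
 * The temporary k_itimer never becomes visible to userspace; only the
 * remaining time is reported back through the restart machinery below.
 */
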
static long posix_cpu_nsleep_restart(struct restart_block *restart_block);

static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
			    struct timespec *rqtp, struct timespec __user *rmtp)
{
	struct restart_block *restart_block = &current->restart_block;
	struct itimerspec it;
	int error;

	/*
	 * Diagnose required errors first.
	 */
	if (CPUCLOCK_PERTHREAD(which_clock) &&
	    (CPUCLOCK_PID(which_clock) == 0 ||
	     CPUCLOCK_PID(which_clock) == task_pid_vnr(current)))
		return -EINVAL;

	error = do_cpu_nanosleep(which_clock, flags, rqtp, &it);

	if (error == -ERESTART_RESTARTBLOCK) {

		if (flags & TIMER_ABSTIME)
			return -ERESTARTNOHAND;
		/*
		 * Report back to the user the time still remaining.
		 */
		if (rmtp && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
			return -EFAULT;

		restart_block->fn = posix_cpu_nsleep_restart;
		restart_block->nanosleep.clockid = which_clock;
		restart_block->nanosleep.rmtp = rmtp;
		restart_block->nanosleep.expires = timespec_to_ns(rqtp);
	}
	return error;
}

static long posix_cpu_nsleep_restart(struct restart_block *restart_block)
{
	clockid_t which_clock = restart_block->nanosleep.clockid;
	struct timespec t;
	struct itimerspec it;
	int error;

	t = ns_to_timespec(restart_block->nanosleep.expires);

	error = do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t, &it);

	if (error == -ERESTART_RESTARTBLOCK) {
		struct timespec __user *rmtp = restart_block->nanosleep.rmtp;
		/*
		 * Report back to the user the time still remaining.
		 */
		if (rmtp && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
			return -EFAULT;

		restart_block->nanosleep.expires = timespec_to_ns(&t);
	}
	return error;
}

#define PROCESS_CLOCK	MAKE_PROCESS_CPUCLOCK(0, CPUCLOCK_SCHED)
#define THREAD_CLOCK	MAKE_THREAD_CPUCLOCK(0, CPUCLOCK_SCHED)

static int process_cpu_clock_getres(const clockid_t which_clock,
				    struct timespec *tp)
{
	return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
}
static int process_cpu_clock_get(const clockid_t which_clock,
				 struct timespec *tp)
{
	return posix_cpu_clock_get(PROCESS_CLOCK, tp);
}
static int process_cpu_timer_create(struct k_itimer *timer)
{
	timer->it_clock = PROCESS_CLOCK;
	return posix_cpu_timer_create(timer);
}
static int process_cpu_nsleep(const clockid_t which_clock, int flags,
			      struct timespec *rqtp,
			      struct timespec __user *rmtp)
{
	return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp, rmtp);
}
static long process_cpu_nsleep_restart(struct restart_block *restart_block)
{
	return -EINVAL;
}
static int thread_cpu_clock_getres(const clockid_t which_clock,
				   struct timespec *tp)
{
	return posix_cpu_clock_getres(THREAD_CLOCK, tp);
}
static int thread_cpu_clock_get(const clockid_t which_clock,
				struct timespec *tp)
{
	return posix_cpu_clock_get(THREAD_CLOCK, tp);
}
static int thread_cpu_timer_create(struct k_itimer *timer)
{
	timer->it_clock = THREAD_CLOCK;
	return posix_cpu_timer_create(timer);
}

struct k_clock clock_posix_cpu = {
	.clock_getres	= posix_cpu_clock_getres,
	.clock_set	= posix_cpu_clock_set,
	.clock_get	= posix_cpu_clock_get,
	.timer_create	= posix_cpu_timer_create,
	.nsleep		= posix_cpu_nsleep,
	.nsleep_restart	= posix_cpu_nsleep_restart,
	.timer_set	= posix_cpu_timer_set,
	.timer_del	= posix_cpu_timer_del,
	.timer_get	= posix_cpu_timer_get,
};

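/*
 * For orientation, an assumed-typical userspace sequence that exercises
 * the aliases registered below (a sketch, not code from this file):
 *
 *	struct timespec ts;
 *	clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts);
 *				// -> process_cpu_clock_get()
 *
 *	timer_t tid;
 *	struct sigevent sev = {
 *		.sigev_notify = SIGEV_SIGNAL,
 *		.sigev_signo  = SIGPROF,
 *	};
 *	timer_create(CLOCK_PROCESS_CPUTIME_ID, &sev, &tid);
 *				// -> process_cpu_timer_create()
 */
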
static __init int init_posix_cpu_timers(void)
{
	struct k_clock process = {
		.clock_getres	= process_cpu_clock_getres,
		.clock_get	= process_cpu_clock_get,
		.timer_create	= process_cpu_timer_create,
		.nsleep		= process_cpu_nsleep,
		.nsleep_restart	= process_cpu_nsleep_restart,
	};
	struct k_clock thread = {
		.clock_getres	= thread_cpu_clock_getres,
		.clock_get	= thread_cpu_clock_get,
		.timer_create	= thread_cpu_timer_create,
	};

	posix_timers_register_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
	posix_timers_register_clock(CLOCK_THREAD_CPUTIME_ID, &thread);

	return 0;
}
__initcall(init_posix_cpu_timers);