/* kernel/sched/cputime.c */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/tsacct_kern.h>
#include <linux/kernel_stat.h>
#include <linux/static_key.h>
#include <linux/context_tracking.h>
#include "sched.h"
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif


#ifdef CONFIG_IRQ_TIME_ACCOUNTING

/*
 * There are no locks covering percpu hardirq/softirq time.
 * They are only modified in vtime_account, on the corresponding CPU
 * with interrupts disabled. So, writes are safe.
 * They are read and saved off onto struct rq in update_rq_clock().
 * This may result in another CPU reading this CPU's irq time and
 * racing with irq/vtime_account on this CPU. We would either get the
 * old or the new value, with a side effect of accounting a slice of
 * irq time to the wrong task when an irq is in progress while we read
 * rq->clock. That is a worthy compromise in place of having locks on
 * each irq in account_system_time.
 */
DEFINE_PER_CPU(struct irqtime, cpu_irqtime);

static int sched_clock_irqtime;

void enable_sched_clock_irqtime(void)
{
	sched_clock_irqtime = 1;
}

void disable_sched_clock_irqtime(void)
{
	sched_clock_irqtime = 0;
}

/*
 * Called before incrementing preempt_count on {soft,}irq_enter
 * and before decrementing preempt_count on {soft,}irq_exit.
 */
void irqtime_account_irq(struct task_struct *curr)
{
	struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
	s64 delta;
	int cpu;

	if (!sched_clock_irqtime)
		return;

	cpu = smp_processor_id();
	delta = sched_clock_cpu(cpu) - irqtime->irq_start_time;
	irqtime->irq_start_time += delta;

	u64_stats_update_begin(&irqtime->sync);
	/*
	 * We do not account for softirq time from ksoftirqd here.
	 * We want to continue accounting softirq time to the ksoftirqd
	 * thread in that case, so as not to confuse the scheduler with a
	 * special task that does not consume any time but still wants to
	 * run.
	 */
	if (hardirq_count())
		irqtime->hardirq_time += delta;
	else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
		irqtime->softirq_time += delta;

	u64_stats_update_end(&irqtime->sync);
}
EXPORT_SYMBOL_GPL(irqtime_account_irq);

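/*
 * Fold the accumulated percpu irq time for @idx into kcpustat. The newly
 * accounted amount is capped at @maxtime and returned, so callers can
 * bound how much time gets moved in one go.
 */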
static cputime_t irqtime_account_update(u64 irqtime, int idx, cputime_t maxtime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;
	cputime_t irq_cputime;

	irq_cputime = nsecs_to_cputime64(irqtime) - cpustat[idx];
	irq_cputime = min(irq_cputime, maxtime);
	cpustat[idx] += irq_cputime;

	return irq_cputime;
}

static cputime_t irqtime_account_hi_update(cputime_t maxtime)
{
	return irqtime_account_update(__this_cpu_read(cpu_irqtime.hardirq_time),
				      CPUTIME_IRQ, maxtime);
}

static cputime_t irqtime_account_si_update(cputime_t maxtime)
{
	return irqtime_account_update(__this_cpu_read(cpu_irqtime.softirq_time),
				      CPUTIME_SOFTIRQ, maxtime);
}

#else /* CONFIG_IRQ_TIME_ACCOUNTING */

#define sched_clock_irqtime (0)

static cputime_t irqtime_account_hi_update(cputime_t dummy)
{
	return 0;
}

static cputime_t irqtime_account_si_update(cputime_t dummy)
{
	return 0;
}

#endif /* !CONFIG_IRQ_TIME_ACCOUNTING */

static inline void task_group_account_field(struct task_struct *p, int index,
					    u64 tmp)
{
	/*
	 * Since all updates are sure to touch the root cgroup, we
	 * get ourselves ahead and touch it first. If the root cgroup
	 * is the only cgroup, then nothing else should be necessary.
	 */
	__this_cpu_add(kernel_cpustat.cpustat[index], tmp);

	cpuacct_account_field(p, index, tmp);
}

/*
 * Account user cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in user space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
void account_user_time(struct task_struct *p, cputime_t cputime,
		       cputime_t cputime_scaled)
{
	int index;

	/* Add user time to process. */
	p->utime += cputime;
	p->utimescaled += cputime_scaled;
	account_group_user_time(p, cputime);

	index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;

	/* Add user time to cpustat. */
	task_group_account_field(p, index, (__force u64) cputime);

	/* Account for user time used */
	acct_account_cputime(p);
}

/*
 * Account guest cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in virtual machine since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
static void account_guest_time(struct task_struct *p, cputime_t cputime,
			       cputime_t cputime_scaled)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	/* Add guest time to process. */
	p->utime += cputime;
	p->utimescaled += cputime_scaled;
	account_group_user_time(p, cputime);
	p->gtime += cputime;

	/* Add guest time to cpustat. */
	if (task_nice(p) > 0) {
		cpustat[CPUTIME_NICE] += (__force u64) cputime;
		cpustat[CPUTIME_GUEST_NICE] += (__force u64) cputime;
	} else {
		cpustat[CPUTIME_USER] += (__force u64) cputime;
		cpustat[CPUTIME_GUEST] += (__force u64) cputime;
	}
}

/*
 * Account system cpu time to a process and desired cpustat field
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in kernel space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 * @index: index of the cpustat field that has to be updated
 */
static inline
void __account_system_time(struct task_struct *p, cputime_t cputime,
			   cputime_t cputime_scaled, int index)
{
	/* Add system time to process. */
	p->stime += cputime;
	p->stimescaled += cputime_scaled;
	account_group_system_time(p, cputime);

	/* Add system time to cpustat. */
	task_group_account_field(p, index, (__force u64) cputime);

	/* Account for system time used */
	acct_account_cputime(p);
}

/*
 * Account system cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @hardirq_offset: the offset to subtract from hardirq_count()
 * @cputime: the cpu time spent in kernel space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
void account_system_time(struct task_struct *p, int hardirq_offset,
			 cputime_t cputime, cputime_t cputime_scaled)
{
	int index;

	if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
		account_guest_time(p, cputime, cputime_scaled);
		return;
	}

	if (hardirq_count() - hardirq_offset)
		index = CPUTIME_IRQ;
	else if (in_serving_softirq())
		index = CPUTIME_SOFTIRQ;
	else
		index = CPUTIME_SYSTEM;

	__account_system_time(p, cputime, cputime_scaled, index);
}

/*
 * Account for involuntary wait time.
 * @cputime: the cpu time spent in involuntary wait
 */
void account_steal_time(cputime_t cputime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	cpustat[CPUTIME_STEAL] += (__force u64) cputime;
}

/*
 * Account for idle time.
 * @cputime: the cpu time spent in idle wait
 */
void account_idle_time(cputime_t cputime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;
	struct rq *rq = this_rq();

	if (atomic_read(&rq->nr_iowait) > 0)
		cpustat[CPUTIME_IOWAIT] += (__force u64) cputime;
	else
		cpustat[CPUTIME_IDLE] += (__force u64) cputime;
}

/*
 * When a guest is interrupted for a longer amount of time, missed clock
 * ticks are not redelivered later. Due to that, this function may on
 * occasion account more time than the calling functions think elapsed.
 */
static __always_inline cputime_t steal_account_process_time(cputime_t maxtime)
{
#ifdef CONFIG_PARAVIRT
	if (static_key_false(&paravirt_steal_enabled)) {
		cputime_t steal_cputime;
		u64 steal;

		steal = paravirt_steal_clock(smp_processor_id());
		steal -= this_rq()->prev_steal_time;

		steal_cputime = min(nsecs_to_cputime(steal), maxtime);
		account_steal_time(steal_cputime);
		this_rq()->prev_steal_time += cputime_to_nsecs(steal_cputime);

		return steal_cputime;
	}
#endif
	return 0;
}

/*
 * Account how much elapsed time was spent in steal, irq, or softirq time.
 */
static inline cputime_t account_other_time(cputime_t max)
{
	cputime_t accounted;

	/* Shall be converted to a lockdep-enabled lightweight check */
	WARN_ON_ONCE(!irqs_disabled());

	accounted = steal_account_process_time(max);

	if (accounted < max)
		accounted += irqtime_account_hi_update(max - accounted);

	if (accounted < max)
		accounted += irqtime_account_si_update(max - accounted);

	return accounted;
}

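/*
 * Reading the u64 sum_exec_runtime is atomic on 64-bit. On 32-bit we
 * take the runqueue lock so a concurrent update cannot be observed as
 * a torn read.
 */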
#ifdef CONFIG_64BIT
static inline u64 read_sum_exec_runtime(struct task_struct *t)
{
	return t->se.sum_exec_runtime;
}
#else
static u64 read_sum_exec_runtime(struct task_struct *t)
{
	u64 ns;
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(t, &rf);
	ns = t->se.sum_exec_runtime;
	task_rq_unlock(rq, t, &rf);

	return ns;
}
#endif

/*
 * Accumulate raw cputime values of dead tasks (sig->[us]time) and live
 * tasks (sum on group iteration) belonging to @tsk's group.
 */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
{
	struct signal_struct *sig = tsk->signal;
	cputime_t utime, stime;
	struct task_struct *t;
	unsigned int seq, nextseq;
	unsigned long flags;

	/*
	 * Update current task runtime to account pending time since last
	 * scheduler action or thread_group_cputime() call. This thread group
	 * might have other running tasks on different CPUs, but updating
	 * their runtime can affect syscall performance, so we skip accounting
	 * those pending times and rely only on values updated on tick or
	 * other scheduler action.
	 */
	if (same_thread_group(current, tsk))
		(void) task_sched_runtime(current);

	rcu_read_lock();
	/* Attempt a lockless read on the first round. */
	nextseq = 0;
	do {
		seq = nextseq;
		flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq);
		times->utime = sig->utime;
		times->stime = sig->stime;
		times->sum_exec_runtime = sig->sum_sched_runtime;

		for_each_thread(tsk, t) {
			task_cputime(t, &utime, &stime);
			times->utime += utime;
			times->stime += stime;
			times->sum_exec_runtime += read_sum_exec_runtime(t);
		}
		/* If lockless access failed, take the lock. */
		nextseq = 1;
	} while (need_seqretry(&sig->stats_lock, seq));
	done_seqretry_irqrestore(&sig->stats_lock, seq, flags);
	rcu_read_unlock();
}

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
 * Account a tick to a process and cpustat
 * @p: the process that the cpu time gets accounted to
 * @user_tick: is the tick from userspace
 * @rq: the pointer to rq
 *
 * Tick demultiplexing follows the order
 * - pending hardirq update
 * - pending softirq update
 * - user_time
 * - idle_time
 * - system time
 *   - check for guest_time
 *   - else account as system_time
 *
 * The check for hardirq is done both for system and user time as there is
 * no timer going off while we are on hardirq and hence we may never get an
 * opportunity to update it solely in system time.
 * p->stime and friends are only updated on system time and not on irq/
 * softirq as those do not count in task exec_runtime any more.
 */
static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
					 struct rq *rq, int ticks)
{
	u64 cputime = (__force u64) cputime_one_jiffy * ticks;
	cputime_t scaled, other;

	/*
	 * When returning from idle, many ticks can get accounted at
	 * once, including some ticks of steal, irq, and softirq time.
	 * Subtract those ticks from the amount of time accounted to
	 * idle, or potentially user or system time. Due to rounding,
	 * other time can exceed ticks occasionally.
	 */
	other = account_other_time(ULONG_MAX);
	if (other >= cputime)
		return;
	cputime -= other;
	scaled = cputime_to_scaled(cputime);

	if (this_cpu_ksoftirqd() == p) {
		/*
		 * ksoftirqd time does not get accounted in cpu_softirq_time.
		 * So, we have to handle it separately here.
		 * Also, p->stime needs to be updated for ksoftirqd.
		 */
		__account_system_time(p, cputime, scaled, CPUTIME_SOFTIRQ);
	} else if (user_tick) {
		account_user_time(p, cputime, scaled);
	} else if (p == rq->idle) {
		account_idle_time(cputime);
	} else if (p->flags & PF_VCPU) { /* System time or guest time */
		account_guest_time(p, cputime, scaled);
	} else {
		__account_system_time(p, cputime, scaled, CPUTIME_SYSTEM);
	}
}

static void irqtime_account_idle_ticks(int ticks)
{
	struct rq *rq = this_rq();

	irqtime_account_process_tick(current, 0, rq, ticks);
}
#else /* CONFIG_IRQ_TIME_ACCOUNTING */
static inline void irqtime_account_idle_ticks(int ticks) {}
static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
						struct rq *rq, int nr_ticks) {}
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */

/*
 * Use precise platform statistics if available:
 */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING

#ifndef __ARCH_HAS_VTIME_TASK_SWITCH
void vtime_common_task_switch(struct task_struct *prev)
{
	if (is_idle_task(prev))
		vtime_account_idle(prev);
	else
		vtime_account_system(prev);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	vtime_account_user(prev);
#endif
	arch_vtime_task_switch(prev);
}
#endif

#endif /* CONFIG_VIRT_CPU_ACCOUNTING */


#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/*
 * Archs that account the whole time spent in the idle task
 * (outside irq) as idle time can rely on this and just implement
 * vtime_account_system() and vtime_account_idle(). Archs that
 * have another meaning of the idle time (s390 only includes the
 * time spent by the CPU when it's in low power mode) must override
 * vtime_account().
 */
#ifndef __ARCH_HAS_VTIME_ACCOUNT
void vtime_account_irq_enter(struct task_struct *tsk)
{
	if (!in_interrupt() && is_idle_task(tsk))
		vtime_account_idle(tsk);
	else
		vtime_account_system(tsk);
}
EXPORT_SYMBOL_GPL(vtime_account_irq_enter);
#endif /* __ARCH_HAS_VTIME_ACCOUNT */

void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	*ut = p->utime;
	*st = p->stime;
}
EXPORT_SYMBOL_GPL(task_cputime_adjusted);

void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	struct task_cputime cputime;

	thread_group_cputime(p, &cputime);

	*ut = cputime.utime;
	*st = cputime.stime;
}
#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
/*
 * Account a single tick of cpu time.
 * @p: the process that the cpu time gets accounted to
 * @user_tick: indicates if the tick is a user or a system tick
 */
void account_process_tick(struct task_struct *p, int user_tick)
{
	cputime_t cputime, scaled, steal;
	struct rq *rq = this_rq();

	if (vtime_accounting_cpu_enabled())
		return;

	if (sched_clock_irqtime) {
		irqtime_account_process_tick(p, user_tick, rq, 1);
		return;
	}

	cputime = cputime_one_jiffy;
	steal = steal_account_process_time(ULONG_MAX);

	if (steal >= cputime)
		return;

	cputime -= steal;
	scaled = cputime_to_scaled(cputime);

	if (user_tick)
		account_user_time(p, cputime, scaled);
	else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
		account_system_time(p, HARDIRQ_OFFSET, cputime, scaled);
	else
		account_idle_time(cputime);
}

/*
 * Account multiple ticks of idle time.
 * @ticks: number of stolen ticks
 */
void account_idle_ticks(unsigned long ticks)
{
	cputime_t cputime, steal;

	if (sched_clock_irqtime) {
		irqtime_account_idle_ticks(ticks);
		return;
	}

	cputime = jiffies_to_cputime(ticks);
	steal = steal_account_process_time(ULONG_MAX);

	if (steal >= cputime)
		return;

	cputime -= steal;
	account_idle_time(cputime);
}

/*
 * Perform (stime * rtime) / total, but avoid multiplication overflow
 * by losing precision when the numbers are big.
 */
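/*
 * For example, stime = 3, utime = 1 (so total = 4) and rtime = 8 yield
 * scale_stime() == 6: stime keeps its 3/4 share of the scheduler
 * accounted runtime.
 */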
static cputime_t scale_stime(u64 stime, u64 rtime, u64 total)
{
	u64 scaled;

	for (;;) {
		/* Make sure "rtime" is the bigger of stime/rtime */
		if (stime > rtime)
			swap(rtime, stime);

		/* Make sure 'total' fits in 32 bits */
		if (total >> 32)
			goto drop_precision;

		/* Does rtime (and thus stime) fit in 32 bits? */
		if (!(rtime >> 32))
			break;

		/* Can we just balance rtime/stime rather than dropping bits? */
		if (stime >> 31)
			goto drop_precision;

		/* We can grow stime and shrink rtime and try to make them both fit */
		stime <<= 1;
		rtime >>= 1;
		continue;

drop_precision:
		/* We drop from rtime, it has more bits than stime */
		rtime >>= 1;
		total >>= 1;
	}

	/*
	 * Make sure gcc understands that this is a 32x32->64 multiply,
	 * followed by a 64/32->64 divide.
	 */
	scaled = div_u64((u64) (u32) stime * (u64) (u32) rtime, (u32)total);
	return (__force cputime_t) scaled;
}

/*
 * Adjust tick based cputime random precision against scheduler runtime
 * accounting.
 *
 * Tick based cputime accounting depends on random scheduling timeslices of a
 * task to be interrupted or not by the timer. Depending on these
 * circumstances, the number of these interrupts may be over or
 * under-optimistic, matching the real user and system cputime with a variable
 * precision.
 *
 * Fix this by scaling these tick based values against the total runtime
 * accounted by the CFS scheduler.
 *
 * This code provides the following guarantees:
 *
 *   stime + utime == rtime
 *   stime_i+1 >= stime_i, utime_i+1 >= utime_i
 *
 * Assuming that rtime_i+1 >= rtime_i.
 */
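/*
 * For example, ticks reporting stime = 1 and utime = 1 against a
 * scheduler accounted rtime = 4 are adjusted to stime = 2, utime = 2:
 * the observed 1:1 ratio is kept while stime + utime == rtime.
 */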
static void cputime_adjust(struct task_cputime *curr,
			   struct prev_cputime *prev,
			   cputime_t *ut, cputime_t *st)
{
	cputime_t rtime, stime, utime;
	unsigned long flags;

	/* Serialize concurrent callers such that we can honour our guarantees */
	raw_spin_lock_irqsave(&prev->lock, flags);
	rtime = nsecs_to_cputime(curr->sum_exec_runtime);

	/*
	 * This is possible under two circumstances:
	 *  - rtime isn't monotonic after all (a bug);
	 *  - we got reordered by the lock.
	 *
	 * In both cases this acts as a filter such that the rest of the code
	 * can assume it is monotonic regardless of anything else.
	 */
	if (prev->stime + prev->utime >= rtime)
		goto out;

	stime = curr->stime;
	utime = curr->utime;

	/*
	 * If either stime or both stime and utime are 0, assume all runtime is
	 * userspace. Once a task gets some ticks, the monotonicity code at
	 * 'update' will ensure things converge to the observed ratio.
	 */
	if (stime == 0) {
		utime = rtime;
		goto update;
	}

	if (utime == 0) {
		stime = rtime;
		goto update;
	}

	stime = scale_stime((__force u64)stime, (__force u64)rtime,
			    (__force u64)(stime + utime));

update:
	/*
	 * Make sure stime doesn't go backwards; this preserves monotonicity
	 * for utime because rtime is monotonic.
	 *
	 *  utime_i+1 = rtime_i+1 - stime_i
	 *            = rtime_i+1 - (rtime_i - utime_i)
	 *            = (rtime_i+1 - rtime_i) + utime_i
	 *            >= utime_i
	 */
	if (stime < prev->stime)
		stime = prev->stime;
	utime = rtime - stime;

	/*
	 * Make sure utime doesn't go backwards; this still preserves
	 * monotonicity for stime, analogous argument to above.
	 */
	if (utime < prev->utime) {
		utime = prev->utime;
		stime = rtime - utime;
	}

	prev->stime = stime;
	prev->utime = utime;
out:
	*ut = prev->utime;
	*st = prev->stime;
	raw_spin_unlock_irqrestore(&prev->lock, flags);
}

void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	struct task_cputime cputime = {
		.sum_exec_runtime = p->se.sum_exec_runtime,
	};

	task_cputime(p, &cputime.utime, &cputime.stime);
	cputime_adjust(&cputime, &p->prev_cputime, ut, st);
}
EXPORT_SYMBOL_GPL(task_cputime_adjusted);

void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	struct task_cputime cputime;

	thread_group_cputime(p, &cputime);
	cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
}
#endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
static cputime_t vtime_delta(struct task_struct *tsk)
{
	unsigned long now = READ_ONCE(jiffies);

	if (time_before(now, (unsigned long)tsk->vtime_snap))
		return 0;

	return jiffies_to_cputime(now - tsk->vtime_snap);
}

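/*
 * Like vtime_delta(), but also folds steal/irq/softirq time out of the
 * window via account_other_time() and advances vtime_snap, so the
 * returned delta is consumed exactly once.
 */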
static cputime_t get_vtime_delta(struct task_struct *tsk)
{
	unsigned long now = READ_ONCE(jiffies);
	cputime_t delta, other;

	/*
	 * Unlike tick based timing, vtime based timing never has lost
	 * ticks, so there is no need for steal time accounting to make
	 * up for lost ticks. Vtime accounts a rounded version of actual
	 * elapsed time. Limit account_other_time to prevent rounding
	 * errors from causing elapsed vtime to go negative.
	 */
	delta = jiffies_to_cputime(now - tsk->vtime_snap);
	other = account_other_time(delta);
	WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_INACTIVE);
	tsk->vtime_snap = now;

	return delta - other;
}

static void __vtime_account_system(struct task_struct *tsk)
{
	cputime_t delta_cpu = get_vtime_delta(tsk);

	account_system_time(tsk, irq_count(), delta_cpu, cputime_to_scaled(delta_cpu));
}

void vtime_account_system(struct task_struct *tsk)
{
	if (!vtime_delta(tsk))
		return;

	write_seqcount_begin(&tsk->vtime_seqcount);
	__vtime_account_system(tsk);
	write_seqcount_end(&tsk->vtime_seqcount);
}

void vtime_account_user(struct task_struct *tsk)
{
	cputime_t delta_cpu;

	write_seqcount_begin(&tsk->vtime_seqcount);
	tsk->vtime_snap_whence = VTIME_SYS;
	if (vtime_delta(tsk)) {
		delta_cpu = get_vtime_delta(tsk);
		account_user_time(tsk, delta_cpu, cputime_to_scaled(delta_cpu));
	}
	write_seqcount_end(&tsk->vtime_seqcount);
}

void vtime_user_enter(struct task_struct *tsk)
{
	write_seqcount_begin(&tsk->vtime_seqcount);
	if (vtime_delta(tsk))
		__vtime_account_system(tsk);
	tsk->vtime_snap_whence = VTIME_USER;
	write_seqcount_end(&tsk->vtime_seqcount);
}

void vtime_guest_enter(struct task_struct *tsk)
{
	/*
	 * The flags must be updated under the lock, together with
	 * the vtime_snap flush and update.
	 * That enforces the right ordering and update-sequence
	 * synchronization against the reader (task_gtime()),
	 * which can thus safely catch up with a tickless delta.
	 */
	write_seqcount_begin(&tsk->vtime_seqcount);
	if (vtime_delta(tsk))
		__vtime_account_system(tsk);
	current->flags |= PF_VCPU;
	write_seqcount_end(&tsk->vtime_seqcount);
}
EXPORT_SYMBOL_GPL(vtime_guest_enter);

void vtime_guest_exit(struct task_struct *tsk)
{
	write_seqcount_begin(&tsk->vtime_seqcount);
	__vtime_account_system(tsk);
	current->flags &= ~PF_VCPU;
	write_seqcount_end(&tsk->vtime_seqcount);
}
EXPORT_SYMBOL_GPL(vtime_guest_exit);

void vtime_account_idle(struct task_struct *tsk)
{
	cputime_t delta_cpu = get_vtime_delta(tsk);

	account_idle_time(delta_cpu);
}

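/*
 * Task switch: mark the outgoing task's vtime inactive and restart the
 * snapshot window for the incoming task.
 */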
void arch_vtime_task_switch(struct task_struct *prev)
{
	write_seqcount_begin(&prev->vtime_seqcount);
	prev->vtime_snap_whence = VTIME_INACTIVE;
	write_seqcount_end(&prev->vtime_seqcount);

	write_seqcount_begin(&current->vtime_seqcount);
	current->vtime_snap_whence = VTIME_SYS;
	current->vtime_snap = jiffies;
	write_seqcount_end(&current->vtime_seqcount);
}

void vtime_init_idle(struct task_struct *t, int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	write_seqcount_begin(&t->vtime_seqcount);
	t->vtime_snap_whence = VTIME_SYS;
	t->vtime_snap = jiffies;
	write_seqcount_end(&t->vtime_seqcount);
	local_irq_restore(flags);
}

cputime_t task_gtime(struct task_struct *t)
{
	unsigned int seq;
	cputime_t gtime;

	if (!vtime_accounting_enabled())
		return t->gtime;

	do {
		seq = read_seqcount_begin(&t->vtime_seqcount);

		gtime = t->gtime;
		if (t->vtime_snap_whence == VTIME_SYS && t->flags & PF_VCPU)
			gtime += vtime_delta(t);

	} while (read_seqcount_retry(&t->vtime_seqcount, seq));

	return gtime;
}

/*
 * Fetch cputime raw values from fields of task_struct and
 * add up the pending nohz execution time since the last
 * cputime snapshot.
 */
static void
fetch_task_cputime(struct task_struct *t,
		   cputime_t *u_dst, cputime_t *s_dst,
		   cputime_t *u_src, cputime_t *s_src,
		   cputime_t *udelta, cputime_t *sdelta)
{
	unsigned int seq;
	unsigned long long delta;

	do {
		*udelta = 0;
		*sdelta = 0;

		seq = read_seqcount_begin(&t->vtime_seqcount);

		if (u_dst)
			*u_dst = *u_src;
		if (s_dst)
			*s_dst = *s_src;

		/* Task is sleeping, nothing to add */
		if (t->vtime_snap_whence == VTIME_INACTIVE ||
		    is_idle_task(t))
			continue;

		delta = vtime_delta(t);

		/*
		 * Task runs either in user or kernel space, add pending nohz
		 * time to the right place.
		 */
		if (t->vtime_snap_whence == VTIME_USER || t->flags & PF_VCPU) {
			*udelta = delta;
		} else {
			if (t->vtime_snap_whence == VTIME_SYS)
				*sdelta = delta;
		}
	} while (read_seqcount_retry(&t->vtime_seqcount, seq));
}

void task_cputime(struct task_struct *t, cputime_t *utime, cputime_t *stime)
{
	cputime_t udelta, sdelta;

	if (!vtime_accounting_enabled()) {
		if (utime)
			*utime = t->utime;
		if (stime)
			*stime = t->stime;
		return;
	}

	fetch_task_cputime(t, utime, stime, &t->utime,
			   &t->stime, &udelta, &sdelta);
	if (utime)
		*utime += udelta;
	if (stime)
		*stime += sdelta;
}

void task_cputime_scaled(struct task_struct *t,
			 cputime_t *utimescaled, cputime_t *stimescaled)
{
	cputime_t udelta, sdelta;

	if (!vtime_accounting_enabled()) {
		if (utimescaled)
			*utimescaled = t->utimescaled;
		if (stimescaled)
			*stimescaled = t->stimescaled;
		return;
	}

	fetch_task_cputime(t, utimescaled, stimescaled,
			   &t->utimescaled, &t->stimescaled, &udelta, &sdelta);
	if (utimescaled)
		*utimescaled += cputime_to_scaled(udelta);
	if (stimescaled)
		*stimescaled += cputime_to_scaled(sdelta);
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */