/* kernel/sched/cputime.c */
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/tsacct_kern.h>
#include <linux/kernel_stat.h>
#include <linux/static_key.h>
#include <linux/context_tracking.h>
#include <linux/sched/cputime.h>
#include "sched.h"

#ifdef CONFIG_IRQ_TIME_ACCOUNTING

/*
 * There are no locks covering percpu hardirq/softirq time.
 * They are only modified in vtime_account, on corresponding CPU
 * with interrupts disabled. So, writes are safe.
 * They are read and saved off onto struct rq in update_rq_clock().
 * This may result in other CPU reading this CPU's irq time and can
 * race with irq/vtime_account on this CPU. We would either get old
 * or new value with a side effect of accounting a slice of irq time to wrong
 * task when irq is in progress while we read rq->clock. That is a worthy
 * compromise in place of having locks on each irq in account_system_time.
 */
DEFINE_PER_CPU(struct irqtime, cpu_irqtime);

static int sched_clock_irqtime;

void enable_sched_clock_irqtime(void)
{
        sched_clock_irqtime = 1;
}

void disable_sched_clock_irqtime(void)
{
        sched_clock_irqtime = 0;
}
static void irqtime_account_delta(struct irqtime *irqtime, u64 delta,
                                  enum cpu_usage_stat idx)
{
        u64 *cpustat = kcpustat_this_cpu->cpustat;

        u64_stats_update_begin(&irqtime->sync);
        cpustat[idx] += delta;
        irqtime->total += delta;
        irqtime->tick_delta += delta;
        u64_stats_update_end(&irqtime->sync);
}
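/*
 * Note: u64_stats_update_begin()/end() wrap the per-CPU counters so that
 * 32-bit readers (via u64_stats_fetch_begin()) observe a consistent 64-bit
 * snapshot of irqtime->total; on 64-bit kernels these helpers compile away.
 */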
/*
 * Called before incrementing preempt_count on {soft,}irq_enter
 * and before decrementing preempt_count on {soft,}irq_exit.
 */
void irqtime_account_irq(struct task_struct *curr)
{
        struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
        s64 delta;
        int cpu;

        if (!sched_clock_irqtime)
                return;

        cpu = smp_processor_id();
        delta = sched_clock_cpu(cpu) - irqtime->irq_start_time;
        irqtime->irq_start_time += delta;

        /*
         * We do not account for softirq time from ksoftirqd here.
         * We want to continue accounting softirq time to ksoftirqd thread
         * in that case, so as not to confuse scheduler with a special task
         * that does not consume any time, but still wants to run.
         */
        if (hardirq_count())
                irqtime_account_delta(irqtime, delta, CPUTIME_IRQ);
        else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
                irqtime_account_delta(irqtime, delta, CPUTIME_SOFTIRQ);
}
EXPORT_SYMBOL_GPL(irqtime_account_irq);
static u64 irqtime_tick_accounted(u64 maxtime)
{
        struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
        u64 delta;

        delta = min(irqtime->tick_delta, maxtime);
        irqtime->tick_delta -= delta;

        return delta;
}
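/*
 * In other words: tick_delta accumulates hard/soft irq time as it happens,
 * and the tick path drains up to @maxtime of it here, so the same
 * nanoseconds are not accounted twice (once as irq time, once as tick time).
 */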
#else /* CONFIG_IRQ_TIME_ACCOUNTING */

#define sched_clock_irqtime     (0)

static u64 irqtime_tick_accounted(u64 dummy)
{
        return 0;
}

#endif /* !CONFIG_IRQ_TIME_ACCOUNTING */
static inline void task_group_account_field(struct task_struct *p, int index,
                                            u64 tmp)
{
        /*
         * Since all updates are sure to touch the root cgroup, we
         * get ourselves ahead and touch it first. If the root cgroup
         * is the only cgroup, then nothing else should be necessary.
         */
        __this_cpu_add(kernel_cpustat.cpustat[index], tmp);

        cpuacct_account_field(p, index, tmp);
}
/*
 * Account user cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in user space since the last update
 */
void account_user_time(struct task_struct *p, u64 cputime)
{
        int index;

        /* Add user time to process. */
        p->utime += cputime;
        account_group_user_time(p, cputime);

        index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;

        /* Add user time to cpustat. */
        task_group_account_field(p, index, cputime);

        /* Account for user time used */
        acct_account_cputime(p);
}
/*
 * Account guest cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in virtual machine since the last update
 */
void account_guest_time(struct task_struct *p, u64 cputime)
{
        u64 *cpustat = kcpustat_this_cpu->cpustat;

        /* Add guest time to process. */
        p->utime += cputime;
        account_group_user_time(p, cputime);
        p->gtime += cputime;

        /* Add guest time to cpustat. */
        if (task_nice(p) > 0) {
                cpustat[CPUTIME_NICE] += cputime;
                cpustat[CPUTIME_GUEST_NICE] += cputime;
        } else {
                cpustat[CPUTIME_USER] += cputime;
                cpustat[CPUTIME_GUEST] += cputime;
        }
}
/*
 * Account system cpu time to a process and desired cpustat field
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in kernel space since the last update
 * @index: pointer to cpustat field that has to be updated
 */
void account_system_index_time(struct task_struct *p,
                               u64 cputime, enum cpu_usage_stat index)
{
        /* Add system time to process. */
        p->stime += cputime;
        account_group_system_time(p, cputime);

        /* Add system time to cpustat. */
        task_group_account_field(p, index, cputime);

        /* Account for system time used */
        acct_account_cputime(p);
}
/*
 * Account system cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @hardirq_offset: the offset to subtract from hardirq_count()
 * @cputime: the cpu time spent in kernel space since the last update
 */
void account_system_time(struct task_struct *p, int hardirq_offset, u64 cputime)
{
        int index;

        if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
                account_guest_time(p, cputime);
                return;
        }

        if (hardirq_count() - hardirq_offset)
                index = CPUTIME_IRQ;
        else if (in_serving_softirq())
                index = CPUTIME_SOFTIRQ;
        else
                index = CPUTIME_SYSTEM;

        account_system_index_time(p, cputime, index);
}
/*
 * Account for involuntary wait time.
 * @cputime: the cpu time spent in involuntary wait
 */
void account_steal_time(u64 cputime)
{
        u64 *cpustat = kcpustat_this_cpu->cpustat;

        cpustat[CPUTIME_STEAL] += cputime;
}
/*
 * Account for idle time.
 * @cputime: the cpu time spent in idle wait
 */
void account_idle_time(u64 cputime)
{
        u64 *cpustat = kcpustat_this_cpu->cpustat;
        struct rq *rq = this_rq();

        if (atomic_read(&rq->nr_iowait) > 0)
                cpustat[CPUTIME_IOWAIT] += cputime;
        else
                cpustat[CPUTIME_IDLE] += cputime;
}
/*
 * When a guest is interrupted for a longer amount of time, missed clock
 * ticks are not redelivered later. Due to that, this function may on
 * occasion account more time than the calling functions think elapsed.
 */
static __always_inline u64 steal_account_process_time(u64 maxtime)
{
#ifdef CONFIG_PARAVIRT
        if (static_key_false(&paravirt_steal_enabled)) {
                u64 steal;

                steal = paravirt_steal_clock(smp_processor_id());
                steal -= this_rq()->prev_steal_time;
                steal = min(steal, maxtime);
                account_steal_time(steal);
                this_rq()->prev_steal_time += steal;

                return steal;
        }
#endif
        return 0;
}
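/*
 * paravirt_steal_clock() reports, per CPU, the total time the host ran
 * something else while this vCPU wanted to run; prev_steal_time remembers
 * how much of that has already been folded into CPUTIME_STEAL, so only the
 * new delta (clamped to @maxtime) is accounted on each call.
 */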
/*
 * Account how much elapsed time was spent in steal, irq, or softirq time.
 */
static inline u64 account_other_time(u64 max)
{
        u64 accounted;

        /* Shall be converted to a lockdep-enabled lightweight check */
        WARN_ON_ONCE(!irqs_disabled());

        accounted = steal_account_process_time(max);

        if (accounted < max)
                accounted += irqtime_tick_accounted(max - accounted);

        return accounted;
}
#ifdef CONFIG_64BIT
static inline u64 read_sum_exec_runtime(struct task_struct *t)
{
        return t->se.sum_exec_runtime;
}
#else
static u64 read_sum_exec_runtime(struct task_struct *t)
{
        u64 ns;
        struct rq_flags rf;
        struct rq *rq;

        rq = task_rq_lock(t, &rf);
        ns = t->se.sum_exec_runtime;
        task_rq_unlock(rq, t, &rf);

        return ns;
}
#endif
/*
 * Accumulate raw cputime values of dead tasks (sig->[us]time) and live
 * tasks (sum on group iteration) belonging to @tsk's group.
 */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
{
        struct signal_struct *sig = tsk->signal;
        u64 utime, stime;
        struct task_struct *t;
        unsigned int seq, nextseq;
        unsigned long flags;

        /*
         * Update current task runtime to account pending time since last
         * scheduler action or thread_group_cputime() call. This thread group
         * might have other running tasks on different CPUs, but updating
         * their runtime can affect syscall performance, so we skip accounting
         * those pending times and rely only on values updated on tick or
         * other scheduler action.
         */
        if (same_thread_group(current, tsk))
                (void) task_sched_runtime(current);

        rcu_read_lock();
        /* Attempt a lockless read on the first round. */
        nextseq = 0;
        do {
                seq = nextseq;
                flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq);
                times->utime = sig->utime;
                times->stime = sig->stime;
                times->sum_exec_runtime = sig->sum_sched_runtime;

                for_each_thread(tsk, t) {
                        task_cputime(t, &utime, &stime);
                        times->utime += utime;
                        times->stime += stime;
                        times->sum_exec_runtime += read_sum_exec_runtime(t);
                }
                /* If lockless access failed, take the lock. */
                nextseq = 1;
        } while (need_seqretry(&sig->stats_lock, seq));
        done_seqretry_irqrestore(&sig->stats_lock, seq, flags);
        rcu_read_unlock();
}
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
 * Account a tick to a process and cpustat
 * @p: the process that the cpu time gets accounted to
 * @user_tick: is the tick from userspace
 * @rq: the pointer to rq
 *
 * Tick demultiplexing follows the order
 * - pending hardirq update
 * - pending softirq update
 * - user_time
 * - idle_time
 * - system time
 *   - check for guest_time
 *   - else account as system_time
 *
 * Check for hardirq is done both for system and user time as there is
 * no timer going off while we are on hardirq and hence we may never get an
 * opportunity to update it solely in system time.
 * p->stime and friends are only updated on system time and not on irq
 * softirq as those do not count in task exec_runtime any more.
 */
static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
                                         struct rq *rq, int ticks)
{
        u64 other, cputime = TICK_NSEC * ticks;

        /*
         * When returning from idle, many ticks can get accounted at
         * once, including some ticks of steal, irq, and softirq time.
         * Subtract those ticks from the amount of time accounted to
         * idle, or potentially user or system time. Due to rounding,
         * other time can exceed ticks occasionally.
         */
        other = account_other_time(ULONG_MAX);
        if (other >= cputime)
                return;

        cputime -= other;

        if (this_cpu_ksoftirqd() == p) {
                /*
                 * ksoftirqd time does not get accounted in cpu_softirq_time.
                 * So, we have to handle it separately here.
                 * Also, p->stime needs to be updated for ksoftirqd.
                 */
                account_system_index_time(p, cputime, CPUTIME_SOFTIRQ);
        } else if (user_tick) {
                account_user_time(p, cputime);
        } else if (p == rq->idle) {
                account_idle_time(cputime);
        } else if (p->flags & PF_VCPU) { /* System time or guest time */
                account_guest_time(p, cputime);
        } else {
                account_system_index_time(p, cputime, CPUTIME_SYSTEM);
        }
}

static void irqtime_account_idle_ticks(int ticks)
{
        struct rq *rq = this_rq();

        irqtime_account_process_tick(current, 0, rq, ticks);
}
#else /* CONFIG_IRQ_TIME_ACCOUNTING */
static inline void irqtime_account_idle_ticks(int ticks) { }
static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
                                                struct rq *rq, int nr_ticks) { }
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
/*
 * Use precise platform statistics if available:
 */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING

#ifndef __ARCH_HAS_VTIME_TASK_SWITCH
void vtime_common_task_switch(struct task_struct *prev)
{
        if (is_idle_task(prev))
                vtime_account_idle(prev);
        else
                vtime_account_system(prev);

        arch_vtime_task_switch(prev);
}
#endif /* __ARCH_HAS_VTIME_TASK_SWITCH */

#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/*
 * Archs that account the whole time spent in the idle task
 * (outside irq) as idle time can rely on this and just implement
 * vtime_account_system() and vtime_account_idle(). Archs that
 * have other meaning of the idle time (s390 only includes the
 * time spent by the CPU when it's in low power mode) must override
 * vtime_account().
 */
#ifndef __ARCH_HAS_VTIME_ACCOUNT
void vtime_account_irq_enter(struct task_struct *tsk)
{
        if (!in_interrupt() && is_idle_task(tsk))
                vtime_account_idle(tsk);
        else
                vtime_account_system(tsk);
}
EXPORT_SYMBOL_GPL(vtime_account_irq_enter);
#endif /* __ARCH_HAS_VTIME_ACCOUNT */
void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
{
        *ut = p->utime;
        *st = p->stime;
}
EXPORT_SYMBOL_GPL(task_cputime_adjusted);

void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
{
        struct task_cputime cputime;

        thread_group_cputime(p, &cputime);

        *ut = cputime.utime;
        *st = cputime.stime;
}
#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

/*
 * Account a single tick of cpu time.
 * @p: the process that the cpu time gets accounted to
 * @user_tick: indicates if the tick is a user or a system tick
 */
void account_process_tick(struct task_struct *p, int user_tick)
{
        u64 cputime, steal;
        struct rq *rq = this_rq();

        if (vtime_accounting_cpu_enabled())
                return;

        if (sched_clock_irqtime) {
                irqtime_account_process_tick(p, user_tick, rq, 1);
                return;
        }

        cputime = TICK_NSEC;
        steal = steal_account_process_time(ULONG_MAX);

        if (steal >= cputime)
                return;

        cputime -= steal;

        if (user_tick)
                account_user_time(p, cputime);
        else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
                account_system_time(p, HARDIRQ_OFFSET, cputime);
        else
                account_idle_time(cputime);
}
/*
 * Account multiple ticks of idle time.
 * @ticks: number of stolen ticks
 */
void account_idle_ticks(unsigned long ticks)
{
        u64 cputime, steal;

        if (sched_clock_irqtime) {
                irqtime_account_idle_ticks(ticks);
                return;
        }

        cputime = ticks * TICK_NSEC;
        steal = steal_account_process_time(ULONG_MAX);

        if (steal >= cputime)
                return;

        cputime -= steal;
        account_idle_time(cputime);
}
/*
 * Perform (stime * rtime) / total, but avoid multiplication overflow by
 * losing precision when the numbers are big.
 */
static u64 scale_stime(u64 stime, u64 rtime, u64 total)
{
        u64 scaled;

        for (;;) {
                /* Make sure "rtime" is the bigger of stime/rtime */
                if (stime > rtime)
                        swap(rtime, stime);

                /* Make sure 'total' fits in 32 bits */
                if (total >> 32)
                        goto drop_precision;

                /* Does rtime (and thus stime) fit in 32 bits? */
                if (!(rtime >> 32))
                        break;

                /* Can we just balance rtime/stime rather than dropping bits? */
                if (stime >> 31)
                        goto drop_precision;

                /* We can grow stime and shrink rtime and try to make them both fit */
                stime <<= 1;
                rtime >>= 1;
                continue;

drop_precision:
                /* We drop from rtime, it has more bits than stime */
                rtime >>= 1;
                total >>= 1;
        }

        /*
         * Make sure gcc understands that this is a 32x32->64 multiply,
         * followed by a 64/32->64 divide.
         */
        scaled = div_u64((u64) (u32) stime * (u64) (u32) rtime, (u32)total);
        return scaled;
}
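/*
 * The loop above only transforms the operands in ratio-preserving ways:
 * halving both rtime and total leaves stime * rtime / total (roughly)
 * unchanged, as does doubling stime while halving rtime. It stops once total
 * fits in 32 bits and the larger of stime/rtime does too, so the final
 * 32x32->64 multiply cannot overflow. E.g. (illustrative numbers)
 * stime = 2e9, rtime = 10e9, total = 8e9 converges to
 * 2.5e9 * 4e9 / 4e9 = 2.5e9, matching the exact 2e9 * 10e9 / 8e9.
 */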
/*
 * Adjust tick based cputime random precision against scheduler runtime
 * accounting.
 *
 * Tick based cputime accounting depends on random scheduling timeslices of a
 * task to be interrupted or not by the timer. Depending on these
 * circumstances, the number of these interrupts may be over or
 * under-optimistic, matching the real user and system cputime with a variable
 * precision.
 *
 * Fix this by scaling these tick based values against the total runtime
 * accounted by the CFS scheduler.
 *
 * This code provides the following guarantees:
 *
 *   stime + utime == rtime
 *   stime_i+1 >= stime_i, utime_i+1 >= utime_i
 *
 * Assuming that rtime_i+1 >= rtime_i.
 */
static void cputime_adjust(struct task_cputime *curr,
                           struct prev_cputime *prev,
                           u64 *ut, u64 *st)
{
        u64 rtime, stime, utime;
        unsigned long flags;

        /* Serialize concurrent callers such that we can honour our guarantees */
        raw_spin_lock_irqsave(&prev->lock, flags);
        rtime = curr->sum_exec_runtime;

        /*
         * This is possible under two circumstances:
         *  - rtime isn't monotonic after all (a bug);
         *  - we got reordered by the lock.
         *
         * In both cases this acts as a filter such that the rest of the code
         * can assume it is monotonic regardless of anything else.
         */
        if (prev->stime + prev->utime >= rtime)
                goto out;

        stime = curr->stime;
        utime = curr->utime;

        /*
         * If either stime or utime are 0, assume all runtime is userspace.
         * Once a task gets some ticks, the monotonicity code at 'update:'
         * will ensure things converge to the observed ratio.
         */
        if (stime == 0) {
                utime = rtime;
                goto update;
        }

        if (utime == 0) {
                stime = rtime;
                goto update;
        }

        stime = scale_stime(stime, rtime, stime + utime);

update:
        /*
         * Make sure stime doesn't go backwards; this preserves monotonicity
         * for utime because rtime is monotonic.
         *
         *  utime_i+1 = rtime_i+1 - stime_i
         *            = rtime_i+1 - (rtime_i - utime_i)
         *            = (rtime_i+1 - rtime_i) + utime_i
         *            >= utime_i
         */
        if (stime < prev->stime)
                stime = prev->stime;
        utime = rtime - stime;

        /*
         * Make sure utime doesn't go backwards; this still preserves
         * monotonicity for stime, analogous argument to above.
         */
        if (utime < prev->utime) {
                utime = prev->utime;
                stime = rtime - utime;
        }

        prev->stime = stime;
        prev->utime = utime;
out:
        *ut = prev->utime;
        *st = prev->stime;
        raw_spin_unlock_irqrestore(&prev->lock, flags);
}
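/*
 * Worked example (hypothetical numbers): with prev = {stime 4, utime 6} and a
 * new sample rtime = 12 whose scaled stime comes out as 3, the clamp keeps
 * stime at 4 and reports utime = 12 - 4 = 8, so both values still only grow
 * while their sum equals rtime.
 */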
void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
{
        struct task_cputime cputime = {
                .sum_exec_runtime = p->se.sum_exec_runtime,
        };

        task_cputime(p, &cputime.utime, &cputime.stime);
        cputime_adjust(&cputime, &p->prev_cputime, ut, st);
}
EXPORT_SYMBOL_GPL(task_cputime_adjusted);

void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
{
        struct task_cputime cputime;

        thread_group_cputime(p, &cputime);
        cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
}
#endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
static u64 vtime_delta(struct vtime *vtime)
{
        unsigned long long clock;

        clock = sched_clock_cpu(smp_processor_id());
        if (clock < vtime->starttime)
                return 0;

        return clock - vtime->starttime;
}
static u64 get_vtime_delta(struct vtime *vtime)
{
        u64 delta = vtime_delta(vtime);
        u64 other;

        /*
         * Unlike tick based timing, vtime based timing never has lost
         * ticks, and no need for steal time accounting to make up for
         * lost ticks. Vtime accounts a rounded version of actual
         * elapsed time. Limit account_other_time to prevent rounding
         * errors from causing elapsed vtime to go negative.
         */
        other = account_other_time(delta);
        WARN_ON_ONCE(vtime->state == VTIME_INACTIVE);
        vtime->starttime += delta;

        return delta - other;
}
static void __vtime_account_system(struct task_struct *tsk,
                                   struct vtime *vtime)
{
        vtime->stime += get_vtime_delta(vtime);
        if (vtime->stime >= TICK_NSEC) {
                account_system_time(tsk, irq_count(), vtime->stime);
                vtime->stime = 0;
        }
}

static void vtime_account_guest(struct task_struct *tsk,
                                struct vtime *vtime)
{
        vtime->gtime += get_vtime_delta(vtime);
        if (vtime->gtime >= TICK_NSEC) {
                account_guest_time(tsk, vtime->gtime);
                vtime->gtime = 0;
        }
}
void vtime_account_system(struct task_struct *tsk)
{
        struct vtime *vtime = &tsk->vtime;

        if (!vtime_delta(vtime))
                return;

        write_seqcount_begin(&vtime->seqcount);
        /* We might have scheduled out from guest path */
        if (current->flags & PF_VCPU)
                vtime_account_guest(tsk, vtime);
        else
                __vtime_account_system(tsk, vtime);
        write_seqcount_end(&vtime->seqcount);
}
void vtime_user_enter(struct task_struct *tsk)
{
        struct vtime *vtime = &tsk->vtime;

        write_seqcount_begin(&vtime->seqcount);
        __vtime_account_system(tsk, vtime);
        vtime->state = VTIME_USER;
        write_seqcount_end(&vtime->seqcount);
}
void vtime_user_exit(struct task_struct *tsk)
{
        struct vtime *vtime = &tsk->vtime;

        write_seqcount_begin(&vtime->seqcount);
        vtime->utime += get_vtime_delta(vtime);
        if (vtime->utime >= TICK_NSEC) {
                account_user_time(tsk, vtime->utime);
                vtime->utime = 0;
        }
        vtime->state = VTIME_SYS;
        write_seqcount_end(&vtime->seqcount);
}
void vtime_guest_enter(struct task_struct *tsk)
{
        struct vtime *vtime = &tsk->vtime;

        /*
         * The flags must be updated under the lock with
         * the vtime_starttime flush and update.
         * That enforces a right ordering and update sequence
         * synchronization against the reader (task_gtime())
         * that can thus safely catch up with a tickless delta.
         */
        write_seqcount_begin(&vtime->seqcount);
        __vtime_account_system(tsk, vtime);
        current->flags |= PF_VCPU;
        write_seqcount_end(&vtime->seqcount);
}
EXPORT_SYMBOL_GPL(vtime_guest_enter);
void vtime_guest_exit(struct task_struct *tsk)
{
        struct vtime *vtime = &tsk->vtime;

        write_seqcount_begin(&vtime->seqcount);
        vtime_account_guest(tsk, vtime);
        current->flags &= ~PF_VCPU;
        write_seqcount_end(&vtime->seqcount);
}
EXPORT_SYMBOL_GPL(vtime_guest_exit);
void vtime_account_idle(struct task_struct *tsk)
{
        account_idle_time(get_vtime_delta(&tsk->vtime));
}
void arch_vtime_task_switch(struct task_struct *prev)
{
        struct vtime *vtime = &prev->vtime;

        write_seqcount_begin(&vtime->seqcount);
        vtime->state = VTIME_INACTIVE;
        write_seqcount_end(&vtime->seqcount);

        vtime = &current->vtime;

        write_seqcount_begin(&vtime->seqcount);
        vtime->state = VTIME_SYS;
        vtime->starttime = sched_clock_cpu(smp_processor_id());
        write_seqcount_end(&vtime->seqcount);
}
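/*
 * On a context switch the outgoing task's vtime is parked in VTIME_INACTIVE
 * (its pending delta was already flushed by vtime_account_system()/idle()
 * from vtime_common_task_switch()), and the incoming task restarts
 * accounting from the current clock in VTIME_SYS state.
 */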
void vtime_init_idle(struct task_struct *t, int cpu)
{
        struct vtime *vtime = &t->vtime;
        unsigned long flags;

        local_irq_save(flags);
        write_seqcount_begin(&vtime->seqcount);
        vtime->state = VTIME_SYS;
        vtime->starttime = sched_clock_cpu(cpu);
        write_seqcount_end(&vtime->seqcount);
        local_irq_restore(flags);
}
u64 task_gtime(struct task_struct *t)
{
        struct vtime *vtime = &t->vtime;
        unsigned int seq;
        u64 gtime;

        if (!vtime_accounting_enabled())
                return t->gtime;

        do {
                seq = read_seqcount_begin(&vtime->seqcount);

                gtime = t->gtime;
                if (vtime->state == VTIME_SYS && t->flags & PF_VCPU)
                        gtime += vtime->gtime + vtime_delta(vtime);

        } while (read_seqcount_retry(&vtime->seqcount, seq));

        return gtime;
}
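/*
 * Reader side of the vtime seqcount: if a writer (e.g. vtime_guest_exit())
 * updates the fields while we sample them, read_seqcount_retry() notices the
 * sequence change and we simply loop and re-read, so the returned guest time
 * is always a consistent snapshot without taking a lock.
 */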
/*
 * Fetch cputime raw values from fields of task_struct and
 * add up the pending nohz execution time since the last
 * accounting update.
 */
void task_cputime(struct task_struct *t, u64 *utime, u64 *stime)
{
        struct vtime *vtime = &t->vtime;
        unsigned int seq;
        u64 delta;

        if (!vtime_accounting_enabled()) {
                *utime = t->utime;
                *stime = t->stime;
                return;
        }

        do {
                seq = read_seqcount_begin(&vtime->seqcount);

                *utime = t->utime;
                *stime = t->stime;

                /* Task is sleeping, nothing to add */
                if (vtime->state == VTIME_INACTIVE || is_idle_task(t))
                        continue;

                delta = vtime_delta(vtime);

                /*
                 * Task runs either in user or kernel space, add pending nohz
                 * time to the right place.
                 */
                if (vtime->state == VTIME_USER || t->flags & PF_VCPU)
                        *utime += vtime->utime + delta;
                else if (vtime->state == VTIME_SYS)
                        *stime += vtime->stime + delta;
        } while (read_seqcount_retry(&vtime->seqcount, seq));
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */