#ifdef CONFIG_SCHEDSTATS

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
	if (rq) {
		rq->rq_sched_info.run_delay += delta;
		rq->rq_sched_info.pcount++;
	}
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_cpu_time += delta;
}

static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_sched_info.run_delay += delta;
}
# define schedstat_inc(rq, field)	do { (rq)->field++; } while (0)
# define schedstat_add(rq, field, amt)	do { (rq)->field += (amt); } while (0)
# define schedstat_set(var, val)	do { var = (val); } while (0)
#else /* !CONFIG_SCHEDSTATS */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{}
# define schedstat_inc(rq, field)	do { } while (0)
# define schedstat_add(rq, field, amt)	do { } while (0)
# define schedstat_set(var, val)	do { } while (0)
#endif
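
/*
 * Example (editor's sketch, not part of the original header): with
 * CONFIG_SCHEDSTATS enabled, a caller that already holds the runqueue lock
 * can bump a per-rq statistics field through these macros, e.g.
 *
 *	schedstat_inc(rq, yld_count);			// rq->yld_count++
 *	schedstat_add(rq, rq_cpu_time, delta);		// rq->rq_cpu_time += delta
 *
 * The field names here are illustrative; when CONFIG_SCHEDSTATS is off the
 * macros compile away to nothing, so call sites need no #ifdefs.
 */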

#ifdef CONFIG_SCHED_INFO
static inline void sched_info_reset_dequeued(struct task_struct *t)
{
	t->sched_info.last_queued = 0;
}

/*
 * We are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a cpu; we call this routine
 * from dequeue_task() to account for possible rq->clock skew across cpus. The
 * delta taken on each cpu would annul the skew.
 */
static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t)
{
	unsigned long long now = rq_clock(rq), delta = 0;

	if (unlikely(sched_info_on()))
		if (t->sched_info.last_queued)
			delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;

	rq_sched_info_dequeued(rq, delta);
}
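
/*
 * Worked example (editor's illustration, not from the original source):
 * suppose a task is queued on CPU0 at CPU0 rq_clock() time 100 and is
 * dequeued for migration at time 150; 50 units of run_delay are charged
 * above using CPU0's clock only.  If it is then queued on CPU1 at CPU1
 * rq_clock() time 900 and finally gets the cpu at 930, sched_info_arrive()
 * below adds another 30 units using CPU1's clock only.  The total of 80 is
 * therefore independent of any skew between the two rq clocks, which is
 * what the comment above means by the per-cpu deltas annulling the skew.
 */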

/*
 * Called when a task finally hits the cpu. We can now calculate how
 * long it was waiting to run. We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct rq *rq, struct task_struct *t)
{
	unsigned long long now = rq_clock(rq), delta = 0;

	if (t->sched_info.last_queued)
		delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;
	t->sched_info.last_arrival = now;
	t->sched_info.pcount++;

	rq_sched_info_arrive(rq, delta);
}

/*
 * This function is only called from enqueue_task(), but also only updates
 * the timestamp if it is not already set. It's assumed that
 * sched_info_dequeued() will clear that stamp when appropriate.
 */
static inline void sched_info_queued(struct rq *rq, struct task_struct *t)
{
	if (unlikely(sched_info_on()))
		if (!t->sched_info.last_queued)
			t->sched_info.last_queued = rq_clock(rq);
}

/*
 * Called when a process ceases being the active-running process involuntarily
 * due, typically, to expiring its time slice (this may also be called when
 * switching to the idle task).  Now we can calculate how long we ran.
 * Also, if the process is still in the TASK_RUNNING state, call
 * sched_info_queued() to mark that it has now again started waiting on
 * the runqueue.
 */
static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
{
	unsigned long long delta = rq_clock(rq) -
					t->sched_info.last_arrival;

	rq_sched_info_depart(rq, delta);

	if (t->state == TASK_RUNNING)
		sched_info_queued(rq, t);
}
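
/*
 * Note (editor's addition): the TASK_RUNNING check above distinguishes
 * preemption from sleeping.  A task that departs while still runnable has
 * last_queued stamped again immediately, so its time back on the runqueue
 * counts as run_delay; a task that departs because it blocked is not
 * re-stamped until enqueue_task() queues it again.
 */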

/*
 * Called when tasks are switched involuntarily due, typically, to expiring
 * their time slice.  (This may also be called when switching to or from
 * the idle task.)  We are only called when prev != next.
 */
static inline void
__sched_info_switch(struct rq *rq,
		    struct task_struct *prev, struct task_struct *next)
{
	/*
	 * prev now departs the cpu.  It's not interesting to record
	 * stats about how efficient we were at scheduling the idle
	 * process, however.
	 */
	if (prev != rq->idle)
		sched_info_depart(rq, prev);

	if (next != rq->idle)
		sched_info_arrive(rq, next);
}
static inline void
sched_info_switch(struct rq *rq,
		  struct task_struct *prev, struct task_struct *next)
{
	if (unlikely(sched_info_on()))
		__sched_info_switch(rq, prev, next);
}
#else
#define sched_info_queued(rq, t)		do { } while (0)
#define sched_info_reset_dequeued(t)		do { } while (0)
#define sched_info_dequeued(rq, t)		do { } while (0)
#define sched_info_depart(rq, t)		do { } while (0)
#define sched_info_arrive(rq, next)		do { } while (0)
#define sched_info_switch(rq, t, next)		do { } while (0)
#endif /* CONFIG_SCHED_INFO */
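
/*
 * Example (editor's sketch): the context-switch path is the expected caller
 * of sched_info_switch().  In kernel/sched/core.c of this era the call is
 * believed to look roughly like
 *
 *	prepare_task_switch(rq, prev, next)
 *		-> sched_info_switch(rq, prev, next);
 *
 * i.e. it runs once per real switch (prev != next), and the stubs above make
 * it free when CONFIG_SCHED_INFO is disabled.
 */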

/*
 * The following are functions that support scheduler-internal time accounting.
 * These functions are generally called at the timer tick.  None of this depends
 * on CONFIG_SCHEDSTATS.
 */

/**
 * cputimer_running - return true if cputimer is running
 *
 * @tsk:	Pointer to target task.
 */
static inline bool cputimer_running(struct task_struct *tsk)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	/* Check if cputimer isn't running. This is accessed without locking. */
	if (!READ_ONCE(cputimer->running))
		return false;

	/*
	 * After we flush the task's sum_exec_runtime to sig->sum_sched_runtime
	 * in __exit_signal(), we won't account to the signal struct further
	 * cputime consumed by that task, even though the task can still be
	 * ticking after __exit_signal().
	 *
	 * In order to keep a consistent behaviour between thread group cputime
	 * and thread group cputimer accounting, let's also ignore the cputime
	 * elapsing after __exit_signal() in any thread group timer running.
	 *
	 * This makes sure that POSIX CPU clocks and timers are synchronized, so
	 * that a POSIX CPU timer won't expire while the corresponding POSIX CPU
	 * clock delta is behind the expiring timer value.
	 */
	if (unlikely(!tsk->sighand))
		return false;

	return true;
}

/**
 * account_group_user_time - Maintain utime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @cputime:	Time value by which to increment the utime field of the
 *		thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the utime field there.
 */
static inline void account_group_user_time(struct task_struct *tsk,
					   cputime_t cputime)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer_running(tsk))
		return;

	atomic64_add(cputime, &cputimer->cputime_atomic.utime);
}
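
/*
 * Usage note (editor's addition, hedged): the expected caller is the tick
 * accounting code, e.g. account_user_time() in kernel/sched/cputime.c
 * forwarding the same cputime value via account_group_user_time(p, cputime).
 * The stime and sum_exec_runtime helpers below follow the same pattern, and
 * all of them are no-ops unless cputimer->running is set.
 */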

/**
 * account_group_system_time - Maintain stime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @cputime:	Time value by which to increment the stime field of the
 *		thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the stime field there.
 */
static inline void account_group_system_time(struct task_struct *tsk,
					     cputime_t cputime)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer_running(tsk))
		return;

	atomic64_add(cputime, &cputimer->cputime_atomic.stime);
}

/**
 * account_group_exec_runtime - Maintain exec runtime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @ns:		Time value by which to increment the sum_exec_runtime field
 *		of the thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the sum_exec_runtime field there.
 */
static inline void account_group_exec_runtime(struct task_struct *tsk,
					      unsigned long long ns)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer_running(tsk))
		return;

	atomic64_add(ns, &cputimer->cputime_atomic.sum_exec_runtime);
}
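
/*
 * Example (editor's sketch): account_group_exec_runtime() is expected to be
 * driven from the scheduler's runtime-update path, roughly:
 *
 *	update_curr(cfs_rq)
 *		-> account_group_exec_runtime(curtask, delta_exec);
 *
 * where delta_exec is in nanoseconds, matching the atomic64 sum_exec_runtime
 * counter updated above.
 */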