/* SPDX-License-Identifier: GPL-2.0 */

#ifdef CONFIG_SCHEDSTATS

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
	if (rq) {
		rq->rq_sched_info.run_delay += delta;
		rq->rq_sched_info.pcount++;
	}
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_cpu_time += delta;
}

static inline void
rq_sched_info_dequeue(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_sched_info.run_delay += delta;
}
#define   schedstat_enabled()		static_branch_unlikely(&sched_schedstats)
#define __schedstat_inc(var)		do { var++; } while (0)
#define   schedstat_inc(var)		do { if (schedstat_enabled()) { var++; } } while (0)
#define __schedstat_add(var, amt)	do { var += (amt); } while (0)
#define   schedstat_add(var, amt)	do { if (schedstat_enabled()) { var += (amt); } } while (0)
#define __schedstat_set(var, val)	do { var = (val); } while (0)
#define   schedstat_set(var, val)	do { if (schedstat_enabled()) { var = (val); } } while (0)
#define   schedstat_val(var)		(var)
#define   schedstat_val_or_zero(var)	((schedstat_enabled()) ? (var) : 0)
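
/*
 * Added note: the plain schedstat_*() helpers test the sched_schedstats
 * static branch on every use, while the __schedstat_*() variants update
 * unconditionally and suit paths that have already checked
 * schedstat_enabled(). Illustrative pattern (added example, not from the
 * original file):
 *
 *	if (schedstat_enabled()) {
 *		__schedstat_inc(rq->rq_sched_info.pcount);
 *		__schedstat_add(rq->rq_cpu_time, delta);
 *	}
 */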

#else /* !CONFIG_SCHEDSTATS: */
static inline void rq_sched_info_arrive (struct rq *rq, unsigned long long delta) { }
static inline void rq_sched_info_dequeue(struct rq *rq, unsigned long long delta) { }
static inline void rq_sched_info_depart (struct rq *rq, unsigned long long delta) { }
# define   schedstat_enabled()		0
# define __schedstat_inc(var)		do { } while (0)
# define   schedstat_inc(var)		do { } while (0)
# define __schedstat_add(var, amt)	do { } while (0)
# define   schedstat_add(var, amt)	do { } while (0)
# define __schedstat_set(var, val)	do { } while (0)
# define   schedstat_set(var, val)	do { } while (0)
# define   schedstat_val(var)		0
# define   schedstat_val_or_zero(var)	0
#endif /* CONFIG_SCHEDSTATS */

#ifdef CONFIG_PSI
/*
 * PSI tracks state that persists across sleeps, such as iowaits and
 * memory stalls. As a result, it has to distinguish between sleeps,
 * where a task's runnable state changes, and requeues, where a task
 * and its state are being moved between CPUs and runqueues.
 */
static inline void psi_enqueue(struct task_struct *p, bool wakeup)
{
	int clear = 0, set = TSK_RUNNING;

	if (static_branch_likely(&psi_disabled))
		return;

	if (!wakeup || p->sched_psi_wake_requeue) {
		if (p->in_memstall)
			set |= TSK_MEMSTALL;
		if (p->sched_psi_wake_requeue)
			p->sched_psi_wake_requeue = 0;
	} else {
		if (p->in_iowait)
			clear |= TSK_IOWAIT;
	}

	psi_task_change(p, clear, set);
}
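
/*
 * Added walk-through: a task waking from iowait without a cross-CPU
 * migration (wakeup == true, sched_psi_wake_requeue == 0) takes the
 * else-branch above, so the call collapses to
 *
 *	psi_task_change(p, TSK_IOWAIT, TSK_RUNNING);
 *
 * ending the iowait state at the same instant the runnable state begins.
 */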

static inline void psi_dequeue(struct task_struct *p, bool sleep)
{
	int clear = TSK_RUNNING;

	if (static_branch_likely(&psi_disabled))
		return;

	/*
	 * A voluntary sleep is a dequeue followed by a task switch. To
	 * avoid walking all ancestors twice, psi_task_switch() handles
	 * TSK_RUNNING and TSK_IOWAIT for us when it moves TSK_ONCPU.
	 * Do nothing here.
	 */
	if (sleep)
		return;

	if (p->in_memstall)
		clear |= TSK_MEMSTALL;

	psi_task_change(p, clear, 0);
}

static inline void psi_ttwu_dequeue(struct task_struct *p)
{
	if (static_branch_likely(&psi_disabled))
		return;
	/*
	 * Is the task being migrated during a wakeup? Make sure to
	 * deregister its sleep-persistent psi states from the old
	 * queue, and let psi_enqueue() know it has to requeue.
	 */
	if (unlikely(p->in_iowait || p->in_memstall)) {
		struct rq_flags rf;
		struct rq *rq;
		int clear = 0;

		if (p->in_iowait)
			clear |= TSK_IOWAIT;
		if (p->in_memstall)
			clear |= TSK_MEMSTALL;

		rq = __task_rq_lock(p, &rf);
		psi_task_change(p, clear, 0);
		p->sched_psi_wake_requeue = 1;
		__task_rq_unlock(rq, &rf);
	}
}
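
/*
 * Added note: "ttwu" is try_to_wake_up(); this hook is expected to run
 * when a wakeup migrates the task to a different CPU, so sleep-persistent
 * states are cleared on the old runqueue here, and the requeue flag set
 * above makes psi_enqueue() re-establish TSK_MEMSTALL on the new one.
 */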

static inline void psi_sched_switch(struct task_struct *prev,
				    struct task_struct *next,
				    bool sleep)
{
	if (static_branch_likely(&psi_disabled))
		return;

	psi_task_switch(prev, next, sleep);
}
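
/*
 * Added note: on a voluntary sleep, psi_dequeue() deliberately does
 * nothing and defers to this hook, so psi_task_switch() can adjust
 * TSK_RUNNING and TSK_IOWAIT for prev in the same ancestor walk that
 * moves TSK_ONCPU.
 */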

#else /* CONFIG_PSI */
static inline void psi_enqueue(struct task_struct *p, bool wakeup) {}
static inline void psi_dequeue(struct task_struct *p, bool sleep) {}
static inline void psi_ttwu_dequeue(struct task_struct *p) {}
static inline void psi_sched_switch(struct task_struct *prev,
				    struct task_struct *next,
				    bool sleep) {}
#endif /* CONFIG_PSI */

#ifdef CONFIG_SCHED_INFO
/*
 * We are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a CPU. We call this
 * routine from dequeue_task() to account for possible rq->clock skew
 * across CPUs: the delta taken on each CPU would annul the skew.
 */
static inline void sched_info_dequeue(struct rq *rq, struct task_struct *t)
{
	unsigned long long delta = 0;

	if (!t->sched_info.last_queued)
		return;

	delta = rq_clock(rq) - t->sched_info.last_queued;
	t->sched_info.last_queued = 0;
	t->sched_info.run_delay += delta;

	rq_sched_info_dequeue(rq, delta);
}
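
/*
 * Added note: last_queued is stamped and consumed under the clock of the
 * rq the task is queued on; a migration dequeues from the old rq (taking
 * the delta there) and re-stamps on the new one, so each delta is measured
 * against a single CPU's clock and cross-CPU skew cancels out.
 */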

/*
 * Called when a task finally hits the CPU. We can now calculate how
 * long it was waiting to run. We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct rq *rq, struct task_struct *t)
{
	unsigned long long now, delta = 0;

	if (!t->sched_info.last_queued)
		return;

	now = rq_clock(rq);
	delta = now - t->sched_info.last_queued;
	t->sched_info.last_queued = 0;
	t->sched_info.run_delay += delta;
	t->sched_info.last_arrival = now;
	t->sched_info.pcount++;

	rq_sched_info_arrive(rq, delta);
}

/*
 * This function is only called from enqueue_task(), and it only updates
 * the timestamp if it is not already set. It's assumed that
 * sched_info_dequeue() will clear that stamp when appropriate.
 */
static inline void sched_info_enqueue(struct rq *rq, struct task_struct *t)
{
	if (!t->sched_info.last_queued)
		t->sched_info.last_queued = rq_clock(rq);
}

/*
 * Called when a process ceases being the active-running process involuntarily
 * due, typically, to expiring its time slice (this may also be called when
 * switching to the idle task). Now we can calculate how long we ran.
 * Also, if the process is still in the TASK_RUNNING state, call
 * sched_info_enqueue() to mark that it has now again started waiting on
 * the runqueue.
 */
static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
{
	unsigned long long delta = rq_clock(rq) - t->sched_info.last_arrival;

	rq_sched_info_depart(rq, delta);

	if (task_is_running(t))
		sched_info_enqueue(rq, t);
}
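
/*
 * Added note: in this kernel tree task_is_running(t) reads as
 * READ_ONCE(t->__state) == TASK_RUNNING, so a merely preempted task is
 * still "running" and gets re-stamped as waiting via sched_info_enqueue().
 */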

/*
 * Called when tasks are switched involuntarily due, typically, to expiring
 * their time slice. (This may also be called when switching to or from
 * the idle task.) We are only called when prev != next.
 */
static inline void
sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)
{
	/*
	 * prev now departs the CPU. It's not interesting to record
	 * stats about how efficient we were at scheduling the idle
	 * process, however.
	 */
	if (prev != rq->idle)
		sched_info_depart(rq, prev);

	if (next != rq->idle)
		sched_info_arrive(rq, next);
}
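
/*
 * Added summary: across a task's life cycle the hooks compose as
 * sched_info_enqueue() (stamp last_queued) -> sched_info_arrive()
 * (last_queued becomes run_delay, stamp last_arrival) ->
 * sched_info_depart() (last_arrival feeds rq_cpu_time, and a still
 * runnable task is immediately re-queued for accounting).
 */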

#else /* !CONFIG_SCHED_INFO: */
# define sched_info_enqueue(rq, t)	do { } while (0)
# define sched_info_dequeue(rq, t)	do { } while (0)
# define sched_info_switch(rq, t, next)	do { } while (0)
#endif /* CONFIG_SCHED_INFO */