/* SPDX-License-Identifier: GPL-2.0 */

#ifdef CONFIG_SCHEDSTATS

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
	if (rq) {
		rq->rq_sched_info.run_delay += delta;
		rq->rq_sched_info.pcount++;
	}
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_cpu_time += delta;
}

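/*
 * Like the helpers above, this expects the runqueue lock to be held
 * for atomicity of update.
 */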
static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_sched_info.run_delay += delta;
}
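
/*
 * The schedstat_*() helpers below are additionally gated at runtime by
 * the sched_schedstats static key, so with stats compiled in but
 * disabled they cost only a patched-out branch. A typical call site
 * (sketch, from the wakeup path) looks like:
 *
 *	schedstat_inc(p->se.statistics.nr_wakeups);
 */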
#define schedstat_enabled()	static_branch_unlikely(&sched_schedstats)
#define schedstat_inc(var)	do { if (schedstat_enabled()) { var++; } } while (0)
#define schedstat_add(var, amt)	do { if (schedstat_enabled()) { var += (amt); } } while (0)
#define schedstat_set(var, val)	do { if (schedstat_enabled()) { var = (val); } } while (0)
#define schedstat_val(var)	(var)
#define schedstat_val_or_zero(var)	((schedstat_enabled()) ? (var) : 0)

#else /* !CONFIG_SCHEDSTATS */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{}
#define schedstat_enabled()	0
#define schedstat_inc(var)	do { } while (0)
#define schedstat_add(var, amt)	do { } while (0)
#define schedstat_set(var, val)	do { } while (0)
#define schedstat_val(var)	0
#define schedstat_val_or_zero(var)	0
#endif /* CONFIG_SCHEDSTATS */

#ifdef CONFIG_SCHED_INFO
static inline void sched_info_reset_dequeued(struct task_struct *t)
{
	t->sched_info.last_queued = 0;
}

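/*
 * Overview of the CONFIG_SCHED_INFO hooks below, as wired up from the
 * core scheduler (the call sites are named in the comments that follow):
 *
 *	enqueue_task()  -> sched_info_queued():   stamp last_queued
 *	dequeue_task()  -> sched_info_dequeued(): run_delay += now - last_queued
 *	context switch  -> sched_info_switch():   depart(prev), arrive(next)
 */
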
/*
 * We are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a cpu. We call this
 * routine from dequeue_task() to account for possible rq->clock skew
 * across cpus: the delta taken on each cpu would annul the skew.
 */
static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t)
{
	unsigned long long now = rq_clock(rq), delta = 0;

	if (unlikely(sched_info_on()))
		if (t->sched_info.last_queued)
			delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;

	rq_sched_info_dequeued(rq, delta);
}

/*
 * Called when a task finally hits the cpu. We can now calculate how
 * long it was waiting to run. We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct rq *rq, struct task_struct *t)
{
	unsigned long long now = rq_clock(rq), delta = 0;

	if (t->sched_info.last_queued)
		delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;
	t->sched_info.last_arrival = now;
	t->sched_info.pcount++;

	rq_sched_info_arrive(rq, delta);
}

/*
 * This function is only called from enqueue_task(), but it only updates
 * the timestamp if it is not already set. It's assumed that
 * sched_info_dequeued() will clear that stamp when appropriate.
 */
static inline void sched_info_queued(struct rq *rq, struct task_struct *t)
{
	if (unlikely(sched_info_on()))
		if (!t->sched_info.last_queued)
			t->sched_info.last_queued = rq_clock(rq);
}

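/*
 * Leaving an existing last_queued stamp alone is deliberate: together
 * with sched_info_dequeued() above, it makes run_delay measure from the
 * *first* time the task was queued.
 */
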
/*
 * Called when a process ceases being the active-running process
 * involuntarily due, typically, to expiring its time slice (this may
 * also be called when switching to the idle task). Now we can calculate
 * how long we ran.
 * Also, if the process is still in the TASK_RUNNING state, call
 * sched_info_queued() to mark that it has now again started waiting on
 * the runqueue.
 */
static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
{
	unsigned long long delta = rq_clock(rq) -
					t->sched_info.last_arrival;

	rq_sched_info_depart(rq, delta);

	if (t->state == TASK_RUNNING)
		sched_info_queued(rq, t);
}

/*
 * Called when tasks are switched involuntarily due, typically, to expiring
 * their time slice. (This may also be called when switching to or from
 * the idle task.) We are only called when prev != next.
 */
static inline void
__sched_info_switch(struct rq *rq,
		    struct task_struct *prev, struct task_struct *next)
{
	/*
	 * prev now departs the cpu. It's not interesting to record
	 * stats about how efficient we were at scheduling the idle
	 * process, however.
	 */
	if (prev != rq->idle)
		sched_info_depart(rq, prev);

	if (next != rq->idle)
		sched_info_arrive(rq, next);
}
static inline void
sched_info_switch(struct rq *rq,
		  struct task_struct *prev, struct task_struct *next)
{
	if (unlikely(sched_info_on()))
		__sched_info_switch(rq, prev, next);
}
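
/*
 * The per-task counters accumulated here (sched_info.run_delay and
 * sched_info.pcount) are what /proc/<pid>/schedstat reports back to
 * userspace.
 */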
#else
#define sched_info_queued(rq, t)	do { } while (0)
#define sched_info_reset_dequeued(t)	do { } while (0)
#define sched_info_dequeued(rq, t)	do { } while (0)
#define sched_info_depart(rq, t)	do { } while (0)
#define sched_info_arrive(rq, next)	do { } while (0)
#define sched_info_switch(rq, t, next)	do { } while (0)
#endif /* CONFIG_SCHED_INFO */