#ifdef CONFIG_SCHEDSTATS

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
	if (rq) {
		rq->rq_sched_info.run_delay += delta;
		rq->rq_sched_info.pcount++;
	}
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_cpu_time += delta;
}

static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_sched_info.run_delay += delta;
}
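
/*
 * The rq_cpu_time, rq_sched_info.run_delay and rq_sched_info.pcount
 * counters updated above are the per-cpu figures reported through
 * /proc/schedstat (see show_schedstat() in kernel/sched/stats.c).
 */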
#define schedstat_enabled()		static_branch_unlikely(&sched_schedstats)
#define schedstat_inc(var)		do { if (schedstat_enabled()) { var++; } } while (0)
#define schedstat_add(var, amt)		do { if (schedstat_enabled()) { var += (amt); } } while (0)
#define schedstat_set(var, val)		do { if (schedstat_enabled()) { var = (val); } } while (0)
#define schedstat_val(var)		(var)
#define schedstat_val_or_zero(var)	((schedstat_enabled()) ? (var) : 0)
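
/*
 * Typical usage, as a sketch (rq->yld_count is one real schedstats field,
 * incremented from sys_sched_yield() in kernel/sched/core.c):
 *
 *	schedstat_inc(rq->yld_count);
 *	schedstat_set(se->statistics.wait_start, rq_clock(rq));
 *
 * All such updates compile away to nothing unless CONFIG_SCHEDSTATS is
 * set, and are guarded at runtime by the sched_schedstats static key.
 */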

#else /* !CONFIG_SCHEDSTATS */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{}
#define schedstat_enabled()		0
#define schedstat_inc(var)		do { } while (0)
#define schedstat_add(var, amt)		do { } while (0)
#define schedstat_set(var, val)		do { } while (0)
#define schedstat_val(var)		0
#define schedstat_val_or_zero(var)	0
#endif /* CONFIG_SCHEDSTATS */

#ifdef CONFIG_SCHED_INFO
static inline void sched_info_reset_dequeued(struct task_struct *t)
{
	t->sched_info.last_queued = 0;
}

/*
 * We are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a CPU. We call this
 * routine from dequeue_task() to account for possible rq->clock skew
 * across CPUs; the delta taken on each CPU annuls the skew.
 */
static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t)
{
	unsigned long long now = rq_clock(rq), delta = 0;

	if (unlikely(sched_info_on()))
		if (t->sched_info.last_queued)
			delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;

	rq_sched_info_dequeued(rq, delta);
}
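
/*
 * A worked example of the skew argument above, with illustrative numbers:
 * a task is queued on CPU0 at rq_clock 100 and dequeued (say, for
 * migration) at 150, charging a delta of 50 measured against CPU0's
 * clock only. On CPU1 it is re-queued at that rq's own clock 400 and
 * arrives at 430, charging 30 more. Every delta is taken against a
 * single rq's clock, so a constant offset between the two clocks never
 * enters run_delay.
 */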

/*
 * Called when a task finally hits the CPU. We can now calculate how
 * long it was waiting to run. We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct rq *rq, struct task_struct *t)
{
	unsigned long long now = rq_clock(rq), delta = 0;

	if (t->sched_info.last_queued)
		delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;
	t->sched_info.last_arrival = now;
	t->sched_info.pcount++;

	rq_sched_info_arrive(rq, delta);
}

/*
 * This function is only called from enqueue_task(), and it only updates
 * the timestamp if it is not already set. It's assumed that
 * sched_info_dequeued() will clear that stamp when appropriate.
 */
static inline void sched_info_queued(struct rq *rq, struct task_struct *t)
{
	if (unlikely(sched_info_on()))
		if (!t->sched_info.last_queued)
			t->sched_info.last_queued = rq_clock(rq);
}

/*
 * Called when a process involuntarily ceases to be the currently running
 * process, typically because its time slice expired (this may also be
 * called when switching to the idle task). Now we can calculate how long
 * we ran. Also, if the process is still in the TASK_RUNNING state, call
 * sched_info_queued() to mark that it has again started waiting on
 * the runqueue.
 */
static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
{
	unsigned long long delta = rq_clock(rq) -
					t->sched_info.last_arrival;

	rq_sched_info_depart(rq, delta);

	if (t->state == TASK_RUNNING)
		sched_info_queued(rq, t);
}

/*
 * Called when tasks are switched involuntarily due, typically, to expiring
 * their time slice. (This may also be called when switching to or from
 * the idle task.) We are only called when prev != next.
 */
static inline void
__sched_info_switch(struct rq *rq,
		    struct task_struct *prev, struct task_struct *next)
{
	/*
	 * prev now departs the CPU. It's not interesting to record
	 * stats about how efficient we were at scheduling the idle
	 * process, however.
	 */
	if (prev != rq->idle)
		sched_info_depart(rq, prev);

	if (next != rq->idle)
		sched_info_arrive(rq, next);
}

static inline void
sched_info_switch(struct rq *rq,
		  struct task_struct *prev, struct task_struct *next)
{
	if (unlikely(sched_info_on()))
		__sched_info_switch(rq, prev, next);
}
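
/*
 * Caller note: sched_info_switch() is invoked from prepare_task_switch()
 * in kernel/sched/core.c, with the runqueue lock held, on every context
 * switch where prev != next.
 */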
#else /* !CONFIG_SCHED_INFO */
#define sched_info_queued(rq, t)	do { } while (0)
#define sched_info_reset_dequeued(t)	do { } while (0)
#define sched_info_dequeued(rq, t)	do { } while (0)
#define sched_info_depart(rq, t)	do { } while (0)
#define sched_info_arrive(rq, next)	do { } while (0)
#define sched_info_switch(rq, t, next)	do { } while (0)
#endif /* CONFIG_SCHED_INFO */
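
/*
 * The per-task fields maintained above are what /proc/<pid>/schedstat
 * reports (see proc_pid_schedstat() in fs/proc/base.c); the three
 * columns are, in order:
 *
 *	se.sum_exec_runtime	time spent on the CPU (ns)
 *	sched_info.run_delay	time spent waiting on a runqueue (ns)
 *	sched_info.pcount	number of timeslices run on this CPU
 *
 * A rough reading sketch (the numbers are made up):
 *
 *	$ cat /proc/self/schedstat
 *	12345678 2345678 42
 */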