/*
 * Pressure stall information for CPU, memory and IO
 *
 * Copyright (c) 2018 Facebook, Inc.
 * Author: Johannes Weiner <hannes@cmpxchg.org>
 *
 * Polling support by Suren Baghdasaryan <surenb@google.com>
 * Copyright (c) 2018 Google, Inc.
 *
 * When CPU, memory and IO are contended, tasks experience delays that
 * reduce throughput and introduce latencies into the workload. Memory
 * and IO contention, in addition, can cause a full loss of forward
 * progress in which the CPU goes idle.
 *
 * This code aggregates individual task delays into resource pressure
 * metrics that indicate problems with both workload health and
 * resource utilization.
 *
 * Model
 *
 * The time in which a task can execute on a CPU is our baseline for
 * productivity. Pressure expresses the amount of time in which this
 * potential cannot be realized due to resource contention.
 *
 * This concept of productivity has two components: the workload and
 * the CPU. To measure the impact of pressure on both, we define two
 * contention states for a resource: SOME and FULL.
 *
 * In the SOME state of a given resource, one or more tasks are
 * delayed on that resource. This affects the workload's ability to
 * perform work, but the CPU may still be executing other tasks.
 *
 * In the FULL state of a given resource, all non-idle tasks are
 * delayed on that resource such that nobody is advancing and the CPU
 * goes idle. This leaves both workload and CPU unproductive.
 *
 * (Naturally, the FULL state doesn't exist for the CPU resource.)
 *
 *	SOME = nr_delayed_tasks != 0
 *	FULL = nr_delayed_tasks != 0 && nr_running_tasks == 0
 *
 * The percentage of wallclock time spent in those compound stall
 * states gives pressure numbers between 0 and 100 for each resource,
 * where the SOME percentage indicates workload slowdowns and the FULL
 * percentage indicates reduced CPU utilization:
 *
 *	%SOME = time(SOME) / period
 *	%FULL = time(FULL) / period
 *
 * Multiple CPUs
 *
 * The more tasks and available CPUs there are, the more work can be
 * performed concurrently. This means that the potential that can go
 * unrealized due to resource contention *also* scales with non-idle
 * tasks and CPUs.
 *
 * Consider a scenario where 257 number crunching tasks are trying to
 * run concurrently on 256 CPUs. If we simply aggregated the task
 * states, we would have to conclude a CPU SOME pressure number of
 * 100%, since *somebody* is waiting on a runqueue at all
 * times. However, that is clearly not the amount of contention the
 * workload is experiencing: only one out of 256 possible execution
 * threads will be contended at any given time, or about 0.4%.
 *
 * Conversely, consider a scenario of 4 tasks and 4 CPUs where at any
 * given time *one* of the tasks is delayed due to a lack of memory.
 * Again, looking purely at the task state would yield a memory FULL
 * pressure number of 0%, since *somebody* is always making forward
 * progress. But again this wouldn't capture the amount of execution
 * potential lost, which is 1 out of 4 CPUs, or 25%.
 *
 * To calculate wasted potential (pressure) with multiple processors,
 * we have to base our calculation on the number of non-idle tasks in
 * conjunction with the number of available CPUs, which is the number
 * of potential execution threads. SOME then becomes the proportion of
 * delayed tasks to possible threads, and FULL is the share of possible
 * threads that are unproductive due to delays:
 *
 *	threads = min(nr_nonidle_tasks, nr_cpus)
 *	   SOME = min(nr_delayed_tasks / threads, 1)
 *	   FULL = (threads - min(nr_running_tasks, threads)) / threads
 *
 * For the 257 number crunchers on 256 CPUs, this yields:
 *
 *	threads = min(257, 256)
 *	   SOME = min(1 / 256, 1)             = 0.4%
 *	   FULL = (256 - min(257, 256)) / 256 = 0%
 *
 * For the 1 out of 4 memory-delayed tasks, this yields:
 *
 *	threads = min(4, 4)
 *	   SOME = min(1 / 4, 1)               = 25%
 *	   FULL = (4 - min(3, 4)) / 4         = 25%
 *
 * [ Substitute nr_cpus with 1, and you can see that it's a natural
 *   extension of the single-CPU model. ]
 *
 * Implementation
 *
 * To assess the precise time spent in each such state, we would have
 * to freeze the system on task changes and start/stop the state
 * clocks accordingly. Obviously that doesn't scale in practice.
 *
 * Because the scheduler aims to distribute the compute load evenly
 * among the available CPUs, we can track task state locally to each
 * CPU and, at much lower frequency, extrapolate the global state for
 * the cumulative stall times and the running averages.
 *
 * For each runqueue, we track:
 *
 *	tSOME[cpu] = time(nr_delayed_tasks[cpu] != 0)
 *	tFULL[cpu] = time(nr_delayed_tasks[cpu] && !nr_running_tasks[cpu])
 *	tNONIDLE[cpu] = time(nr_nonidle_tasks[cpu] != 0)
 *
 * and then periodically aggregate:
 *
 *	tNONIDLE = sum(tNONIDLE[i])
 *
 *	   tSOME = sum(tSOME[i] * tNONIDLE[i]) / tNONIDLE
 *	   tFULL = sum(tFULL[i] * tNONIDLE[i]) / tNONIDLE
 *
 *	   %SOME = tSOME / period
 *	   %FULL = tFULL / period
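 *
 * [ A hypothetical two-CPU illustration of the weighting, added for
 *   clarity: if CPU0 is non-idle for the whole period with 60% of it
 *   spent in SOME while CPU1 sits entirely idle, the aggregate is
 *   (60% * 1 + 0% * 0) / (1 + 0) = 60% SOME rather than 30%, so idle
 *   CPUs don't dilute the pressure the running workload actually
 *   experiences. ]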
 *
 * This gives us an approximation of pressure that is practical
 * cost-wise, yet way more sensitive and accurate than periodic
 * sampling of the aggregate task states would be.
 */

#include "../workqueue_internal.h"
#include <linux/sched/loadavg.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/seqlock.h>
#include <linux/uaccess.h>
#include <linux/cgroup.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/ctype.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/psi.h>
#include "sched.h"

static int psi_bug __read_mostly;

DEFINE_STATIC_KEY_FALSE(psi_disabled);

#ifdef CONFIG_PSI_DEFAULT_DISABLED
static bool psi_enable;
#else
static bool psi_enable = true;
#endif
static int __init setup_psi(char *str)
{
        return kstrtobool(str, &psi_enable) == 0;
}
__setup("psi=", setup_psi);

/* Running averages - we need to be higher-res than loadavg */
#define PSI_FREQ	(2*HZ+1)	/* 2 sec intervals */
#define EXP_10s		1677		/* 1/exp(2s/10s) as fixed-point */
#define EXP_60s		1981		/* 1/exp(2s/60s) */
#define EXP_300s	2034		/* 1/exp(2s/300s) */
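/*
 * These appear to map onto the loadavg fixed-point scheme: each is
 * exp(-2s/T) scaled by FIXED_1 (2048), e.g. exp(-2/10) * 2048 ~= 1677,
 * so they can be fed straight into calc_load() when ageing the
 * 10s/60s/300s averages below.
 */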

/* PSI trigger definitions */
#define WINDOW_MIN_US 500000	/* Min window size is 500ms */
#define WINDOW_MAX_US 10000000	/* Max window size is 10s */
#define UPDATES_PER_WINDOW 10	/* 10 updates per window */

/* Sampling frequency in nanoseconds */
static u64 psi_period __read_mostly;

/* System-level pressure and stall tracking */
static DEFINE_PER_CPU(struct psi_group_cpu, system_group_pcpu);
struct psi_group psi_system = {
        .pcpu = &system_group_pcpu,
};

static void psi_avgs_work(struct work_struct *work);

static void group_init(struct psi_group *group)
{
        int cpu;

        for_each_possible_cpu(cpu)
                seqcount_init(&per_cpu_ptr(group->pcpu, cpu)->seq);
        group->avg_next_update = sched_clock() + psi_period;
        INIT_DELAYED_WORK(&group->avgs_work, psi_avgs_work);
        mutex_init(&group->avgs_lock);
        /* Init trigger-related members */
        atomic_set(&group->poll_scheduled, 0);
        mutex_init(&group->trigger_lock);
        INIT_LIST_HEAD(&group->triggers);
        memset(group->nr_triggers, 0, sizeof(group->nr_triggers));
        group->poll_states = 0;
        group->poll_min_period = U32_MAX;
        memset(group->polling_total, 0, sizeof(group->polling_total));
        group->polling_next_update = ULLONG_MAX;
        group->polling_until = 0;
        rcu_assign_pointer(group->poll_kworker, NULL);
}

void __init psi_init(void)
{
        if (!psi_enable) {
                static_branch_enable(&psi_disabled);
                return;
        }

        psi_period = jiffies_to_nsecs(PSI_FREQ);
        group_init(&psi_system);
}

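/*
 * Evaluate the per-CPU versions of the SOME/FULL definitions from the
 * model above against this group's task counts for one CPU: e.g. memory
 * FULL means at least one task is stalled on memory while no task on
 * this CPU is runnable, and CPU SOME means more than one runnable task
 * is competing for the CPU.
 */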
static bool test_state(unsigned int *tasks, enum psi_states state)
{
        switch (state) {
        case PSI_IO_SOME:
                return tasks[NR_IOWAIT];
        case PSI_IO_FULL:
                return tasks[NR_IOWAIT] && !tasks[NR_RUNNING];
        case PSI_MEM_SOME:
                return tasks[NR_MEMSTALL];
        case PSI_MEM_FULL:
                return tasks[NR_MEMSTALL] && !tasks[NR_RUNNING];
        case PSI_CPU_SOME:
                return tasks[NR_RUNNING] > 1;
        case PSI_NONIDLE:
                return tasks[NR_IOWAIT] || tasks[NR_MEMSTALL] ||
                        tasks[NR_RUNNING];
        default:
                return false;
        }
}

static void get_recent_times(struct psi_group *group, int cpu,
                             enum psi_aggregators aggregator, u32 *times,
                             u32 *pchanged_states)
{
        struct psi_group_cpu *groupc = per_cpu_ptr(group->pcpu, cpu);
        u64 now, state_start;
        enum psi_states s;
        unsigned int seq;
        u32 state_mask;

        *pchanged_states = 0;

        /* Snapshot a coherent view of the CPU state */
        do {
                seq = read_seqcount_begin(&groupc->seq);
                now = cpu_clock(cpu);
                memcpy(times, groupc->times, sizeof(groupc->times));
                state_mask = groupc->state_mask;
                state_start = groupc->state_start;
        } while (read_seqcount_retry(&groupc->seq, seq));

        /* Calculate state time deltas against the previous snapshot */
        for (s = 0; s < NR_PSI_STATES; s++) {
                u32 delta;
                /*
                 * In addition to already concluded states, we also
                 * incorporate currently active states on the CPU,
                 * since states may last for many sampling periods.
                 *
                 * This way we keep our delta sampling buckets small
                 * (u32) and our reported pressure close to what's
                 * actually happening.
                 */
                if (state_mask & (1 << s))
                        times[s] += now - state_start;

                delta = times[s] - groupc->times_prev[aggregator][s];
                groupc->times_prev[aggregator][s] = times[s];

                times[s] = delta;
                if (delta)
                        *pchanged_states |= (1 << s);
        }
}

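/*
 * The averages are kept in the same 11-bit fixed point as the load
 * average code: the most recent sample is converted to a percentage,
 * scaled by FIXED_1 and folded in with calc_load(), which is why
 * psi_show() later prints avg[] with LOAD_INT()/LOAD_FRAC().
 */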
static void calc_avgs(unsigned long avg[3], int missed_periods,
                      u64 time, u64 period)
{
        unsigned long pct;

        /* Fill in zeroes for periods of no activity */
        if (missed_periods) {
                avg[0] = calc_load_n(avg[0], EXP_10s, 0, missed_periods);
                avg[1] = calc_load_n(avg[1], EXP_60s, 0, missed_periods);
                avg[2] = calc_load_n(avg[2], EXP_300s, 0, missed_periods);
        }

        /* Sample the most recent active period */
        pct = div_u64(time * 100, period);
        pct *= FIXED_1;
        avg[0] = calc_load(avg[0], EXP_10s, pct);
        avg[1] = calc_load(avg[1], EXP_60s, pct);
        avg[2] = calc_load(avg[2], EXP_300s, pct);
}

static void collect_percpu_times(struct psi_group *group,
                                 enum psi_aggregators aggregator,
                                 u32 *pchanged_states)
{
        u64 deltas[NR_PSI_STATES - 1] = { 0, };
        unsigned long nonidle_total = 0;
        u32 changed_states = 0;
        int cpu;
        int s;

        /*
         * Collect the per-cpu time buckets and average them into a
         * single time sample that is normalized to wallclock time.
         *
         * For averaging, each CPU is weighted by its non-idle time in
         * the sampling period. This eliminates artifacts from uneven
         * loading, or even entirely idle CPUs.
         */
        for_each_possible_cpu(cpu) {
                u32 times[NR_PSI_STATES];
                u32 nonidle;
                u32 cpu_changed_states;

                get_recent_times(group, cpu, aggregator, times,
                                 &cpu_changed_states);
                changed_states |= cpu_changed_states;

                nonidle = nsecs_to_jiffies(times[PSI_NONIDLE]);
                nonidle_total += nonidle;

                for (s = 0; s < PSI_NONIDLE; s++)
                        deltas[s] += (u64)times[s] * nonidle;
        }

        /*
         * Integrate the sample into the running statistics that are
         * reported to userspace: the cumulative stall times and the
         * decaying averages.
         *
         * Pressure percentages are sampled at PSI_FREQ. We might be
         * called more often when the user polls more frequently than
         * that; we might be called less often when there is no task
         * activity, thus no data, and clock ticks are sporadic. The
         * below handles both.
         */

        /* total= */
        for (s = 0; s < NR_PSI_STATES - 1; s++)
                group->total[aggregator][s] +=
                                div_u64(deltas[s], max(nonidle_total, 1UL));

        if (pchanged_states)
                *pchanged_states = changed_states;
}

static u64 update_averages(struct psi_group *group, u64 now)
{
        unsigned long missed_periods = 0;
        u64 expires, period;
        u64 avg_next_update;
        int s;

        /* avgX= */
        expires = group->avg_next_update;
        if (now - expires >= psi_period)
                missed_periods = div_u64(now - expires, psi_period);

        /*
         * The periodic clock tick can get delayed for various
         * reasons, especially on loaded systems. To avoid clock
         * drift, we schedule the clock in fixed psi_period intervals.
         * But the deltas we sample out of the per-cpu buckets above
         * are based on the actual time elapsing between clock ticks.
         */
        avg_next_update = expires + ((1 + missed_periods) * psi_period);
        period = now - (group->avg_last_update + (missed_periods * psi_period));
        group->avg_last_update = now;

        for (s = 0; s < NR_PSI_STATES - 1; s++) {
                u32 sample;

                sample = group->total[PSI_AVGS][s] - group->avg_total[s];
                /*
                 * Due to the lockless sampling of the time buckets,
                 * recorded time deltas can slip into the next period,
                 * which under full pressure can result in samples in
                 * excess of the period length.
                 *
                 * We don't want to report non-sensical pressures in
                 * excess of 100%, nor do we want to drop such events
                 * on the floor. Instead we punt any overage into the
                 * future until pressure subsides. By doing this we
                 * don't underreport the occurring pressure curve, we
                 * just report it delayed by one period length.
                 *
                 * The error isn't cumulative. As soon as another
                 * delta slips from a period P to P+1, by definition
                 * it frees up its time T in P.
                 */
                if (sample > period)
                        sample = period;
                group->avg_total[s] += sample;
                calc_avgs(group->avg[s], missed_periods, sample, period);
        }

        return avg_next_update;
}

static void psi_avgs_work(struct work_struct *work)
{
        struct delayed_work *dwork;
        struct psi_group *group;
        u32 changed_states;
        bool nonidle;
        u64 now;

        dwork = to_delayed_work(work);
        group = container_of(dwork, struct psi_group, avgs_work);

        mutex_lock(&group->avgs_lock);

        now = sched_clock();

        collect_percpu_times(group, PSI_AVGS, &changed_states);
        nonidle = changed_states & (1 << PSI_NONIDLE);
        /*
         * If there is task activity, periodically fold the per-cpu
         * times and feed samples into the running averages. If things
         * are idle and there is no data to process, stop the clock.
         * Once restarted, we'll catch up the running averages in one
         * go - see calc_avgs() and missed_periods.
         */
        if (now >= group->avg_next_update)
                group->avg_next_update = update_averages(group, now);

        if (nonidle) {
                schedule_delayed_work(dwork, nsecs_to_jiffies(
                                group->avg_next_update - now) + 1);
        }

        mutex_unlock(&group->avgs_lock);
}

/* Trigger tracking window manipulations */
static void window_reset(struct psi_window *win, u64 now, u64 value,
                         u64 prev_growth)
{
        win->start_time = now;
        win->start_value = value;
        win->prev_growth = prev_growth;
}

/*
 * PSI growth tracking window update and growth calculation routine.
 *
 * This approximates a sliding tracking window by interpolating
 * partially elapsed windows using historical growth data from the
 * previous intervals. This minimizes memory requirements (by not storing
 * all the intermediate values in the previous window) and simplifies
 * the calculations. It works well because the PSI signal changes only
 * in the positive direction, and over relatively small window sizes
 * the growth is close to linear.
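 *
 * For instance (an illustrative calculation, not from the original
 * comment): with a 1s window that is 250ms into its current period
 * and 100ms of growth recorded over the previous window, the reported
 * growth is whatever stall accumulated in those 250ms plus
 * 100ms * 750/1000 = 75ms interpolated from the previous window.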
 */
static u64 window_update(struct psi_window *win, u64 now, u64 value)
{
        u64 elapsed;
        u64 growth;

        elapsed = now - win->start_time;
        growth = value - win->start_value;
        /*
         * After each tracking window passes win->start_value and
         * win->start_time get reset and win->prev_growth stores
         * the average per-window growth of the previous window.
         * win->prev_growth is then used to interpolate additional
         * growth from the previous window assuming it was linear.
         */
        if (elapsed > win->size)
                window_reset(win, now, value, growth);
        else {
                u32 remaining;

                remaining = win->size - elapsed;
                growth += div_u64(win->prev_growth * remaining, win->size);
        }

        return growth;
}

static void init_triggers(struct psi_group *group, u64 now)
{
        struct psi_trigger *t;

        list_for_each_entry(t, &group->triggers, node)
                window_reset(&t->win, now,
                             group->total[PSI_POLL][t->state], 0);
        memcpy(group->polling_total, group->total[PSI_POLL],
               sizeof(group->polling_total));
        group->polling_next_update = now + group->poll_min_period;
}

static u64 update_triggers(struct psi_group *group, u64 now)
{
        struct psi_trigger *t;
        bool new_stall = false;
        u64 *total = group->total[PSI_POLL];

        /*
         * On subsequent updates, calculate growth deltas and let
         * watchers know when their specified thresholds are exceeded.
         */
        list_for_each_entry(t, &group->triggers, node) {
                u64 growth;

                /* Check for stall activity */
                if (group->polling_total[t->state] == total[t->state])
                        continue;

                /*
                 * Multiple triggers might be looking at the same state,
                 * remember to update group->polling_total[] once we've
                 * been through all of them. Also remember to extend the
                 * polling time if we see new stall activity.
                 */
                new_stall = true;

                /* Calculate growth since last update */
                growth = window_update(&t->win, now, total[t->state]);
                if (growth < t->threshold)
                        continue;

                /* Limit event signaling to once per window */
                if (now < t->last_event_time + t->win.size)
                        continue;

                /* Generate an event */
                if (cmpxchg(&t->event, 0, 1) == 0)
                        wake_up_interruptible(&t->event_wait);
                t->last_event_time = now;
        }

        if (new_stall)
                memcpy(group->polling_total, total,
                       sizeof(group->polling_total));

        return now + group->poll_min_period;
}

/*
 * Schedule polling if it's not already scheduled. It's safe to call this even
 * from the hotpath: although kthread_queue_delayed_work takes the worker->lock
 * spinlock, that lock is never contended here because the poll_scheduled
 * atomic prevents such competition.
 */
static void psi_schedule_poll_work(struct psi_group *group, unsigned long delay)
{
        struct kthread_worker *kworker;

        /* Do not reschedule if already scheduled */
        if (atomic_cmpxchg(&group->poll_scheduled, 0, 1) != 0)
                return;

        rcu_read_lock();

        kworker = rcu_dereference(group->poll_kworker);
        /*
         * kworker might be NULL in case psi_trigger_destroy races with
         * psi_task_change (hotpath) which can't use locks
         */
        if (likely(kworker))
                kthread_queue_delayed_work(kworker, &group->poll_work, delay);
        else
                atomic_set(&group->poll_scheduled, 0);

        rcu_read_unlock();
}

static void psi_poll_work(struct kthread_work *work)
{
        struct kthread_delayed_work *dwork;
        struct psi_group *group;
        u32 changed_states;
        u64 now;

        dwork = container_of(work, struct kthread_delayed_work, work);
        group = container_of(dwork, struct psi_group, poll_work);

        atomic_set(&group->poll_scheduled, 0);

        mutex_lock(&group->trigger_lock);

        now = sched_clock();

        collect_percpu_times(group, PSI_POLL, &changed_states);

        if (changed_states & group->poll_states) {
                /* Initialize trigger windows when entering polling mode */
                if (now > group->polling_until)
                        init_triggers(group, now);

                /*
                 * Keep the monitor active for at least the duration of the
                 * minimum tracking window as long as monitor states are
                 * changing.
                 */
                group->polling_until = now +
                        group->poll_min_period * UPDATES_PER_WINDOW;
        }

        if (now > group->polling_until) {
                group->polling_next_update = ULLONG_MAX;
                goto out;
        }

        if (now >= group->polling_next_update)
                group->polling_next_update = update_triggers(group, now);

        psi_schedule_poll_work(group,
                nsecs_to_jiffies(group->polling_next_update - now) + 1);

out:
        mutex_unlock(&group->trigger_lock);
}

static void record_times(struct psi_group_cpu *groupc, int cpu,
                         bool memstall_tick)
{
        u32 delta;
        u64 now;

        now = cpu_clock(cpu);
        delta = now - groupc->state_start;
        groupc->state_start = now;

        if (groupc->state_mask & (1 << PSI_IO_SOME)) {
                groupc->times[PSI_IO_SOME] += delta;
                if (groupc->state_mask & (1 << PSI_IO_FULL))
                        groupc->times[PSI_IO_FULL] += delta;
        }

        if (groupc->state_mask & (1 << PSI_MEM_SOME)) {
                groupc->times[PSI_MEM_SOME] += delta;
                if (groupc->state_mask & (1 << PSI_MEM_FULL))
                        groupc->times[PSI_MEM_FULL] += delta;
                else if (memstall_tick) {
                        u32 sample;
                        /*
                         * Since we care about lost potential, a
                         * memstall is FULL when there are no other
                         * working tasks, but also when the CPU is
                         * actively reclaiming and nothing productive
                         * could run even if it were runnable.
                         *
                         * When the timer tick sees a reclaiming CPU,
                         * regardless of runnable tasks, sample a FULL
                         * tick (or less if it hasn't been a full tick
                         * since the last state change).
                         */
                        sample = min(delta, (u32)jiffies_to_nsecs(1));
                        groupc->times[PSI_MEM_FULL] += sample;
                }
        }

        if (groupc->state_mask & (1 << PSI_CPU_SOME))
                groupc->times[PSI_CPU_SOME] += delta;

        if (groupc->state_mask & (1 << PSI_NONIDLE))
                groupc->times[PSI_NONIDLE] += delta;
}

static u32 psi_group_change(struct psi_group *group, int cpu,
                            unsigned int clear, unsigned int set)
{
        struct psi_group_cpu *groupc;
        unsigned int t, m;
        enum psi_states s;
        u32 state_mask = 0;

        groupc = per_cpu_ptr(group->pcpu, cpu);

        /*
         * First we assess the aggregate resource states this CPU's
         * tasks have been in since the last change, and account any
         * SOME and FULL time these may have resulted in.
         *
         * Then we update the task counts according to the state
         * change requested through the @clear and @set bits.
         */
        write_seqcount_begin(&groupc->seq);

        record_times(groupc, cpu, false);

        for (t = 0, m = clear; m; m &= ~(1 << t), t++) {
                if (!(m & (1 << t)))
                        continue;
                if (groupc->tasks[t] == 0 && !psi_bug) {
                        printk_deferred(KERN_ERR "psi: task underflow! cpu=%d t=%d tasks=[%u %u %u] clear=%x set=%x\n",
                                        cpu, t, groupc->tasks[0],
                                        groupc->tasks[1], groupc->tasks[2],
                                        clear, set);
                        psi_bug = 1;
                }
                groupc->tasks[t]--;
        }

        for (t = 0; set; set &= ~(1 << t), t++)
                if (set & (1 << t))
                        groupc->tasks[t]++;

        /* Calculate state mask representing active states */
        for (s = 0; s < NR_PSI_STATES; s++) {
                if (test_state(groupc->tasks, s))
                        state_mask |= (1 << s);
        }
        groupc->state_mask = state_mask;

        write_seqcount_end(&groupc->seq);

        return state_mask;
}

static struct psi_group *iterate_groups(struct task_struct *task, void **iter)
{
#ifdef CONFIG_CGROUPS
        struct cgroup *cgroup = NULL;

        if (!*iter)
                cgroup = task->cgroups->dfl_cgrp;
        else if (*iter == &psi_system)
                return NULL;
        else
                cgroup = cgroup_parent(*iter);

        if (cgroup && cgroup_parent(cgroup)) {
                *iter = cgroup;
                return cgroup_psi(cgroup);
        }
#else
        if (*iter)
                return NULL;
#endif
        *iter = &psi_system;
        return &psi_system;
}

void psi_task_change(struct task_struct *task, int clear, int set)
{
        int cpu = task_cpu(task);
        struct psi_group *group;
        bool wake_clock = true;
        void *iter = NULL;

        if (!task->pid)
                return;

        if (((task->psi_flags & set) ||
             (task->psi_flags & clear) != clear) &&
            !psi_bug) {
                printk_deferred(KERN_ERR "psi: inconsistent task state! task=%d:%s cpu=%d psi_flags=%x clear=%x set=%x\n",
                                task->pid, task->comm, cpu,
                                task->psi_flags, clear, set);
                psi_bug = 1;
        }

        task->psi_flags &= ~clear;
        task->psi_flags |= set;

        /*
         * Periodic aggregation shuts off if there is a period of no
         * task changes, so we wake it back up if necessary. However,
         * don't do this if the task change is the aggregation worker
         * itself going to sleep, or we'll ping-pong forever.
         */
        if (unlikely((clear & TSK_RUNNING) &&
                     (task->flags & PF_WQ_WORKER) &&
                     wq_worker_last_func(task) == psi_avgs_work))
                wake_clock = false;

        while ((group = iterate_groups(task, &iter))) {
                u32 state_mask = psi_group_change(group, cpu, clear, set);

                if (state_mask & group->poll_states)
                        psi_schedule_poll_work(group, 1);

                if (wake_clock && !delayed_work_pending(&group->avgs_work))
                        schedule_delayed_work(&group->avgs_work, PSI_FREQ);
        }
}

void psi_memstall_tick(struct task_struct *task, int cpu)
{
        struct psi_group *group;
        void *iter = NULL;

        while ((group = iterate_groups(task, &iter))) {
                struct psi_group_cpu *groupc;

                groupc = per_cpu_ptr(group->pcpu, cpu);
                write_seqcount_begin(&groupc->seq);
                record_times(groupc, cpu, true);
                write_seqcount_end(&groupc->seq);
        }
}

/**
 * psi_memstall_enter - mark the beginning of a memory stall section
 * @flags: flags to handle nested sections
 *
 * Marks the calling task as being stalled due to a lack of memory,
 * such as waiting for a refault or performing reclaim.
 */
void psi_memstall_enter(unsigned long *flags)
{
        struct rq_flags rf;
        struct rq *rq;

        if (static_branch_likely(&psi_disabled))
                return;

        *flags = current->flags & PF_MEMSTALL;
        if (*flags)
                return;
        /*
         * PF_MEMSTALL setting & accounting needs to be atomic wrt
         * changes to the task's scheduling state, otherwise we can
         * race with CPU migration.
         */
        rq = this_rq_lock_irq(&rf);

        current->flags |= PF_MEMSTALL;
        psi_task_change(current, 0, TSK_MEMSTALL);

        rq_unlock_irq(rq, &rf);
}

/**
 * psi_memstall_leave - mark the end of a memory stall section
 * @flags: flags to handle nested memdelay sections
 *
 * Marks the calling task as no longer stalled due to lack of memory.
 */
void psi_memstall_leave(unsigned long *flags)
{
        struct rq_flags rf;
        struct rq *rq;

        if (static_branch_likely(&psi_disabled))
                return;

        if (*flags)
                return;
        /*
         * PF_MEMSTALL clearing & accounting needs to be atomic wrt
         * changes to the task's scheduling state, otherwise we could
         * race with CPU migration.
         */
        rq = this_rq_lock_irq(&rf);

        current->flags &= ~PF_MEMSTALL;
        psi_task_change(current, TSK_MEMSTALL, 0);

        rq_unlock_irq(rq, &rf);
}

#ifdef CONFIG_CGROUPS
int psi_cgroup_alloc(struct cgroup *cgroup)
{
        if (static_branch_likely(&psi_disabled))
                return 0;

        cgroup->psi.pcpu = alloc_percpu(struct psi_group_cpu);
        if (!cgroup->psi.pcpu)
                return -ENOMEM;
        group_init(&cgroup->psi);
        return 0;
}

void psi_cgroup_free(struct cgroup *cgroup)
{
        if (static_branch_likely(&psi_disabled))
                return;

        cancel_delayed_work_sync(&cgroup->psi.avgs_work);
        free_percpu(cgroup->psi.pcpu);
        /* All triggers must be removed by now */
        WARN_ONCE(cgroup->psi.poll_states, "psi: trigger leak\n");
}

/**
 * cgroup_move_task - move task to a different cgroup
 * @task: the task
 * @to: the target css_set
 *
 * Move task to a new cgroup and safely migrate its associated stall
 * state between the different groups.
 *
 * This function acquires the task's rq lock to lock out concurrent
 * changes to the task's scheduling state and - in case the task is
 * running - concurrent changes to its stall state.
 */
void cgroup_move_task(struct task_struct *task, struct css_set *to)
{
        unsigned int task_flags = 0;
        struct rq_flags rf;
        struct rq *rq;

        if (static_branch_likely(&psi_disabled)) {
                /*
                 * Lame to do this here, but the scheduler cannot be locked
                 * from the outside, so we move cgroups from inside sched/.
                 */
                rcu_assign_pointer(task->cgroups, to);
                return;
        }

        rq = task_rq_lock(task, &rf);

        if (task_on_rq_queued(task))
                task_flags = TSK_RUNNING;
        else if (task->in_iowait)
                task_flags = TSK_IOWAIT;

        if (task->flags & PF_MEMSTALL)
                task_flags |= TSK_MEMSTALL;

        if (task_flags)
                psi_task_change(task, task_flags, 0);

        /* See comment above */
        rcu_assign_pointer(task->cgroups, to);

        if (task_flags)
                psi_task_change(task, 0, task_flags);

        task_rq_unlock(rq, task, &rf);
}
#endif /* CONFIG_CGROUPS */

int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res)
{
        int full;
        u64 now;

        if (static_branch_likely(&psi_disabled))
                return -EOPNOTSUPP;

        /* Update averages before reporting them */
        mutex_lock(&group->avgs_lock);
        now = sched_clock();
        collect_percpu_times(group, PSI_AVGS, NULL);
        if (now >= group->avg_next_update)
                group->avg_next_update = update_averages(group, now);
        mutex_unlock(&group->avgs_lock);

        for (full = 0; full < 2 - (res == PSI_CPU); full++) {
                unsigned long avg[3];
                u64 total;
                int w;

                for (w = 0; w < 3; w++)
                        avg[w] = group->avg[res * 2 + full][w];
                total = div_u64(group->total[PSI_AVGS][res * 2 + full],
                                NSEC_PER_USEC);

                seq_printf(m, "%s avg10=%lu.%02lu avg60=%lu.%02lu avg300=%lu.%02lu total=%llu\n",
                           full ? "full" : "some",
                           LOAD_INT(avg[0]), LOAD_FRAC(avg[0]),
                           LOAD_INT(avg[1]), LOAD_FRAC(avg[1]),
                           LOAD_INT(avg[2]), LOAD_FRAC(avg[2]),
                           total);
        }

        return 0;
}

static int psi_io_show(struct seq_file *m, void *v)
{
        return psi_show(m, &psi_system, PSI_IO);
}

static int psi_memory_show(struct seq_file *m, void *v)
{
        return psi_show(m, &psi_system, PSI_MEM);
}

static int psi_cpu_show(struct seq_file *m, void *v)
{
        return psi_show(m, &psi_system, PSI_CPU);
}

static int psi_io_open(struct inode *inode, struct file *file)
{
        return single_open(file, psi_io_show, NULL);
}

static int psi_memory_open(struct inode *inode, struct file *file)
{
        return single_open(file, psi_memory_show, NULL);
}

static int psi_cpu_open(struct inode *inode, struct file *file)
{
        return single_open(file, psi_cpu_show, NULL);
}

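/*
 * A trigger is configured by writing a string of the form
 * "some <threshold_us> <window_us>" or "full <threshold_us> <window_us>"
 * to one of the pressure files; for example, writing "some 150000 1000000"
 * to /proc/pressure/memory should request a wakeup whenever memory SOME
 * stall time grows by more than 150ms within a 1s window (see the PSI
 * documentation for the precise semantics).
 */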
struct psi_trigger *psi_trigger_create(struct psi_group *group,
                        char *buf, size_t nbytes, enum psi_res res)
{
        struct psi_trigger *t;
        enum psi_states state;
        u32 threshold_us;
        u32 window_us;

        if (static_branch_likely(&psi_disabled))
                return ERR_PTR(-EOPNOTSUPP);

        if (sscanf(buf, "some %u %u", &threshold_us, &window_us) == 2)
                state = PSI_IO_SOME + res * 2;
        else if (sscanf(buf, "full %u %u", &threshold_us, &window_us) == 2)
                state = PSI_IO_FULL + res * 2;
        else
                return ERR_PTR(-EINVAL);

        if (state >= PSI_NONIDLE)
                return ERR_PTR(-EINVAL);

        if (window_us < WINDOW_MIN_US ||
            window_us > WINDOW_MAX_US)
                return ERR_PTR(-EINVAL);

        /* Check threshold */
        if (threshold_us == 0 || threshold_us > window_us)
                return ERR_PTR(-EINVAL);

        t = kmalloc(sizeof(*t), GFP_KERNEL);
        if (!t)
                return ERR_PTR(-ENOMEM);

        t->group = group;
        t->state = state;
        t->threshold = threshold_us * NSEC_PER_USEC;
        t->win.size = window_us * NSEC_PER_USEC;
        window_reset(&t->win, 0, 0, 0);

        t->event = 0;
        t->last_event_time = 0;
        init_waitqueue_head(&t->event_wait);
        kref_init(&t->refcount);

        mutex_lock(&group->trigger_lock);

        if (!rcu_access_pointer(group->poll_kworker)) {
                struct sched_param param = {
                        .sched_priority = MAX_RT_PRIO - 1,
                };
                struct kthread_worker *kworker;

                kworker = kthread_create_worker(0, "psimon");
                if (IS_ERR(kworker)) {
                        kfree(t);
                        mutex_unlock(&group->trigger_lock);
                        return ERR_CAST(kworker);
                }
                sched_setscheduler(kworker->task, SCHED_FIFO, &param);
                kthread_init_delayed_work(&group->poll_work,
                                psi_poll_work);
                rcu_assign_pointer(group->poll_kworker, kworker);
        }

        list_add(&t->node, &group->triggers);
        group->poll_min_period = min(group->poll_min_period,
                div_u64(t->win.size, UPDATES_PER_WINDOW));
        group->nr_triggers[t->state]++;
        group->poll_states |= (1 << t->state);

        mutex_unlock(&group->trigger_lock);

        return t;
}

static void psi_trigger_destroy(struct kref *ref)
{
        struct psi_trigger *t = container_of(ref, struct psi_trigger, refcount);
        struct psi_group *group = t->group;
        struct kthread_worker *kworker_to_destroy = NULL;

        if (static_branch_likely(&psi_disabled))
                return;

        /*
         * Wakeup waiters to stop polling. Can happen if cgroup is deleted
         * from under a polling process.
         */
        wake_up_interruptible(&t->event_wait);

        mutex_lock(&group->trigger_lock);

        if (!list_empty(&t->node)) {
                struct psi_trigger *tmp;
                u64 period = ULLONG_MAX;

                list_del(&t->node);
                group->nr_triggers[t->state]--;
                if (!group->nr_triggers[t->state])
                        group->poll_states &= ~(1 << t->state);
                /* reset min update period for the remaining triggers */
                list_for_each_entry(tmp, &group->triggers, node)
                        period = min(period, div_u64(tmp->win.size,
                                        UPDATES_PER_WINDOW));
                group->poll_min_period = period;
                /* Destroy poll_kworker when the last trigger is destroyed */
                if (group->poll_states == 0) {
                        group->polling_until = 0;
                        kworker_to_destroy = rcu_dereference_protected(
                                        group->poll_kworker,
                                        lockdep_is_held(&group->trigger_lock));
                        rcu_assign_pointer(group->poll_kworker, NULL);
                }
        }

        mutex_unlock(&group->trigger_lock);

        /*
         * Wait for both *trigger_ptr from psi_trigger_replace and
         * poll_kworker RCUs to complete their read-side critical sections
         * before destroying the trigger and optionally the poll_kworker
         */
        synchronize_rcu();
        /*
         * Destroy the kworker after releasing trigger_lock to prevent a
         * deadlock while waiting for psi_poll_work to acquire trigger_lock
         */
        if (kworker_to_destroy) {
                kthread_cancel_delayed_work_sync(&group->poll_work);
                kthread_destroy_worker(kworker_to_destroy);
        }
        kfree(t);
}

void psi_trigger_replace(void **trigger_ptr, struct psi_trigger *new)
{
        struct psi_trigger *old = *trigger_ptr;

        if (static_branch_likely(&psi_disabled))
                return;

        rcu_assign_pointer(*trigger_ptr, new);
        if (old)
                kref_put(&old->refcount, psi_trigger_destroy);
}

__poll_t psi_trigger_poll(void **trigger_ptr,
                          struct file *file, poll_table *wait)
{
        __poll_t ret = DEFAULT_POLLMASK;
        struct psi_trigger *t;

        if (static_branch_likely(&psi_disabled))
                return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;

        rcu_read_lock();

        t = rcu_dereference(*(void __rcu __force **)trigger_ptr);
        if (!t) {
                rcu_read_unlock();
                return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;
        }
        kref_get(&t->refcount);

        rcu_read_unlock();

        poll_wait(file, &t->event_wait, wait);

        if (cmpxchg(&t->event, 1, 0) == 1)
                ret |= EPOLLPRI;

        kref_put(&t->refcount, psi_trigger_destroy);

        return ret;
}

static ssize_t psi_write(struct file *file, const char __user *user_buf,
                         size_t nbytes, enum psi_res res)
{
        char buf[32];
        size_t buf_size;
        struct seq_file *seq;
        struct psi_trigger *new;

        if (static_branch_likely(&psi_disabled))
                return -EOPNOTSUPP;

        buf_size = min(nbytes, (sizeof(buf) - 1));
        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size - 1] = '\0';

        new = psi_trigger_create(&psi_system, buf, nbytes, res);
        if (IS_ERR(new))
                return PTR_ERR(new);

        seq = file->private_data;
        /* Take seq->lock to protect seq->private from concurrent writes */
        mutex_lock(&seq->lock);
        psi_trigger_replace(&seq->private, new);
        mutex_unlock(&seq->lock);

        return nbytes;
}

static ssize_t psi_io_write(struct file *file, const char __user *user_buf,
                            size_t nbytes, loff_t *ppos)
{
        return psi_write(file, user_buf, nbytes, PSI_IO);
}

static ssize_t psi_memory_write(struct file *file, const char __user *user_buf,
                                size_t nbytes, loff_t *ppos)
{
        return psi_write(file, user_buf, nbytes, PSI_MEM);
}

static ssize_t psi_cpu_write(struct file *file, const char __user *user_buf,
                             size_t nbytes, loff_t *ppos)
{
        return psi_write(file, user_buf, nbytes, PSI_CPU);
}

static __poll_t psi_fop_poll(struct file *file, poll_table *wait)
{
        struct seq_file *seq = file->private_data;

        return psi_trigger_poll(&seq->private, file, wait);
}

static int psi_fop_release(struct inode *inode, struct file *file)
{
        struct seq_file *seq = file->private_data;

        psi_trigger_replace(&seq->private, NULL);
        return single_release(inode, file);
}

static const struct file_operations psi_io_fops = {
        .open           = psi_io_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .write          = psi_io_write,
        .poll           = psi_fop_poll,
        .release        = psi_fop_release,
};

static const struct file_operations psi_memory_fops = {
        .open           = psi_memory_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .write          = psi_memory_write,
        .poll           = psi_fop_poll,
        .release        = psi_fop_release,
};

static const struct file_operations psi_cpu_fops = {
        .open           = psi_cpu_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .write          = psi_cpu_write,
        .poll           = psi_fop_poll,
        .release        = psi_fop_release,
};

static int __init psi_proc_init(void)
{
        proc_mkdir("pressure", NULL);
        proc_create("pressure/io", 0, NULL, &psi_io_fops);
        proc_create("pressure/memory", 0, NULL, &psi_memory_fops);
        proc_create("pressure/cpu", 0, NULL, &psi_cpu_fops);
        return 0;
}
module_init(psi_proc_init);