// SPDX-License-Identifier: GPL-2.0
/*
 * Block rq-qos base io controller
 *
 * This works similarly to wbt with a few exceptions:
 *
 * - It's bio based, so the latency covers the whole block layer in addition to
 *   the actual io.
 * - We will throttle all IO that comes in here if we need to.
 * - We use the mean latency over the 100ms window. This is because writes can
 *   be particularly fast, which could give us a false sense of the impact of
 *   other workloads on our protected workload.
 * - By default there's no throttling, we set the queue_depth to UINT_MAX so
 *   that we can have as many outstanding bio's as we're allowed to. Only at
 *   throttle time do we pay attention to the actual queue depth.
 *
 * The hierarchy works like the cpu controller does, we track the latency at
 * every configured node, and each configured node has its own independent
 * queue depth. This means that we only care about our latency targets at the
 * peer level. Some group at the bottom of the hierarchy isn't going to affect
 * a group at the end of some other path if we're only configured at leaf level.
 *
 * Consider the following:
 *
 *                   root blkg
 *             /                     \
 *          fast (target=5ms)     slow (target=10ms)
 *           /     \                  /        \
 *         a        b          normal(15ms)   unloved
 *
 * "a" and "b" have no target, but their combined io under "fast" cannot exceed
 * an average latency of 5ms. If it does then we will throttle the "slow"
 * group. In the case of "normal", if it exceeds its 15ms target, we will
 * throttle "unloved", but nobody else.
 *
 * In this example "fast", "slow", and "normal" will be the only groups actually
 * accounting their io latencies. We have to walk up the hierarchy to the root
 * on every submit and complete so we can do the appropriate stat recording and
 * adjust the queue depth of ourselves if needed.
 *
 * There are 2 ways we throttle IO.
 *
 * 1) Queue depth throttling. As we throttle down we will adjust the maximum
 * number of IO's we're allowed to have in flight. This starts at UINT_MAX down
 * to 1. If the group is only ever submitting IO for itself then this is the
 * only way we throttle.
 *
 * 2) Induced delay throttling. This is for the case that a group is generating
 * IO that has to be issued by the root cg to avoid priority inversion. So think
 * REQ_META or REQ_SWAP. If we are already at qd == 1 and we're getting a lot
 * of work done for us on behalf of the root cg and are being asked to scale
 * down more, then we induce a latency at userspace return. We accumulate the
 * total amount of time we need to be punished by doing
 *
 *   total_time += min_lat_nsec - actual_io_completion
 *
 * and then at throttle time will do
 *
 *   throttle_time = min(total_time, NSEC_PER_SEC)
 *
 * This induced delay will throttle back the activity that is generating the
 * root cg issued io's, whether that's some metadata intensive operation or the
 * group is using so much memory that it is pushing us into swap.
 *
 * Copyright (C) 2018 Josef Bacik
 */
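
/*
 * Rough illustration of the induced delay bookkeeping above, with invented
 * numbers rather than anything measured: say min_lat_nsec is 5ms and a
 * root-issued IO done on the group's behalf completes in 2ms, then
 *
 *   total_time += 5ms - 2ms            -> 3ms of accumulated delay
 *   throttle_time = min(total_time, NSEC_PER_SEC)
 *
 * so on the next return to userspace the offending task is put to sleep for
 * the accumulated time, capped at one second per throttle event.
 */
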
#include <linux/kernel.h>
#include <linux/blk_types.h>
#include <linux/backing-dev.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/memcontrol.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/signal.h>
#include <trace/events/block.h>
#include <linux/blk-mq.h>
#include "blk-rq-qos.h"
#include "blk-stat.h"
#include "blk.h"

#define DEFAULT_SCALE_COOKIE 1000000U

static struct blkcg_policy blkcg_policy_iolatency;
struct iolatency_grp;

struct blk_iolatency {
	struct rq_qos rqos;
	struct timer_list timer;

	/*
	 * ->enabled is the master enable switch gating the throttling logic and
	 * inflight tracking. The number of cgroups which have iolat enabled is
	 * tracked in ->enable_cnt, and ->enabled is flipped on/off accordingly
	 * from ->enable_work with the request_queue frozen. For details, see
	 * blkiolatency_enable_work_fn().
	 */
	bool enabled;
	atomic_t enable_cnt;
	struct work_struct enable_work;
};

static inline struct blk_iolatency *BLKIOLATENCY(struct rq_qos *rqos)
{
	return container_of(rqos, struct blk_iolatency, rqos);
}

struct child_latency_info {
	spinlock_t lock;

	/* Last time we adjusted the scale of everybody. */
	u64 last_scale_event;

	/* The latency that we missed. */
	u64 scale_lat;

	/* Total io's from all of our children for the last summation. */
	u64 nr_samples;

	/* The guy who actually changed the latency numbers. */
	struct iolatency_grp *scale_grp;

	/* Cookie to tell if we need to scale up or down. */
	atomic_t scale_cookie;
};

struct percentile_stats {
	u64 total;
	u64 missed;
};

struct latency_stat {
	union {
		struct percentile_stats ps;
		struct blk_rq_stat rqs;
	};
};

struct iolatency_grp {
	struct blkg_policy_data pd;
	struct latency_stat __percpu *stats;
	struct latency_stat cur_stat;
	struct blk_iolatency *blkiolat;
	struct rq_depth rq_depth;
	struct rq_wait rq_wait;
	atomic64_t window_start;
	atomic_t scale_cookie;
	u64 min_lat_nsec;
	u64 cur_win_nsec;

	/* total running average of our io latency. */
	u64 lat_avg;

	/* Our current number of IO's for the last summation. */
	u64 nr_samples;

	bool ssd;
	struct child_latency_info child_lat;
};

#define BLKIOLATENCY_MIN_WIN_SIZE (100 * NSEC_PER_MSEC)
#define BLKIOLATENCY_MAX_WIN_SIZE NSEC_PER_SEC
/*
 * These are the constants used to fake the fixed-point moving average
 * calculation just like load average. The call to calc_load() folds
 * (FIXED_1 (2048) - exp_factor) * new_sample into lat_avg. The sampling
 * window size is bucketed to try to approximately calculate average
 * latency such that 1/exp (decay rate) is [1 min, 2.5 min) when windows
 * elapse immediately. Note, windows only elapse with IO activity. Idle
 * periods extend the most recent window.
 */
#define BLKIOLATENCY_NR_EXP_FACTORS 5
#define BLKIOLATENCY_EXP_BUCKET_SIZE (BLKIOLATENCY_MAX_WIN_SIZE / \
				      (BLKIOLATENCY_NR_EXP_FACTORS - 1))
static const u64 iolatency_exp_factors[BLKIOLATENCY_NR_EXP_FACTORS] = {
	2045, // exp(1/600) - 600 samples
	2039, // exp(1/240) - 240 samples
	2031, // exp(1/120) - 120 samples
	2023, // exp(1/80) - 80 samples
	2014, // exp(1/60) - 60 samples
};

static inline struct iolatency_grp *pd_to_lat(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct iolatency_grp, pd) : NULL;
}

static inline struct iolatency_grp *blkg_to_lat(struct blkcg_gq *blkg)
{
	return pd_to_lat(blkg_to_pd(blkg, &blkcg_policy_iolatency));
}

static inline struct blkcg_gq *lat_to_blkg(struct iolatency_grp *iolat)
{
	return pd_to_blkg(&iolat->pd);
}

static inline void latency_stat_init(struct iolatency_grp *iolat,
				     struct latency_stat *stat)
{
	if (iolat->ssd) {
		stat->ps.total = 0;
		stat->ps.missed = 0;
	} else
		blk_rq_stat_init(&stat->rqs);
}

static inline void latency_stat_sum(struct iolatency_grp *iolat,
				    struct latency_stat *sum,
				    struct latency_stat *stat)
{
	if (iolat->ssd) {
		sum->ps.total += stat->ps.total;
		sum->ps.missed += stat->ps.missed;
	} else
		blk_rq_stat_sum(&sum->rqs, &stat->rqs);
}

static inline void latency_stat_record_time(struct iolatency_grp *iolat,
					    u64 req_time)
{
	struct latency_stat *stat = get_cpu_ptr(iolat->stats);
	if (iolat->ssd) {
		if (req_time >= iolat->min_lat_nsec)
			stat->ps.missed++;
		stat->ps.total++;
	} else
		blk_rq_stat_add(&stat->rqs, req_time);
	put_cpu_ptr(stat);
}

static inline bool latency_sum_ok(struct iolatency_grp *iolat,
				  struct latency_stat *stat)
{
	if (iolat->ssd) {
		u64 thresh = div64_u64(stat->ps.total, 10);
		thresh = max(thresh, 1ULL);
		return stat->ps.missed < thresh;
	}
	return stat->rqs.mean <= iolat->min_lat_nsec;
}
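
/*
 * A quick feel for the check above, using invented numbers: on an ssd with 50
 * samples in the window the threshold is max(50 / 10, 1) = 5, so the window
 * still counts as ok with up to 4 IOs at or above min_lat_nsec; on a
 * rotational device we instead compare the window's mean latency directly
 * against min_lat_nsec.
 */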

static inline u64 latency_stat_samples(struct iolatency_grp *iolat,
				       struct latency_stat *stat)
{
	if (iolat->ssd)
		return stat->ps.total;
	return stat->rqs.nr_samples;
}

static inline void iolat_update_total_lat_avg(struct iolatency_grp *iolat,
					      struct latency_stat *stat)
{
	int exp_idx;

	if (iolat->ssd)
		return;

	/*
	 * calc_load() takes in a number stored in fixed point representation.
	 * Because we are using this for IO time in ns, the values stored
	 * are significantly larger than the FIXED_1 denominator (2048).
	 * Therefore, rounding errors in the calculation are negligible and
	 * can be ignored.
	 */
	exp_idx = min_t(int, BLKIOLATENCY_NR_EXP_FACTORS - 1,
			div64_u64(iolat->cur_win_nsec,
				  BLKIOLATENCY_EXP_BUCKET_SIZE));
	iolat->lat_avg = calc_load(iolat->lat_avg,
				   iolatency_exp_factors[exp_idx],
				   stat->rqs.mean);
}
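
/*
 * Rough worked example of the folding above, with invented values: a 100ms
 * window falls in bucket 0 (100ms / 250ms) and uses exp_factor 2045, the
 * slowest decay; a 1s window uses 2014. With exp_factor 2014, a lat_avg of
 * 1,000,000ns and a new window mean of 3,000,000ns give roughly
 *
 *   lat_avg = (1,000,000 * 2014 + 3,000,000 * (2048 - 2014)) / 2048
 *           ~= 1,033,000ns
 *
 * so each elapsed window nudges the running average toward the latest mean.
 */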

static void iolat_cleanup_cb(struct rq_wait *rqw, void *private_data)
{
	atomic_dec(&rqw->inflight);
	wake_up(&rqw->wait);
}

static bool iolat_acquire_inflight(struct rq_wait *rqw, void *private_data)
{
	struct iolatency_grp *iolat = private_data;
	return rq_wait_inc_below(rqw, iolat->rq_depth.max_depth);
}

static void __blkcg_iolatency_throttle(struct rq_qos *rqos,
				       struct iolatency_grp *iolat,
				       bool issue_as_root,
				       bool use_memdelay)
{
	struct rq_wait *rqw = &iolat->rq_wait;
	unsigned use_delay = atomic_read(&lat_to_blkg(iolat)->use_delay);

	if (use_delay)
		blkcg_schedule_throttle(rqos->q, use_memdelay);

	/*
	 * To avoid priority inversions we want to just take a slot if we are
	 * issuing as root. If we're being killed off there's no point in
	 * delaying things, we may have been killed by OOM so throttling may
	 * make recovery take even longer, so just let the IO's through so the
	 * task can go away.
	 */
	if (issue_as_root || fatal_signal_pending(current)) {
		atomic_inc(&rqw->inflight);
		return;
	}

	rq_qos_wait(rqw, iolat, iolat_acquire_inflight, iolat_cleanup_cb);
}

#define SCALE_DOWN_FACTOR 2
#define SCALE_UP_FACTOR 4

static inline unsigned long scale_amount(unsigned long qd, bool up)
{
	return max(up ? qd >> SCALE_UP_FACTOR : qd >> SCALE_DOWN_FACTOR, 1UL);
}
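
/*
 * For a feel of the step sizes above, take an invented nr_requests of 128:
 * scaling up moves by 128 >> 4 = 8 per event while scaling down moves by
 * 128 >> 2 = 32, so we back off roughly four times faster than we recover,
 * and never by less than 1 either way.
 */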

/*
 * We scale the qd down faster than we scale up, so we need to use this helper
 * to adjust the scale_cookie accordingly so we don't prematurely get
 * scale_cookie at DEFAULT_SCALE_COOKIE and unthrottle too much.
 *
 * Each group has their own local copy of the last scale cookie they saw, so if
 * the global scale cookie goes up or down they know which way they need to go
 * based on their last knowledge of it.
 */
static void scale_cookie_change(struct blk_iolatency *blkiolat,
				struct child_latency_info *lat_info,
				bool up)
{
	unsigned long qd = blkiolat->rqos.q->nr_requests;
	unsigned long scale = scale_amount(qd, up);
	unsigned long old = atomic_read(&lat_info->scale_cookie);
	unsigned long max_scale = qd << 1;
	unsigned long diff = 0;

	if (old < DEFAULT_SCALE_COOKIE)
		diff = DEFAULT_SCALE_COOKIE - old;

	if (up) {
		if (scale + old > DEFAULT_SCALE_COOKIE)
			atomic_set(&lat_info->scale_cookie,
				   DEFAULT_SCALE_COOKIE);
		else if (diff > qd)
			atomic_inc(&lat_info->scale_cookie);
		else
			atomic_add(scale, &lat_info->scale_cookie);
	} else {
		/*
		 * We don't want to dig a hole so deep that it takes us hours to
		 * dig out of it. Just enough that we don't throttle/unthrottle
		 * with jagged workloads but can still unthrottle once pressure
		 * has sufficiently dissipated.
		 */
		if (diff > qd) {
			if (diff < max_scale)
				atomic_dec(&lat_info->scale_cookie);
		} else {
			atomic_sub(scale, &lat_info->scale_cookie);
		}
	}
}

/*
 * Change the queue depth of the iolatency_grp. We add/subtract 1/16th of the
 * queue depth at a time so we don't get wild swings and hopefully dial in to
 * fairer distribution of the overall queue depth.
 */
static void scale_change(struct iolatency_grp *iolat, bool up)
{
	unsigned long qd = iolat->blkiolat->rqos.q->nr_requests;
	unsigned long scale = scale_amount(qd, up);
	unsigned long old = iolat->rq_depth.max_depth;

	if (old > qd)
		old = qd;

	if (up) {
		if (old == 1 && blkcg_unuse_delay(lat_to_blkg(iolat)))
			return;

		if (old < qd) {
			old += scale;
			old = min(old, qd);
			iolat->rq_depth.max_depth = old;
			wake_up_all(&iolat->rq_wait.wait);
		}
	} else {
		old >>= 1;
		iolat->rq_depth.max_depth = max(old, 1UL);
	}
}

/* Check our parent and see if the scale cookie has changed. */
static void check_scale_change(struct iolatency_grp *iolat)
{
	struct iolatency_grp *parent;
	struct child_latency_info *lat_info;
	unsigned int cur_cookie;
	unsigned int our_cookie = atomic_read(&iolat->scale_cookie);
	u64 scale_lat;
	unsigned int old;
	int direction = 0;

	if (lat_to_blkg(iolat)->parent == NULL)
		return;

	parent = blkg_to_lat(lat_to_blkg(iolat)->parent);
	if (!parent)
		return;

	lat_info = &parent->child_lat;
	cur_cookie = atomic_read(&lat_info->scale_cookie);
	scale_lat = READ_ONCE(lat_info->scale_lat);

	if (cur_cookie < our_cookie)
		direction = -1;
	else if (cur_cookie > our_cookie)
		direction = 1;
	else
		return;

	old = atomic_cmpxchg(&iolat->scale_cookie, our_cookie, cur_cookie);

	/* Somebody beat us to the punch, just bail. */
	if (old != our_cookie)
		return;

	if (direction < 0 && iolat->min_lat_nsec) {
		u64 samples_thresh;

		if (!scale_lat || iolat->min_lat_nsec <= scale_lat)
			return;

		/*
		 * Sometimes high priority groups are their own worst enemy, so
		 * instead of taking it out on some poor other group that did 5%
		 * or less of the IO's for the last summation just skip this
		 * scale down event.
		 */
		samples_thresh = lat_info->nr_samples * 5;
		samples_thresh = max(1ULL, div64_u64(samples_thresh, 100));
		if (iolat->nr_samples <= samples_thresh)
			return;
	}

	/* We're as low as we can go. */
	if (iolat->rq_depth.max_depth == 1 && direction < 0) {
		blkcg_use_delay(lat_to_blkg(iolat));
		return;
	}

	/* We're back to the default cookie, unthrottle all the things. */
	if (cur_cookie == DEFAULT_SCALE_COOKIE) {
		blkcg_clear_delay(lat_to_blkg(iolat));
		iolat->rq_depth.max_depth = UINT_MAX;
		wake_up_all(&iolat->rq_wait.wait);
		return;
	}

	scale_change(iolat, direction > 0);
}

static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio)
{
	struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
	struct blkcg_gq *blkg = bio->bi_blkg;
	bool issue_as_root = bio_issue_as_root_blkg(bio);

	if (!blkiolat->enabled)
		return;

	while (blkg && blkg->parent) {
		struct iolatency_grp *iolat = blkg_to_lat(blkg);
		if (!iolat) {
			blkg = blkg->parent;
			continue;
		}

		check_scale_change(iolat);
		__blkcg_iolatency_throttle(rqos, iolat, issue_as_root,
					   (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
		blkg = blkg->parent;
	}
	if (!timer_pending(&blkiolat->timer))
		mod_timer(&blkiolat->timer, jiffies + HZ);
}

static void iolatency_record_time(struct iolatency_grp *iolat,
				  struct bio_issue *issue, u64 now,
				  bool issue_as_root)
{
	u64 start = bio_issue_time(issue);
	u64 req_time;

	/*
	 * Have to do this so "now" is truncated to the same granularity that
	 * the issue time was truncated to.
	 */
	now = __bio_issue_time(now);

	if (now <= start)
		return;

	req_time = now - start;

	/*
	 * We don't want to count issue_as_root bio's in the cgroup's latency
	 * statistics as it could skew the numbers downwards.
	 */
	if (unlikely(issue_as_root && iolat->rq_depth.max_depth != UINT_MAX)) {
		u64 sub = iolat->min_lat_nsec;
		if (req_time < sub)
			blkcg_add_delay(lat_to_blkg(iolat), now, sub - req_time);
		return;
	}

	latency_stat_record_time(iolat, req_time);
}

#define BLKIOLATENCY_MIN_ADJUST_TIME (500 * NSEC_PER_MSEC)
#define BLKIOLATENCY_MIN_GOOD_SAMPLES 5

static void iolatency_check_latencies(struct iolatency_grp *iolat, u64 now)
{
	struct blkcg_gq *blkg = lat_to_blkg(iolat);
	struct iolatency_grp *parent;
	struct child_latency_info *lat_info;
	struct latency_stat stat;
	unsigned long flags;
	int cpu;

	latency_stat_init(iolat, &stat);
	preempt_disable();
	for_each_online_cpu(cpu) {
		struct latency_stat *s;
		s = per_cpu_ptr(iolat->stats, cpu);
		latency_stat_sum(iolat, &stat, s);
		latency_stat_init(iolat, s);
	}
	preempt_enable();

	parent = blkg_to_lat(blkg->parent);
	if (!parent)
		return;

	lat_info = &parent->child_lat;

	iolat_update_total_lat_avg(iolat, &stat);

	/* Everything is ok and we don't need to adjust the scale. */
	if (latency_sum_ok(iolat, &stat) &&
	    atomic_read(&lat_info->scale_cookie) == DEFAULT_SCALE_COOKIE)
		return;

	/* Somebody beat us to the punch, just bail. */
	spin_lock_irqsave(&lat_info->lock, flags);

	latency_stat_sum(iolat, &iolat->cur_stat, &stat);
	lat_info->nr_samples -= iolat->nr_samples;
	lat_info->nr_samples += latency_stat_samples(iolat, &iolat->cur_stat);
	iolat->nr_samples = latency_stat_samples(iolat, &iolat->cur_stat);

	if ((lat_info->last_scale_event >= now ||
	    now - lat_info->last_scale_event < BLKIOLATENCY_MIN_ADJUST_TIME))
		goto out;

	if (latency_sum_ok(iolat, &iolat->cur_stat) &&
	    latency_sum_ok(iolat, &stat)) {
		if (latency_stat_samples(iolat, &iolat->cur_stat) <
		    BLKIOLATENCY_MIN_GOOD_SAMPLES)
			goto out;
		if (lat_info->scale_grp == iolat) {
			lat_info->last_scale_event = now;
			scale_cookie_change(iolat->blkiolat, lat_info, true);
		}
	} else if (lat_info->scale_lat == 0 ||
		   lat_info->scale_lat >= iolat->min_lat_nsec) {
		lat_info->last_scale_event = now;
		if (!lat_info->scale_grp ||
		    lat_info->scale_lat > iolat->min_lat_nsec) {
			WRITE_ONCE(lat_info->scale_lat, iolat->min_lat_nsec);
			lat_info->scale_grp = iolat;
		}
		scale_cookie_change(iolat->blkiolat, lat_info, false);
	}
	latency_stat_init(iolat, &iolat->cur_stat);
out:
	spin_unlock_irqrestore(&lat_info->lock, flags);
}

static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
{
	struct blkcg_gq *blkg;
	struct rq_wait *rqw;
	struct iolatency_grp *iolat;
	u64 window_start;
	u64 now;
	bool issue_as_root = bio_issue_as_root_blkg(bio);
	int inflight = 0;

	blkg = bio->bi_blkg;
	if (!blkg || !bio_flagged(bio, BIO_QOS_THROTTLED))
		return;

	iolat = blkg_to_lat(bio->bi_blkg);
	if (!iolat)
		return;

	if (!iolat->blkiolat->enabled)
		return;

	now = ktime_to_ns(ktime_get());
	while (blkg && blkg->parent) {
		iolat = blkg_to_lat(blkg);
		if (!iolat) {
			blkg = blkg->parent;
			continue;
		}
		rqw = &iolat->rq_wait;

		inflight = atomic_dec_return(&rqw->inflight);
		WARN_ON_ONCE(inflight < 0);
		/*
		 * If bi_status is BLK_STS_AGAIN, the bio wasn't actually
		 * submitted, so do not account for it.
		 */
		if (iolat->min_lat_nsec && bio->bi_status != BLK_STS_AGAIN) {
			iolatency_record_time(iolat, &bio->bi_issue, now,
					      issue_as_root);
			window_start = atomic64_read(&iolat->window_start);
			if (now > window_start &&
			    (now - window_start) >= iolat->cur_win_nsec) {
				if (atomic64_cmpxchg(&iolat->window_start,
					window_start, now) == window_start)
					iolatency_check_latencies(iolat, now);
			}
		}
		wake_up(&rqw->wait);
		blkg = blkg->parent;
	}
}

static void blkcg_iolatency_exit(struct rq_qos *rqos)
{
	struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);

	del_timer_sync(&blkiolat->timer);
	flush_work(&blkiolat->enable_work);
	blkcg_deactivate_policy(rqos->q, &blkcg_policy_iolatency);
	kfree(blkiolat);
}

static struct rq_qos_ops blkcg_iolatency_ops = {
	.throttle = blkcg_iolatency_throttle,
	.done_bio = blkcg_iolatency_done_bio,
	.exit = blkcg_iolatency_exit,
};

static void blkiolatency_timer_fn(struct timer_list *t)
{
	struct blk_iolatency *blkiolat = from_timer(blkiolat, t, timer);
	struct blkcg_gq *blkg;
	struct cgroup_subsys_state *pos_css;
	u64 now = ktime_to_ns(ktime_get());

	rcu_read_lock();
	blkg_for_each_descendant_pre(blkg, pos_css,
				     blkiolat->rqos.q->root_blkg) {
		struct iolatency_grp *iolat;
		struct child_latency_info *lat_info;
		unsigned long flags;
		u64 cookie;

		/*
		 * We could be exiting, don't access the pd unless we have a
		 * ref on the blkg.
		 */
		if (!blkg_tryget(blkg))
			continue;

		iolat = blkg_to_lat(blkg);
		if (!iolat)
			goto next;

		lat_info = &iolat->child_lat;
		cookie = atomic_read(&lat_info->scale_cookie);

		if (cookie >= DEFAULT_SCALE_COOKIE)
			goto next;

		spin_lock_irqsave(&lat_info->lock, flags);
		if (lat_info->last_scale_event >= now)
			goto next_lock;

		/*
		 * We scaled down but don't have a scale_grp, scale up and carry
		 * on.
		 */
		if (lat_info->scale_grp == NULL) {
			scale_cookie_change(iolat->blkiolat, lat_info, true);
			goto next_lock;
		}

		/*
		 * It's been 5 seconds since our last scale event, clear the
		 * scale grp in case the group that needed the scale down isn't
		 * doing any IO currently.
		 */
		if (now - lat_info->last_scale_event >=
		    ((u64)NSEC_PER_SEC * 5))
			lat_info->scale_grp = NULL;
next_lock:
		spin_unlock_irqrestore(&lat_info->lock, flags);
next:
		blkg_put(blkg);
	}
	rcu_read_unlock();
}

/**
 * blkiolatency_enable_work_fn - Enable or disable iolatency on the device
 * @work: enable_work of the blk_iolatency of interest
 *
 * iolatency needs to keep track of the number of in-flight IOs per cgroup. This
 * is relatively expensive as it involves walking up the hierarchy twice for
 * every IO. Thus, if iolatency is not enabled in any cgroup for the device, we
 * want to disable the in-flight tracking.
 *
 * We have to make sure that the counting is balanced - we don't want to leak
 * the in-flight counts by disabling accounting in the completion path while IOs
 * are in flight. This is achieved by ensuring that no IO is in flight by
 * freezing the queue while flipping ->enabled. As this requires a sleepable
 * context, ->enabled flipping is punted to this work function.
 */
static void blkiolatency_enable_work_fn(struct work_struct *work)
{
	struct blk_iolatency *blkiolat = container_of(work, struct blk_iolatency,
						      enable_work);
	bool enabled;

	/*
	 * There can only be one instance of this function running for @blkiolat
	 * and it's guaranteed to be executed at least once after the latest
	 * ->enable_cnt modification. Acting on the latest ->enable_cnt is
	 * sufficient.
	 *
	 * Also, we know @blkiolat is safe to access as ->enable_work is flushed
	 * in blkcg_iolatency_exit().
	 */
	enabled = atomic_read(&blkiolat->enable_cnt);
	if (enabled != blkiolat->enabled) {
		blk_mq_freeze_queue(blkiolat->rqos.q);
		blkiolat->enabled = enabled;
		blk_mq_unfreeze_queue(blkiolat->rqos.q);
	}
}

int blk_iolatency_init(struct request_queue *q)
{
	struct blk_iolatency *blkiolat;
	struct rq_qos *rqos;
	int ret;

	blkiolat = kzalloc(sizeof(*blkiolat), GFP_KERNEL);
	if (!blkiolat)
		return -ENOMEM;

	rqos = &blkiolat->rqos;
	rqos->id = RQ_QOS_LATENCY;
	rqos->ops = &blkcg_iolatency_ops;
	rqos->q = q;

	ret = rq_qos_add(q, rqos);
	if (ret)
		goto err_free;
	ret = blkcg_activate_policy(q, &blkcg_policy_iolatency);
	if (ret)
		goto err_qos_del;

	timer_setup(&blkiolat->timer, blkiolatency_timer_fn, 0);
	INIT_WORK(&blkiolat->enable_work, blkiolatency_enable_work_fn);

	return 0;

err_qos_del:
	rq_qos_del(q, rqos);
err_free:
	kfree(blkiolat);
	return ret;
}

static void iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
{
	struct iolatency_grp *iolat = blkg_to_lat(blkg);
	struct blk_iolatency *blkiolat = iolat->blkiolat;
	u64 oldval = iolat->min_lat_nsec;

	iolat->min_lat_nsec = val;
	iolat->cur_win_nsec = max_t(u64, val << 4, BLKIOLATENCY_MIN_WIN_SIZE);
	iolat->cur_win_nsec = min_t(u64, iolat->cur_win_nsec,
				    BLKIOLATENCY_MAX_WIN_SIZE);

	if (!oldval && val) {
		if (atomic_inc_return(&blkiolat->enable_cnt) == 1)
			schedule_work(&blkiolat->enable_work);
	}
	if (oldval && !val) {
		blkcg_clear_delay(blkg);
		if (atomic_dec_return(&blkiolat->enable_cnt) == 0)
			schedule_work(&blkiolat->enable_work);
	}
}

static void iolatency_clear_scaling(struct blkcg_gq *blkg)
{
	if (blkg->parent) {
		struct iolatency_grp *iolat = blkg_to_lat(blkg->parent);
		struct child_latency_info *lat_info;
		if (!iolat)
			return;

		lat_info = &iolat->child_lat;
		spin_lock(&lat_info->lock);
		atomic_set(&lat_info->scale_cookie, DEFAULT_SCALE_COOKIE);
		lat_info->last_scale_event = 0;
		lat_info->scale_grp = NULL;
		lat_info->scale_lat = 0;
		spin_unlock(&lat_info->lock);
	}
}

static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
				   size_t nbytes, loff_t off)
{
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct blkcg_gq *blkg;
	struct blkg_conf_ctx ctx;
	struct iolatency_grp *iolat;
	char *p, *tok;
	u64 lat_val = 0;
	u64 oldval;
	int ret;

	ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, buf, &ctx);
	if (ret)
		return ret;

	iolat = blkg_to_lat(ctx.blkg);
	p = ctx.body;

	ret = -EINVAL;
	while ((tok = strsep(&p, " "))) {
		char key[16];
		char val[21];	/* 18446744073709551616 */

		if (sscanf(tok, "%15[^=]=%20s", key, val) != 2)
			goto out;

		if (!strcmp(key, "target")) {
			u64 v;

			if (!strcmp(val, "max"))
				lat_val = 0;
			else if (sscanf(val, "%llu", &v) == 1)
				lat_val = v * NSEC_PER_USEC;
			else
				goto out;
		} else {
			goto out;
		}
	}

	/* Walk up the tree to see if our new val is lower than it should be. */
	blkg = ctx.blkg;
	oldval = iolat->min_lat_nsec;

	iolatency_set_min_lat_nsec(blkg, lat_val);
	if (oldval != iolat->min_lat_nsec)
		iolatency_clear_scaling(blkg);
	ret = 0;
out:
	blkg_conf_finish(&ctx);
	return ret ?: nbytes;
}
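
/*
 * Illustrative use of the interface parsed above, with an invented device
 * number, from a cgroup v2 directory that has the io controller enabled:
 *
 *   echo "8:0 target=2000" > io.latency	# 2ms target, given in usec
 *   echo "8:0 target=max" > io.latency		# clear the target again
 *
 * The value is stored internally in nanoseconds (v * NSEC_PER_USEC above).
 */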

static u64 iolatency_prfill_limit(struct seq_file *sf,
				  struct blkg_policy_data *pd, int off)
{
	struct iolatency_grp *iolat = pd_to_lat(pd);
	const char *dname = blkg_dev_name(pd->blkg);

	if (!dname || !iolat->min_lat_nsec)
		return 0;
	seq_printf(sf, "%s target=%llu\n",
		   dname, div_u64(iolat->min_lat_nsec, NSEC_PER_USEC));
	return 0;
}

static int iolatency_print_limit(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  iolatency_prfill_limit,
			  &blkcg_policy_iolatency, seq_cft(sf)->private, false);
	return 0;
}

static bool iolatency_ssd_stat(struct iolatency_grp *iolat, struct seq_file *s)
{
	struct latency_stat stat;
	int cpu;

	latency_stat_init(iolat, &stat);
	preempt_disable();
	for_each_online_cpu(cpu) {
		struct latency_stat *s;
		s = per_cpu_ptr(iolat->stats, cpu);
		latency_stat_sum(iolat, &stat, s);
	}
	preempt_enable();

	if (iolat->rq_depth.max_depth == UINT_MAX)
		seq_printf(s, " missed=%llu total=%llu depth=max",
			   (unsigned long long)stat.ps.missed,
			   (unsigned long long)stat.ps.total);
	else
		seq_printf(s, " missed=%llu total=%llu depth=%u",
			   (unsigned long long)stat.ps.missed,
			   (unsigned long long)stat.ps.total,
			   iolat->rq_depth.max_depth);
	return true;
}

static bool iolatency_pd_stat(struct blkg_policy_data *pd, struct seq_file *s)
{
	struct iolatency_grp *iolat = pd_to_lat(pd);
	unsigned long long avg_lat;
	unsigned long long cur_win;

	if (!blkcg_debug_stats)
		return false;

	if (iolat->ssd)
		return iolatency_ssd_stat(iolat, s);

	avg_lat = div64_u64(iolat->lat_avg, NSEC_PER_USEC);
	cur_win = div64_u64(iolat->cur_win_nsec, NSEC_PER_MSEC);
	if (iolat->rq_depth.max_depth == UINT_MAX)
		seq_printf(s, " depth=max avg_lat=%llu win=%llu",
			   avg_lat, cur_win);
	else
		seq_printf(s, " depth=%u avg_lat=%llu win=%llu",
			   iolat->rq_depth.max_depth, avg_lat, cur_win);
	return true;
}

static struct blkg_policy_data *iolatency_pd_alloc(gfp_t gfp,
						   struct request_queue *q,
						   struct blkcg *blkcg)
{
	struct iolatency_grp *iolat;

	iolat = kzalloc_node(sizeof(*iolat), gfp, q->node);
	if (!iolat)
		return NULL;
	iolat->stats = __alloc_percpu_gfp(sizeof(struct latency_stat),
				       __alignof__(struct latency_stat), gfp);
	if (!iolat->stats) {
		kfree(iolat);
		return NULL;
	}
	return &iolat->pd;
}

static void iolatency_pd_init(struct blkg_policy_data *pd)
{
	struct iolatency_grp *iolat = pd_to_lat(pd);
	struct blkcg_gq *blkg = lat_to_blkg(iolat);
	struct rq_qos *rqos = blkcg_rq_qos(blkg->q);
	struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
	u64 now = ktime_to_ns(ktime_get());
	int cpu;

	if (blk_queue_nonrot(blkg->q))
		iolat->ssd = true;
	else
		iolat->ssd = false;

	for_each_possible_cpu(cpu) {
		struct latency_stat *stat;
		stat = per_cpu_ptr(iolat->stats, cpu);
		latency_stat_init(iolat, stat);
	}

	latency_stat_init(iolat, &iolat->cur_stat);
	rq_wait_init(&iolat->rq_wait);
	spin_lock_init(&iolat->child_lat.lock);
	iolat->rq_depth.queue_depth = blkg->q->nr_requests;
	iolat->rq_depth.max_depth = UINT_MAX;
	iolat->rq_depth.default_depth = iolat->rq_depth.queue_depth;
	iolat->blkiolat = blkiolat;
	iolat->cur_win_nsec = 100 * NSEC_PER_MSEC;
	atomic64_set(&iolat->window_start, now);

	/*
	 * We init things in list order, so the pd for the parent may not be
	 * init'ed yet for whatever reason.
	 */
	if (blkg->parent && blkg_to_pd(blkg->parent, &blkcg_policy_iolatency)) {
		struct iolatency_grp *parent = blkg_to_lat(blkg->parent);
		atomic_set(&iolat->scale_cookie,
			   atomic_read(&parent->child_lat.scale_cookie));
	} else {
		atomic_set(&iolat->scale_cookie, DEFAULT_SCALE_COOKIE);
	}

	atomic_set(&iolat->child_lat.scale_cookie, DEFAULT_SCALE_COOKIE);
}

static void iolatency_pd_offline(struct blkg_policy_data *pd)
{
	struct iolatency_grp *iolat = pd_to_lat(pd);
	struct blkcg_gq *blkg = lat_to_blkg(iolat);

	iolatency_set_min_lat_nsec(blkg, 0);
	iolatency_clear_scaling(blkg);
}

static void iolatency_pd_free(struct blkg_policy_data *pd)
{
	struct iolatency_grp *iolat = pd_to_lat(pd);
	free_percpu(iolat->stats);
	kfree(iolat);
}

static struct cftype iolatency_files[] = {
	{
		.name = "latency",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = iolatency_print_limit,
		.write = iolatency_set_limit,
	},
	{}
};

static struct blkcg_policy blkcg_policy_iolatency = {
	.dfl_cftypes = iolatency_files,
	.pd_alloc_fn = iolatency_pd_alloc,
	.pd_init_fn = iolatency_pd_init,
	.pd_offline_fn = iolatency_pd_offline,
	.pd_free_fn = iolatency_pd_free,
	.pd_stat_fn = iolatency_pd_stat,
};

static int __init iolatency_init(void)
{
	return blkcg_policy_register(&blkcg_policy_iolatency);
}

static void __exit iolatency_exit(void)
{
	blkcg_policy_unregister(&blkcg_policy_iolatency);
}

module_init(iolatency_init);
module_exit(iolatency_exit);