/*
 * Block rq-qos base io controller
 *
 * This works similar to wbt with a few exceptions
 *
 * - It's bio based, so the latency covers the whole block layer in addition to
 *   the actual io.
 * - We will throttle all IO that comes in here if we need to.
 * - We use the mean latency over the 100ms window. This is because writes can
 *   be particularly fast, which could give us a false sense of the impact of
 *   other workloads on our protected workload.
 * - By default there's no throttling, we set the queue_depth to UINT_MAX so
 *   that we can have as many outstanding bio's as we're allowed to. Only at
 *   throttle time do we pay attention to the actual queue depth.
 *
 * The hierarchy works like the cpu controller does, we track the latency at
 * every configured node, and each configured node has its own independent
 * queue depth. This means that we only care about our latency targets at the
 * peer level. Some group at the bottom of the hierarchy isn't going to affect
 * a group at the end of some other path if we're only configured at leaf level.
 *
 * Consider the following
 *
 *                   root blkg
 *                 /          \
 *        fast (target=5ms)   slow (target=10ms)
 *          /     \              /         \
 *        a        b       normal(15ms)   unloved
 *
 * "a" and "b" have no target, but their combined io under "fast" cannot exceed
 * an average latency of 5ms. If it does then we will throttle the "slow"
 * group. In the case of "normal", if it exceeds its 15ms target, we will
 * throttle "unloved", but nobody else.
 *
 * In this example "fast", "slow", and "normal" will be the only groups actually
 * accounting their io latencies. We have to walk up the hierarchy to the root
 * on every submit and complete so we can do the appropriate stat recording and
 * adjust the queue depth of ourselves if needed.
 *
 * There are 2 ways we throttle IO.
 *
 * 1) Queue depth throttling. As we throttle down we will adjust the maximum
 * number of IO's we're allowed to have in flight. This starts at (u64)-1 down
 * to 1. If the group is only ever submitting IO for itself then this is the
 * only way we throttle.
 *
 * 2) Induced delay throttling. This is for the case that a group is generating
 * IO that has to be issued by the root cg to avoid priority inversion. So think
 * REQ_META or REQ_SWAP. If we are already at qd == 1 and we're getting a lot
 * of work done for us on behalf of the root cg and are being asked to scale
 * down more then we induce a latency at userspace return. We accumulate the
 * total amount of time we need to be punished by doing
 *
 * total_time += min_lat_nsec - actual_io_completion
 *
 * and then at throttle time will do
 *
 * throttle_time = min(total_time, NSEC_PER_SEC)
 *
 * This induced delay will throttle back the activity that is generating the
 * root cg issued io's, whether that's some metadata intensive operation or the
 * group is using so much memory that it is pushing us into swap.
 *
 * Copyright (C) 2018 Josef Bacik
 */
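
/*
 * Worked example of the induced delay accounting above (illustrative numbers
 * only): with min_lat_nsec = 5ms, a root-issued IO that completes in 2ms adds
 * 5ms - 2ms = 3ms to the group's accumulated delay, and the delay actually
 * applied at userspace return is capped at NSEC_PER_SEC by the min() above.
 */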
#include <linux/kernel.h>
#include <linux/blk_types.h>
#include <linux/backing-dev.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/memcontrol.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/signal.h>
#include <trace/events/block.h>
#include "blk-rq-qos.h"
#include "blk-stat.h"

#define DEFAULT_SCALE_COOKIE 1000000U

static struct blkcg_policy blkcg_policy_iolatency;
struct iolatency_grp;

struct blk_iolatency {
	struct rq_qos rqos;
	struct timer_list timer;
	atomic_t enabled;
};

static inline struct blk_iolatency *BLKIOLATENCY(struct rq_qos *rqos)
{
	return container_of(rqos, struct blk_iolatency, rqos);
}

static inline bool blk_iolatency_enabled(struct blk_iolatency *blkiolat)
{
	return atomic_read(&blkiolat->enabled) > 0;
}

struct child_latency_info {
	spinlock_t lock;

	/* Last time we adjusted the scale of everybody. */
	u64 last_scale_event;

	/* The latency that we missed. */
	u64 scale_lat;

	/* Total io's from all of our children for the last summation. */
	u64 nr_samples;

	/* The guy who actually changed the latency numbers. */
	struct iolatency_grp *scale_grp;

	/* Cookie to tell if we need to scale up or down. */
	atomic_t scale_cookie;
};

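/*
 * Stat bookkeeping: on non-rotational (SSD) devices we only count how many
 * IOs in the window missed the latency target (percentile_stats), while on
 * rotational devices we keep the full blk_rq_stat so the window mean can be
 * compared against the target. Which union member is live is decided by
 * iolatency_grp->ssd below.
 */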
struct percentile_stats {
	u64 total;
	u64 missed;
};

struct latency_stat {
	union {
		struct percentile_stats ps;
		struct blk_rq_stat rqs;
	};
};

struct iolatency_grp {
	struct blkg_policy_data pd;
	struct latency_stat __percpu *stats;
	struct latency_stat cur_stat;
	struct blk_iolatency *blkiolat;
	struct rq_depth rq_depth;
	struct rq_wait rq_wait;
	atomic64_t window_start;
	atomic_t scale_cookie;
	u64 min_lat_nsec;
	u64 cur_win_nsec;

	/* total running average of our io latency. */
	u64 lat_avg;

	/* Our current number of IO's for the last summation. */
	u64 nr_samples;

	bool ssd;
	struct child_latency_info child_lat;
};

#define BLKIOLATENCY_MIN_WIN_SIZE (100 * NSEC_PER_MSEC)
#define BLKIOLATENCY_MAX_WIN_SIZE NSEC_PER_SEC
/*
 * These are the constants used to fake the fixed-point moving average
 * calculation just like load average. The call to CALC_LOAD folds
 * (FIXED_1 (2048) - exp_factor) * new_sample into lat_avg. The sampling
 * window size is bucketed to try to approximately calculate average
 * latency such that 1/exp (decay rate) is [1 min, 2.5 min) when windows
 * elapse immediately. Note, windows only elapse with IO activity. Idle
 * periods extend the most recent window.
 */
#define BLKIOLATENCY_NR_EXP_FACTORS 5
#define BLKIOLATENCY_EXP_BUCKET_SIZE (BLKIOLATENCY_MAX_WIN_SIZE / \
				      (BLKIOLATENCY_NR_EXP_FACTORS - 1))
static const u64 iolatency_exp_factors[BLKIOLATENCY_NR_EXP_FACTORS] = {
	2045, // exp(1/600) - 600 samples
	2039, // exp(1/240) - 240 samples
	2031, // exp(1/120) - 120 samples
	2023, // exp(1/80) - 80 samples
	2014, // exp(1/60) - 60 samples
};
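
/*
 * Rough worked example (assuming CALC_LOAD keeps its usual load-average form,
 * avg = (avg * exp + sample * (FIXED_1 - exp)) / FIXED_1 with FIXED_1 == 2048):
 * with the smallest factor, 2045, each new window mean gets a weight of
 * (2048 - 2045) / 2048, roughly 0.15%, so with back-to-back 100ms windows the
 * running average decays with a time constant of about 600 windows, i.e.
 * about one minute.
 */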

static inline struct iolatency_grp *pd_to_lat(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct iolatency_grp, pd) : NULL;
}

static inline struct iolatency_grp *blkg_to_lat(struct blkcg_gq *blkg)
{
	return pd_to_lat(blkg_to_pd(blkg, &blkcg_policy_iolatency));
}

static inline struct blkcg_gq *lat_to_blkg(struct iolatency_grp *iolat)
{
	return pd_to_blkg(&iolat->pd);
}

static inline void latency_stat_init(struct iolatency_grp *iolat,
				     struct latency_stat *stat)
{
	if (iolat->ssd) {
		stat->ps.total = 0;
		stat->ps.missed = 0;
	} else
		blk_rq_stat_init(&stat->rqs);
}

static inline void latency_stat_sum(struct iolatency_grp *iolat,
				    struct latency_stat *sum,
				    struct latency_stat *stat)
{
	if (iolat->ssd) {
		sum->ps.total += stat->ps.total;
		sum->ps.missed += stat->ps.missed;
	} else
		blk_rq_stat_sum(&sum->rqs, &stat->rqs);
}

static inline void latency_stat_record_time(struct iolatency_grp *iolat,
					    u64 req_time)
{
	struct latency_stat *stat = get_cpu_ptr(iolat->stats);
	if (iolat->ssd) {
		if (req_time >= iolat->min_lat_nsec)
			stat->ps.missed++;
		stat->ps.total++;
	} else
		blk_rq_stat_add(&stat->rqs, req_time);
	put_cpu_ptr(stat);
}

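/*
 * A window is considered "ok" on SSDs if fewer than 10% of the IOs in it
 * missed the latency target, and on rotational devices if the window's mean
 * latency stayed at or under the target.
 */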
static inline bool latency_sum_ok(struct iolatency_grp *iolat,
				  struct latency_stat *stat)
{
	if (iolat->ssd) {
		u64 thresh = div64_u64(stat->ps.total, 10);
		thresh = max(thresh, 1ULL);
		return stat->ps.missed < thresh;
	}
	return stat->rqs.mean <= iolat->min_lat_nsec;
}

static inline u64 latency_stat_samples(struct iolatency_grp *iolat,
				       struct latency_stat *stat)
{
	if (iolat->ssd)
		return stat->ps.total;
	return stat->rqs.nr_samples;
}

static inline void iolat_update_total_lat_avg(struct iolatency_grp *iolat,
					      struct latency_stat *stat)
{
	int exp_idx;

	if (iolat->ssd)
		return;

	/*
	 * CALC_LOAD takes in a number stored in fixed point representation.
	 * Because we are using this for IO time in ns, the values stored
	 * are significantly larger than the FIXED_1 denominator (2048).
	 * Therefore, rounding errors in the calculation are negligible and
	 * can be ignored.
	 */
	exp_idx = min_t(int, BLKIOLATENCY_NR_EXP_FACTORS - 1,
			div64_u64(iolat->cur_win_nsec,
				  BLKIOLATENCY_EXP_BUCKET_SIZE));
	CALC_LOAD(iolat->lat_avg, iolatency_exp_factors[exp_idx], stat->rqs.mean);
}

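/*
 * Take an inflight slot if we are under the configured queue depth. On the
 * first attempt we also refuse to jump ahead of anybody already sleeping on
 * the waitqueue unless our wait entry is at its head, which keeps throttled
 * submitters roughly FIFO ordered.
 */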
static inline bool iolatency_may_queue(struct iolatency_grp *iolat,
				       wait_queue_entry_t *wait,
				       bool first_block)
{
	struct rq_wait *rqw = &iolat->rq_wait;

	if (first_block && waitqueue_active(&rqw->wait) &&
	    rqw->wait.head.next != &wait->entry)
		return false;
	return rq_wait_inc_below(rqw, iolat->rq_depth.max_depth);
}

static void __blkcg_iolatency_throttle(struct rq_qos *rqos,
				       struct iolatency_grp *iolat,
				       spinlock_t *lock, bool issue_as_root,
				       bool use_memdelay)
	__releases(lock)
	__acquires(lock)
{
	struct rq_wait *rqw = &iolat->rq_wait;
	unsigned use_delay = atomic_read(&lat_to_blkg(iolat)->use_delay);
	DEFINE_WAIT(wait);
	bool first_block = true;

	if (use_delay)
		blkcg_schedule_throttle(rqos->q, use_memdelay);

	/*
	 * To avoid priority inversions we want to just take a slot if we are
	 * issuing as root. If we're being killed off there's no point in
	 * delaying things, we may have been killed by OOM so throttling may
	 * make recovery take even longer, so just let the IO's through so the
	 * task can go away.
	 */
	if (issue_as_root || fatal_signal_pending(current)) {
		atomic_inc(&rqw->inflight);
		return;
	}

	if (iolatency_may_queue(iolat, &wait, first_block))
		return;

	do {
		prepare_to_wait_exclusive(&rqw->wait, &wait,
					  TASK_UNINTERRUPTIBLE);

		if (iolatency_may_queue(iolat, &wait, first_block))
			break;
		first_block = false;

		if (lock) {
			spin_unlock_irq(lock);
			io_schedule();
			spin_lock_irq(lock);
		} else {
			io_schedule();
		}
	} while (1);

	finish_wait(&rqw->wait, &wait);
}

#define SCALE_DOWN_FACTOR 2
#define SCALE_UP_FACTOR 4

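/*
 * When scaling we step by a power-of-two fraction of the queue depth:
 * qd >> SCALE_UP_FACTOR (1/16th) on the way up and qd >> SCALE_DOWN_FACTOR
 * (1/4th) on the way down, never less than 1, so we back off faster than we
 * recover.
 */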
static inline unsigned long scale_amount(unsigned long qd, bool up)
{
	return max(up ? qd >> SCALE_UP_FACTOR : qd >> SCALE_DOWN_FACTOR, 1UL);
}

/*
 * We scale the qd down faster than we scale up, so we need to use this helper
 * to adjust the scale_cookie accordingly so we don't prematurely get
 * scale_cookie at DEFAULT_SCALE_COOKIE and unthrottle too much.
 *
 * Each group has its own local copy of the last scale cookie it saw, so if
 * the global scale cookie goes up or down they know which way they need to go
 * based on their last knowledge of it.
 */
static void scale_cookie_change(struct blk_iolatency *blkiolat,
				struct child_latency_info *lat_info,
				bool up)
{
	unsigned long qd = blkiolat->rqos.q->nr_requests;
	unsigned long scale = scale_amount(qd, up);
	unsigned long old = atomic_read(&lat_info->scale_cookie);
	unsigned long max_scale = qd << 1;
	unsigned long diff = 0;

	if (old < DEFAULT_SCALE_COOKIE)
		diff = DEFAULT_SCALE_COOKIE - old;

	if (up) {
		if (scale + old > DEFAULT_SCALE_COOKIE)
			atomic_set(&lat_info->scale_cookie,
				   DEFAULT_SCALE_COOKIE);
		else if (diff > qd)
			atomic_inc(&lat_info->scale_cookie);
		else
			atomic_add(scale, &lat_info->scale_cookie);
	} else {
		/*
		 * We don't want to dig a hole so deep that it takes us hours to
		 * dig out of it. Just enough that we don't throttle/unthrottle
		 * with jagged workloads but can still unthrottle once pressure
		 * has sufficiently dissipated.
		 */
		if (diff > qd) {
			if (diff < max_scale)
				atomic_dec(&lat_info->scale_cookie);
		} else {
			atomic_sub(scale, &lat_info->scale_cookie);
		}
	}
}

/*
 * Change the queue depth of the iolatency_grp. We add 1/16th of the queue
 * depth at a time when scaling up, and halve it when scaling down, so we
 * don't get wild swings and hopefully dial in to fairer distribution of the
 * overall queue depth.
 */
static void scale_change(struct iolatency_grp *iolat, bool up)
{
	unsigned long qd = iolat->blkiolat->rqos.q->nr_requests;
	unsigned long scale = scale_amount(qd, up);
	unsigned long old = iolat->rq_depth.max_depth;

	if (old > qd)
		old = qd;

	if (up) {
		if (old == 1 && blkcg_unuse_delay(lat_to_blkg(iolat)))
			return;

		if (old < qd) {
			old += scale;
			old = min(old, qd);
			iolat->rq_depth.max_depth = old;
			wake_up_all(&iolat->rq_wait.wait);
		}
	} else {
		old >>= 1;
		iolat->rq_depth.max_depth = max(old, 1UL);
	}
}

/* Check our parent and see if the scale cookie has changed. */
static void check_scale_change(struct iolatency_grp *iolat)
{
	struct iolatency_grp *parent;
	struct child_latency_info *lat_info;
	unsigned int cur_cookie;
	unsigned int our_cookie = atomic_read(&iolat->scale_cookie);
	u64 scale_lat;
	unsigned int old;
	int direction = 0;

	if (lat_to_blkg(iolat)->parent == NULL)
		return;

	parent = blkg_to_lat(lat_to_blkg(iolat)->parent);
	if (!parent)
		return;

	lat_info = &parent->child_lat;
	cur_cookie = atomic_read(&lat_info->scale_cookie);
	scale_lat = READ_ONCE(lat_info->scale_lat);

	if (cur_cookie < our_cookie)
		direction = -1;
	else if (cur_cookie > our_cookie)
		direction = 1;
	else
		return;

	old = atomic_cmpxchg(&iolat->scale_cookie, our_cookie, cur_cookie);

	/* Somebody beat us to the punch, just bail. */
	if (old != our_cookie)
		return;

	if (direction < 0 && iolat->min_lat_nsec) {
		u64 samples_thresh;

		if (!scale_lat || iolat->min_lat_nsec <= scale_lat)
			return;

		/*
		 * Sometimes high priority groups are their own worst enemy, so
		 * instead of taking it out on some poor other group that did 5%
		 * or less of the IO's for the last summation just skip this
		 * scale down event.
		 */
		samples_thresh = lat_info->nr_samples * 5;
		samples_thresh = max(1ULL, div64_u64(samples_thresh, 100));
		if (iolat->nr_samples <= samples_thresh)
			return;
	}

	/* We're as low as we can go. */
	if (iolat->rq_depth.max_depth == 1 && direction < 0) {
		blkcg_use_delay(lat_to_blkg(iolat));
		return;
	}

	/* We're back to the default cookie, unthrottle all the things. */
	if (cur_cookie == DEFAULT_SCALE_COOKIE) {
		blkcg_clear_delay(lat_to_blkg(iolat));
		iolat->rq_depth.max_depth = UINT_MAX;
		wake_up_all(&iolat->rq_wait.wait);
		return;
	}

	scale_change(iolat, direction > 0);
}

static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio,
				     spinlock_t *lock)
{
	struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;
	struct request_queue *q = rqos->q;
	bool issue_as_root = bio_issue_as_root_blkg(bio);

	if (!blk_iolatency_enabled(blkiolat))
		return;

	rcu_read_lock();
	blkcg = bio_blkcg(bio);
	bio_associate_blkcg(bio, &blkcg->css);
	blkg = blkg_lookup(blkcg, q);
	if (unlikely(!blkg)) {
		if (!lock)
			spin_lock_irq(q->queue_lock);
		blkg = blkg_lookup_create(blkcg, q);
		if (IS_ERR(blkg))
			blkg = NULL;
		if (!lock)
			spin_unlock_irq(q->queue_lock);
	}
	if (!blkg)
		goto out;

	bio_issue_init(&bio->bi_issue, bio_sectors(bio));
	bio_associate_blkg(bio, blkg);
out:
	rcu_read_unlock();
	while (blkg && blkg->parent) {
		struct iolatency_grp *iolat = blkg_to_lat(blkg);
		if (!iolat) {
			blkg = blkg->parent;
			continue;
		}

		check_scale_change(iolat);
		__blkcg_iolatency_throttle(rqos, iolat, lock, issue_as_root,
					   (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
		blkg = blkg->parent;
	}
	if (!timer_pending(&blkiolat->timer))
		mod_timer(&blkiolat->timer, jiffies + HZ);
}

static void iolatency_record_time(struct iolatency_grp *iolat,
				  struct bio_issue *issue, u64 now,
				  bool issue_as_root)
{
	u64 start = bio_issue_time(issue);
	u64 req_time;

	/*
	 * Have to do this so we are truncated to the correct time that our
	 * issue is truncated to.
	 */
	now = __bio_issue_time(now);

	if (now <= start)
		return;

	req_time = now - start;

	/*
	 * We don't want to count issue_as_root bio's in the cgroups latency
	 * statistics as it could skew the numbers downwards.
	 */
	if (unlikely(issue_as_root && iolat->rq_depth.max_depth != UINT_MAX)) {
		u64 sub = iolat->min_lat_nsec;
		if (req_time < sub)
			blkcg_add_delay(lat_to_blkg(iolat), now, sub - req_time);
		return;
	}

	latency_stat_record_time(iolat, req_time);
}

#define BLKIOLATENCY_MIN_ADJUST_TIME (500 * NSEC_PER_MSEC)
#define BLKIOLATENCY_MIN_GOOD_SAMPLES 5

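/*
 * Called when a group's latency window has elapsed: fold the per-cpu window
 * stats into the parent's accounting and, if enough time has passed since the
 * last scale event, nudge the parent's scale cookie up (we met our target) or
 * down (we missed it) for our peers to react to.
 */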
static void iolatency_check_latencies(struct iolatency_grp *iolat, u64 now)
{
	struct blkcg_gq *blkg = lat_to_blkg(iolat);
	struct iolatency_grp *parent;
	struct child_latency_info *lat_info;
	struct latency_stat stat;
	unsigned long flags;
	int cpu;

	latency_stat_init(iolat, &stat);
	preempt_disable();
	for_each_online_cpu(cpu) {
		struct latency_stat *s;
		s = per_cpu_ptr(iolat->stats, cpu);
		latency_stat_sum(iolat, &stat, s);
		latency_stat_init(iolat, s);
	}
	preempt_enable();

	parent = blkg_to_lat(blkg->parent);
	if (!parent)
		return;

	lat_info = &parent->child_lat;

	iolat_update_total_lat_avg(iolat, &stat);

	/* Everything is ok and we don't need to adjust the scale. */
	if (latency_sum_ok(iolat, &stat) &&
	    atomic_read(&lat_info->scale_cookie) == DEFAULT_SCALE_COOKIE)
		return;

	/* Somebody beat us to the punch, just bail. */
	spin_lock_irqsave(&lat_info->lock, flags);

	latency_stat_sum(iolat, &iolat->cur_stat, &stat);
	lat_info->nr_samples -= iolat->nr_samples;
	lat_info->nr_samples += latency_stat_samples(iolat, &iolat->cur_stat);
	iolat->nr_samples = latency_stat_samples(iolat, &iolat->cur_stat);

	if ((lat_info->last_scale_event >= now ||
	    now - lat_info->last_scale_event < BLKIOLATENCY_MIN_ADJUST_TIME))
		goto out;

	if (latency_sum_ok(iolat, &iolat->cur_stat) &&
	    latency_sum_ok(iolat, &stat)) {
		if (latency_stat_samples(iolat, &iolat->cur_stat) <
		    BLKIOLATENCY_MIN_GOOD_SAMPLES)
			goto out;
		if (lat_info->scale_grp == iolat) {
			lat_info->last_scale_event = now;
			scale_cookie_change(iolat->blkiolat, lat_info, true);
		}
	} else if (lat_info->scale_lat == 0 ||
		   lat_info->scale_lat >= iolat->min_lat_nsec) {
		lat_info->last_scale_event = now;
		if (!lat_info->scale_grp ||
		    lat_info->scale_lat > iolat->min_lat_nsec) {
			WRITE_ONCE(lat_info->scale_lat, iolat->min_lat_nsec);
			lat_info->scale_grp = iolat;
		}
		scale_cookie_change(iolat->blkiolat, lat_info, false);
	}
	latency_stat_init(iolat, &iolat->cur_stat);
out:
	spin_unlock_irqrestore(&lat_info->lock, flags);
}

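/*
 * Completion side: drop the inflight count at every level of the hierarchy,
 * record the bio's latency, and if this group's window has elapsed try to
 * atomically roll window_start forward so exactly one completer runs
 * iolatency_check_latencies() for the window.
 */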
static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
{
	struct blkcg_gq *blkg;
	struct rq_wait *rqw;
	struct iolatency_grp *iolat;
	u64 window_start;
	u64 now = ktime_to_ns(ktime_get());
	bool issue_as_root = bio_issue_as_root_blkg(bio);
	bool enabled = false;

	blkg = bio->bi_blkg;
	if (!blkg)
		return;

	iolat = blkg_to_lat(bio->bi_blkg);
	if (!iolat)
		return;

	enabled = blk_iolatency_enabled(iolat->blkiolat);
	while (blkg && blkg->parent) {
		iolat = blkg_to_lat(blkg);
		if (!iolat) {
			blkg = blkg->parent;
			continue;
		}
		rqw = &iolat->rq_wait;

		atomic_dec(&rqw->inflight);
		if (!enabled || iolat->min_lat_nsec == 0)
			goto next;
		iolatency_record_time(iolat, &bio->bi_issue, now,
				      issue_as_root);
		window_start = atomic64_read(&iolat->window_start);
		if (now > window_start &&
		    (now - window_start) >= iolat->cur_win_nsec) {
			if (atomic64_cmpxchg(&iolat->window_start,
					window_start, now) == window_start)
				iolatency_check_latencies(iolat, now);
		}
next:
		wake_up(&rqw->wait);
		blkg = blkg->parent;
	}
}

static void blkcg_iolatency_cleanup(struct rq_qos *rqos, struct bio *bio)
{
	struct blkcg_gq *blkg;

	blkg = bio->bi_blkg;
	while (blkg && blkg->parent) {
		struct rq_wait *rqw;
		struct iolatency_grp *iolat;

		iolat = blkg_to_lat(blkg);
		if (!iolat)
			goto next;

		rqw = &iolat->rq_wait;
		atomic_dec(&rqw->inflight);
		wake_up(&rqw->wait);
next:
		blkg = blkg->parent;
	}
}

static void blkcg_iolatency_exit(struct rq_qos *rqos)
{
	struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);

	del_timer_sync(&blkiolat->timer);
	blkcg_deactivate_policy(rqos->q, &blkcg_policy_iolatency);
	kfree(blkiolat);
}

static struct rq_qos_ops blkcg_iolatency_ops = {
	.throttle = blkcg_iolatency_throttle,
	.cleanup = blkcg_iolatency_cleanup,
	.done_bio = blkcg_iolatency_done_bio,
	.exit = blkcg_iolatency_exit,
};

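/*
 * Periodic unthrottle check, re-armed from the throttle path roughly once a
 * second while IO is flowing. For any parent whose children are still scaled
 * down it scales back up when no group is currently driving the scale-down,
 * and forgets the scale_grp once it has been quiet for 5 seconds.
 */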
static void blkiolatency_timer_fn(struct timer_list *t)
{
	struct blk_iolatency *blkiolat = from_timer(blkiolat, t, timer);
	struct blkcg_gq *blkg;
	struct cgroup_subsys_state *pos_css;
	u64 now = ktime_to_ns(ktime_get());

	rcu_read_lock();
	blkg_for_each_descendant_pre(blkg, pos_css,
				     blkiolat->rqos.q->root_blkg) {
		struct iolatency_grp *iolat;
		struct child_latency_info *lat_info;
		unsigned long flags;
		u64 cookie;

		/*
		 * We could be exiting, don't access the pd unless we have a
		 * ref on the blkg.
		 */
		if (!blkg_try_get(blkg))
			continue;

		iolat = blkg_to_lat(blkg);
		if (!iolat)
			goto next;

		lat_info = &iolat->child_lat;
		cookie = atomic_read(&lat_info->scale_cookie);

		if (cookie >= DEFAULT_SCALE_COOKIE)
			goto next;

		spin_lock_irqsave(&lat_info->lock, flags);
		if (lat_info->last_scale_event >= now)
			goto next_lock;

		/*
		 * We scaled down but don't have a scale_grp, scale up and carry
		 * on.
		 */
		if (lat_info->scale_grp == NULL) {
			scale_cookie_change(iolat->blkiolat, lat_info, true);
			goto next_lock;
		}

		/*
		 * It's been 5 seconds since our last scale event, clear the
		 * scale grp in case the group that needed the scale down isn't
		 * doing any IO currently.
		 */
		if (now - lat_info->last_scale_event >=
		    ((u64)NSEC_PER_SEC * 5))
			lat_info->scale_grp = NULL;
next_lock:
		spin_unlock_irqrestore(&lat_info->lock, flags);
next:
		blkg_put(blkg);
	}
	rcu_read_unlock();
}

int blk_iolatency_init(struct request_queue *q)
{
	struct blk_iolatency *blkiolat;
	struct rq_qos *rqos;
	int ret;

	blkiolat = kzalloc(sizeof(*blkiolat), GFP_KERNEL);
	if (!blkiolat)
		return -ENOMEM;

	rqos = &blkiolat->rqos;
	rqos->id = RQ_QOS_CGROUP;
	rqos->ops = &blkcg_iolatency_ops;
	rqos->q = q;

	rq_qos_add(q, rqos);

	ret = blkcg_activate_policy(q, &blkcg_policy_iolatency);
	if (ret) {
		rq_qos_del(q, rqos);
		kfree(blkiolat);
		return ret;
	}

	timer_setup(&blkiolat->timer, blkiolatency_timer_fn, 0);

	return 0;
}

static void iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
{
	struct iolatency_grp *iolat = blkg_to_lat(blkg);
	struct blk_iolatency *blkiolat = iolat->blkiolat;
	u64 oldval = iolat->min_lat_nsec;

	iolat->min_lat_nsec = val;
	iolat->cur_win_nsec = max_t(u64, val << 4, BLKIOLATENCY_MIN_WIN_SIZE);
	iolat->cur_win_nsec = min_t(u64, iolat->cur_win_nsec,
				    BLKIOLATENCY_MAX_WIN_SIZE);

	if (!oldval && val)
		atomic_inc(&blkiolat->enabled);
	if (oldval && !val)
		atomic_dec(&blkiolat->enabled);
}

static void iolatency_clear_scaling(struct blkcg_gq *blkg)
{
	if (blkg->parent) {
		struct iolatency_grp *iolat = blkg_to_lat(blkg->parent);
		struct child_latency_info *lat_info;
		if (!iolat)
			return;

		lat_info = &iolat->child_lat;
		spin_lock(&lat_info->lock);
		atomic_set(&lat_info->scale_cookie, DEFAULT_SCALE_COOKIE);
		lat_info->last_scale_event = 0;
		lat_info->scale_grp = NULL;
		lat_info->scale_lat = 0;
		spin_unlock(&lat_info->lock);
	}
}

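/*
 * Parses writes to the cgroup2 io.latency file. The expected format, as
 * handled by blkg_conf_prep() plus the token loop below, is
 * "MAJ:MIN target=<usecs>", e.g. "8:16 target=750", with "target=max"
 * clearing the latency target for that device.
 */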
static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
				   size_t nbytes, loff_t off)
{
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct blkcg_gq *blkg;
	struct blkg_conf_ctx ctx;
	struct iolatency_grp *iolat;
	char *p, *tok;
	u64 lat_val = 0;
	u64 oldval;
	int ret;

	ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, buf, &ctx);
	if (ret)
		return ret;

	iolat = blkg_to_lat(ctx.blkg);
	p = ctx.body;

	ret = -EINVAL;
	while ((tok = strsep(&p, " "))) {
		char key[16];
		char val[21];	/* 18446744073709551616 */

		if (sscanf(tok, "%15[^=]=%20s", key, val) != 2)
			goto out;

		if (!strcmp(key, "target")) {
			u64 v;

			if (!strcmp(val, "max"))
				lat_val = 0;
			else if (sscanf(val, "%llu", &v) == 1)
				lat_val = v * NSEC_PER_USEC;
			else
				goto out;
		} else {
			goto out;
		}
	}

	/* Walk up the tree to see if our new val is lower than it should be. */
	blkg = ctx.blkg;
	oldval = iolat->min_lat_nsec;

	iolatency_set_min_lat_nsec(blkg, lat_val);
	if (oldval != iolat->min_lat_nsec) {
		iolatency_clear_scaling(blkg);
	}

	ret = 0;
out:
	blkg_conf_finish(&ctx);
	return ret ?: nbytes;
}

static u64 iolatency_prfill_limit(struct seq_file *sf,
				  struct blkg_policy_data *pd, int off)
{
	struct iolatency_grp *iolat = pd_to_lat(pd);
	const char *dname = blkg_dev_name(pd->blkg);

	if (!dname || !iolat->min_lat_nsec)
		return 0;
	seq_printf(sf, "%s target=%llu\n",
		   dname, div_u64(iolat->min_lat_nsec, NSEC_PER_USEC));
	return 0;
}

static int iolatency_print_limit(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  iolatency_prfill_limit,
			  &blkcg_policy_iolatency, seq_cft(sf)->private, false);
	return 0;
}

static size_t iolatency_ssd_stat(struct iolatency_grp *iolat, char *buf,
				 size_t size)
{
	struct latency_stat stat;
	int cpu;

	latency_stat_init(iolat, &stat);
	preempt_disable();
	for_each_online_cpu(cpu) {
		struct latency_stat *s;
		s = per_cpu_ptr(iolat->stats, cpu);
		latency_stat_sum(iolat, &stat, s);
	}
	preempt_enable();

	if (iolat->rq_depth.max_depth == UINT_MAX)
		return scnprintf(buf, size, " missed=%llu total=%llu depth=max",
				 (unsigned long long)stat.ps.missed,
				 (unsigned long long)stat.ps.total);
	return scnprintf(buf, size, " missed=%llu total=%llu depth=%u",
			 (unsigned long long)stat.ps.missed,
			 (unsigned long long)stat.ps.total,
			 iolat->rq_depth.max_depth);
}

static size_t iolatency_pd_stat(struct blkg_policy_data *pd, char *buf,
				size_t size)
{
	struct iolatency_grp *iolat = pd_to_lat(pd);
	unsigned long long avg_lat;
	unsigned long long cur_win;

	if (iolat->ssd)
		return iolatency_ssd_stat(iolat, buf, size);

	avg_lat = div64_u64(iolat->lat_avg, NSEC_PER_USEC);
	cur_win = div64_u64(iolat->cur_win_nsec, NSEC_PER_MSEC);
	if (iolat->rq_depth.max_depth == UINT_MAX)
		return scnprintf(buf, size, " depth=max avg_lat=%llu win=%llu",
				 avg_lat, cur_win);

	return scnprintf(buf, size, " depth=%u avg_lat=%llu win=%llu",
			 iolat->rq_depth.max_depth, avg_lat, cur_win);
}

static struct blkg_policy_data *iolatency_pd_alloc(gfp_t gfp, int node)
{
	struct iolatency_grp *iolat;

	iolat = kzalloc_node(sizeof(*iolat), gfp, node);
	if (!iolat)
		return NULL;
	iolat->stats = __alloc_percpu_gfp(sizeof(struct latency_stat),
					  __alignof__(struct latency_stat), gfp);
	if (!iolat->stats) {
		kfree(iolat);
		return NULL;
	}
	return &iolat->pd;
}

static void iolatency_pd_init(struct blkg_policy_data *pd)
{
	struct iolatency_grp *iolat = pd_to_lat(pd);
	struct blkcg_gq *blkg = lat_to_blkg(iolat);
	struct rq_qos *rqos = blkcg_rq_qos(blkg->q);
	struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
	u64 now = ktime_to_ns(ktime_get());
	int cpu;

	if (blk_queue_nonrot(blkg->q))
		iolat->ssd = true;
	else
		iolat->ssd = false;

	for_each_possible_cpu(cpu) {
		struct latency_stat *stat;
		stat = per_cpu_ptr(iolat->stats, cpu);
		latency_stat_init(iolat, stat);
	}

	latency_stat_init(iolat, &iolat->cur_stat);
	rq_wait_init(&iolat->rq_wait);
	spin_lock_init(&iolat->child_lat.lock);
	iolat->rq_depth.queue_depth = blkg->q->nr_requests;
	iolat->rq_depth.max_depth = UINT_MAX;
	iolat->rq_depth.default_depth = iolat->rq_depth.queue_depth;
	iolat->blkiolat = blkiolat;
	iolat->cur_win_nsec = 100 * NSEC_PER_MSEC;
	atomic64_set(&iolat->window_start, now);

	/*
	 * We init things in list order, so the pd for the parent may not be
	 * init'ed yet for whatever reason.
	 */
	if (blkg->parent && blkg_to_pd(blkg->parent, &blkcg_policy_iolatency)) {
		struct iolatency_grp *parent = blkg_to_lat(blkg->parent);
		atomic_set(&iolat->scale_cookie,
			   atomic_read(&parent->child_lat.scale_cookie));
	} else {
		atomic_set(&iolat->scale_cookie, DEFAULT_SCALE_COOKIE);
	}

	atomic_set(&iolat->child_lat.scale_cookie, DEFAULT_SCALE_COOKIE);
}

static void iolatency_pd_offline(struct blkg_policy_data *pd)
{
	struct iolatency_grp *iolat = pd_to_lat(pd);
	struct blkcg_gq *blkg = lat_to_blkg(iolat);

	iolatency_set_min_lat_nsec(blkg, 0);
	iolatency_clear_scaling(blkg);
}

static void iolatency_pd_free(struct blkg_policy_data *pd)
{
	struct iolatency_grp *iolat = pd_to_lat(pd);
	free_percpu(iolat->stats);
	kfree(iolat);
}

static struct cftype iolatency_files[] = {
	{
		.name = "latency",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = iolatency_print_limit,
		.write = iolatency_set_limit,
	},
	{}
};

static struct blkcg_policy blkcg_policy_iolatency = {
	.dfl_cftypes = iolatency_files,
	.pd_alloc_fn = iolatency_pd_alloc,
	.pd_init_fn = iolatency_pd_init,
	.pd_offline_fn = iolatency_pd_offline,
	.pd_free_fn = iolatency_pd_free,
	.pd_stat_fn = iolatency_pd_stat,
};

static int __init iolatency_init(void)
{
	return blkcg_policy_register(&blkcg_policy_iolatency);
}

static void __exit iolatency_exit(void)
{
	return blkcg_policy_unregister(&blkcg_policy_iolatency);
}

module_init(iolatency_init);
module_exit(iolatency_exit);