/*
 * Block rq-qos base io controller
 *
 * This works similarly to wbt with a few exceptions
 *
 * - It's bio based, so the latency covers the whole block layer in addition to
 *   the actual io.
 * - We will throttle all IO that comes in here if we need to.
 * - We use the mean latency over the 100ms window. This is because writes can
 *   be particularly fast, which could give us a false sense of the impact of
 *   other workloads on our protected workload.
 * - By default there's no throttling, we set the queue_depth to UINT_MAX so
 *   that we can have as many outstanding bio's as we're allowed to. Only at
 *   throttle time do we pay attention to the actual queue depth.
 *
 * The hierarchy works like the cpu controller does, we track the latency at
 * every configured node, and each configured node has its own independent
 * queue depth. This means that we only care about our latency targets at the
 * peer level. Some group at the bottom of the hierarchy isn't going to affect
 * a group at the end of some other path if we're only configured at leaf level.
 *
 * Consider the following
 *
 *                   root blkg
 *                  /         \
 *         fast (target=5ms)   slow (target=10ms)
 *           /     \                /        \
 *         a        b        normal(15ms)   unloved
 *
 * "a" and "b" have no target, but their combined io under "fast" cannot exceed
 * an average latency of 5ms. If it does then we will throttle the "slow"
 * group. In the case of "normal", if it exceeds its 15ms target, we will
 * throttle "unloved", but nobody else.
 *
 * In this example "fast", "slow", and "normal" will be the only groups actually
 * accounting their io latencies. We have to walk up the hierarchy to the root
 * on every submit and complete so we can do the appropriate stat recording and
 * adjust the queue depth of ourselves if needed.
 *
 * There are 2 ways we throttle IO.
 *
 * 1) Queue depth throttling. As we throttle down we will adjust the maximum
 * number of IO's we're allowed to have in flight. This starts at (u64)-1 down
 * to 1. If the group is only ever submitting IO for itself then this is the
 * only way we throttle.
 *
 * 2) Induced delay throttling. This is for the case that a group is generating
 * IO that has to be issued by the root cg to avoid priority inversion. So think
 * REQ_META or REQ_SWAP. If we are already at qd == 1 and we're getting a lot
 * of work done for us on behalf of the root cg and are being asked to scale
 * down more then we induce a latency at userspace return. We accumulate the
 * total amount of time we need to be punished by doing
 *
 * total_time += min_lat_nsec - actual_io_completion
 *
 * and then at throttle time will do
 *
 * throttle_time = min(total_time, NSEC_PER_SEC)
 *
 * This induced delay will throttle back the activity that is generating the
 * root cg issued io's, whether that's some metadata intensive operation or the
 * group is using so much memory that it is pushing us into swap.
 *
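 * As a purely illustrative example (numbers are made up, not from the code):
 * with min_lat_nsec = 10ms, a root-issued IO done on our behalf that completes
 * in 2ms adds 10ms - 2ms = 8ms to total_time; after 125 such IOs total_time
 * reaches 1s, at which point the min(total_time, NSEC_PER_SEC) clamp caps the
 * delay applied at userspace return to one second.
 *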
 * Copyright (C) 2018 Josef Bacik
 */
#include <linux/kernel.h>
#include <linux/blk_types.h>
#include <linux/backing-dev.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/memcontrol.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/signal.h>
#include <trace/events/block.h>
#include "blk-rq-qos.h"
#include "blk-stat.h"

#define DEFAULT_SCALE_COOKIE 1000000U

static struct blkcg_policy blkcg_policy_iolatency;
struct iolatency_grp;

struct blk_iolatency {
	struct rq_qos rqos;
	struct timer_list timer;
	atomic_t enabled;
};

static inline struct blk_iolatency *BLKIOLATENCY(struct rq_qos *rqos)
{
	return container_of(rqos, struct blk_iolatency, rqos);
}

static inline bool blk_iolatency_enabled(struct blk_iolatency *blkiolat)
{
	return atomic_read(&blkiolat->enabled) > 0;
}

struct child_latency_info {
	spinlock_t lock;

	/* Last time we adjusted the scale of everybody. */
	u64 last_scale_event;

	/* The latency that we missed. */
	u64 scale_lat;

	/* Total io's from all of our children for the last summation. */
	u64 nr_samples;

	/* The guy who actually changed the latency numbers. */
	struct iolatency_grp *scale_grp;

	/* Cookie to tell if we need to scale up or down. */
	atomic_t scale_cookie;
};

struct iolatency_grp {
	struct blkg_policy_data pd;
	struct blk_rq_stat __percpu *stats;
	struct blk_iolatency *blkiolat;
	struct rq_depth rq_depth;
	struct rq_wait rq_wait;
	atomic64_t window_start;
	atomic_t scale_cookie;
	u64 min_lat_nsec;
	u64 cur_win_nsec;

	/* total running average of our io latency. */
	u64 lat_avg;

	/* Our current number of IO's for the last summation. */
	u64 nr_samples;

	struct child_latency_info child_lat;
};

#define BLKIOLATENCY_MIN_WIN_SIZE (100 * NSEC_PER_MSEC)
#define BLKIOLATENCY_MAX_WIN_SIZE NSEC_PER_SEC
/*
 * These are the constants used to fake the fixed-point moving average
 * calculation just like load average. The call to CALC_LOAD folds
 * (FIXED_1 (2048) - exp_factor) * new_sample into lat_avg. The sampling
 * window size is bucketed to try to approximately calculate average
 * latency such that 1/exp (decay rate) is [1 min, 2.5 min) when windows
 * elapse immediately. Note, windows only elapse with IO activity. Idle
 * periods extend the most recent window.
 */
#define BLKIOLATENCY_NR_EXP_FACTORS 5
#define BLKIOLATENCY_EXP_BUCKET_SIZE (BLKIOLATENCY_MAX_WIN_SIZE / \
				      (BLKIOLATENCY_NR_EXP_FACTORS - 1))
static const u64 iolatency_exp_factors[BLKIOLATENCY_NR_EXP_FACTORS] = {
	2045, // exp(1/600) - 600 samples
	2039, // exp(1/240) - 240 samples
	2031, // exp(1/120) - 120 samples
	2023, // exp(1/80) - 80 samples
	2014, // exp(1/60) - 60 samples
};
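/*
 * Sketch of one CALC_LOAD step, for illustration only (not part of the
 * original file, and assuming the usual FIXED_1 = 2048, FSHIFT = 11
 * definitions from sched/loadavg.h). With the 100ms-window factor 2045,
 * each elapsed window updates the running average roughly as
 *
 *	lat_avg = (lat_avg * 2045 + stat.mean * (2048 - 2045)) >> 11
 *
 * i.e. the newest window's mean contributes about 3/2048 of its value, so
 * on the order of 600 back-to-back windows are needed for old history to
 * decay away.
 */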

static inline struct iolatency_grp *pd_to_lat(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct iolatency_grp, pd) : NULL;
}

static inline struct iolatency_grp *blkg_to_lat(struct blkcg_gq *blkg)
{
	return pd_to_lat(blkg_to_pd(blkg, &blkcg_policy_iolatency));
}

static inline struct blkcg_gq *lat_to_blkg(struct iolatency_grp *iolat)
{
	return pd_to_blkg(&iolat->pd);
}

static inline bool iolatency_may_queue(struct iolatency_grp *iolat,
				       wait_queue_entry_t *wait,
				       bool first_block)
{
	struct rq_wait *rqw = &iolat->rq_wait;

	if (first_block && waitqueue_active(&rqw->wait) &&
	    rqw->wait.head.next != &wait->entry)
		return false;
	return rq_wait_inc_below(rqw, iolat->rq_depth.max_depth);
}

static void __blkcg_iolatency_throttle(struct rq_qos *rqos,
				       struct iolatency_grp *iolat,
				       spinlock_t *lock, bool issue_as_root,
				       bool use_memdelay)
	__releases(lock)
	__acquires(lock)
{
	struct rq_wait *rqw = &iolat->rq_wait;
	unsigned use_delay = atomic_read(&lat_to_blkg(iolat)->use_delay);
	DEFINE_WAIT(wait);
	bool first_block = true;

	if (use_delay)
		blkcg_schedule_throttle(rqos->q, use_memdelay);

	/*
	 * To avoid priority inversions we want to just take a slot if we are
	 * issuing as root. If we're being killed off there's no point in
	 * delaying things, we may have been killed by OOM so throttling may
	 * make recovery take even longer, so just let the IO's through so the
	 * task can go away.
	 */
	if (issue_as_root || fatal_signal_pending(current)) {
		atomic_inc(&rqw->inflight);
		return;
	}

	if (iolatency_may_queue(iolat, &wait, first_block))
		return;

	do {
		prepare_to_wait_exclusive(&rqw->wait, &wait,
					  TASK_UNINTERRUPTIBLE);

		if (iolatency_may_queue(iolat, &wait, first_block))
			break;
		first_block = false;

		if (lock) {
			spin_unlock_irq(lock);
			io_schedule();
			spin_lock_irq(lock);
		} else {
			io_schedule();
		}
	} while (1);

	finish_wait(&rqw->wait, &wait);
}

#define SCALE_DOWN_FACTOR 2
#define SCALE_UP_FACTOR 4

static inline unsigned long scale_amount(unsigned long qd, bool up)
{
	return max(up ? qd >> SCALE_UP_FACTOR : qd >> SCALE_DOWN_FACTOR, 1UL);
}
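/*
 * Worked example, purely illustrative (not part of the original file): with
 * nr_requests = 128, scale_amount() returns 128 >> 4 = 8 for an upward step
 * and 128 >> 2 = 32 for a downward step, i.e. we throttle down in larger
 * increments than we unthrottle.
 */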

/*
 * We scale the qd down faster than we scale up, so we need to use this helper
 * to adjust the scale_cookie accordingly so we don't prematurely get
 * scale_cookie at DEFAULT_SCALE_COOKIE and unthrottle too much.
 *
 * Each group has its own local copy of the last scale cookie it saw, so if
 * the global scale cookie goes up or down it knows which way it needs to go
 * based on its last knowledge of it.
 */
static void scale_cookie_change(struct blk_iolatency *blkiolat,
				struct child_latency_info *lat_info,
				bool up)
{
	unsigned long qd = blkiolat->rqos.q->nr_requests;
	unsigned long scale = scale_amount(qd, up);
	unsigned long old = atomic_read(&lat_info->scale_cookie);
	unsigned long max_scale = qd << 1;
	unsigned long diff = 0;

	if (old < DEFAULT_SCALE_COOKIE)
		diff = DEFAULT_SCALE_COOKIE - old;

	if (up) {
		if (scale + old > DEFAULT_SCALE_COOKIE)
			atomic_set(&lat_info->scale_cookie,
				   DEFAULT_SCALE_COOKIE);
		else if (diff > qd)
			atomic_inc(&lat_info->scale_cookie);
		else
			atomic_add(scale, &lat_info->scale_cookie);
	} else {
		/*
		 * We don't want to dig a hole so deep that it takes us hours to
		 * dig out of it. Just enough that we don't throttle/unthrottle
		 * with jagged workloads but can still unthrottle once pressure
		 * has sufficiently dissipated.
		 */
		if (diff > qd) {
			if (diff < max_scale)
				atomic_dec(&lat_info->scale_cookie);
		} else {
			atomic_sub(scale, &lat_info->scale_cookie);
		}
	}
}
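/*
 * Rough walk-through with made-up numbers (not part of the original file),
 * assuming nr_requests = 128 so scale is 32 down / 8 up and max_scale = 256:
 * scale-down events pull the cookie below DEFAULT_SCALE_COOKIE in steps of 32
 * until it is more than 128 below, then by 1 at a time, and never more than
 * 256 below; scale-up events move it back by 1 while it is still more than
 * 128 below, then by 8, and finally snap it to DEFAULT_SCALE_COOKIE.
 */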

/*
 * Change the queue depth of the iolatency_grp. We add/subtract 1/16th of the
 * queue depth at a time so we don't get wild swings and hopefully dial in to
 * fairer distribution of the overall queue depth.
 */
static void scale_change(struct iolatency_grp *iolat, bool up)
{
	unsigned long qd = iolat->blkiolat->rqos.q->nr_requests;
	unsigned long scale = scale_amount(qd, up);
	unsigned long old = iolat->rq_depth.max_depth;

	if (old > qd)
		old = qd;

	if (up) {
		if (old == 1 && blkcg_unuse_delay(lat_to_blkg(iolat)))
			return;

		if (old < qd) {
			old += scale;
			old = min(old, qd);
			iolat->rq_depth.max_depth = old;
			wake_up_all(&iolat->rq_wait.wait);
		}
	} else {
		old >>= 1;
		iolat->rq_depth.max_depth = max(old, 1UL);
	}
}

/* Check our parent and see if the scale cookie has changed. */
static void check_scale_change(struct iolatency_grp *iolat)
{
	struct iolatency_grp *parent;
	struct child_latency_info *lat_info;
	unsigned int cur_cookie;
	unsigned int our_cookie = atomic_read(&iolat->scale_cookie);
	u64 scale_lat;
	unsigned int old;
	int direction = 0;

	if (lat_to_blkg(iolat)->parent == NULL)
		return;

	parent = blkg_to_lat(lat_to_blkg(iolat)->parent);
	if (!parent)
		return;

	lat_info = &parent->child_lat;
	cur_cookie = atomic_read(&lat_info->scale_cookie);
	scale_lat = READ_ONCE(lat_info->scale_lat);

	if (cur_cookie < our_cookie)
		direction = -1;
	else if (cur_cookie > our_cookie)
		direction = 1;
	else
		return;

	old = atomic_cmpxchg(&iolat->scale_cookie, our_cookie, cur_cookie);

	/* Somebody beat us to the punch, just bail. */
	if (old != our_cookie)
		return;

	if (direction < 0 && iolat->min_lat_nsec) {
		u64 samples_thresh;

		if (!scale_lat || iolat->min_lat_nsec <= scale_lat)
			return;

		/*
		 * Sometimes high priority groups are their own worst enemy, so
		 * instead of taking it out on some poor other group that did 5%
		 * or less of the IO's for the last summation just skip this
		 * scale down event.
		 */
		samples_thresh = lat_info->nr_samples * 5;
		samples_thresh = div64_u64(samples_thresh, 100);
		if (iolat->nr_samples <= samples_thresh)
			return;
	}

	/* We're as low as we can go. */
	if (iolat->rq_depth.max_depth == 1 && direction < 0) {
		blkcg_use_delay(lat_to_blkg(iolat));
		return;
	}

	/* We're back to the default cookie, unthrottle all the things. */
	if (cur_cookie == DEFAULT_SCALE_COOKIE) {
		blkcg_clear_delay(lat_to_blkg(iolat));
		iolat->rq_depth.max_depth = UINT_MAX;
		wake_up_all(&iolat->rq_wait.wait);
		return;
	}

	scale_change(iolat, direction > 0);
}

static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio,
				     spinlock_t *lock)
{
	struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
	struct blkcg_gq *blkg = bio->bi_blkg;
	bool issue_as_root = bio_issue_as_root_blkg(bio);

	if (!blk_iolatency_enabled(blkiolat))
		return;

	while (blkg && blkg->parent) {
		struct iolatency_grp *iolat = blkg_to_lat(blkg);
		if (!iolat) {
			blkg = blkg->parent;
			continue;
		}

		check_scale_change(iolat);
		__blkcg_iolatency_throttle(rqos, iolat, lock, issue_as_root,
					   (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
		blkg = blkg->parent;
	}
	if (!timer_pending(&blkiolat->timer))
		mod_timer(&blkiolat->timer, jiffies + HZ);
}

static void iolatency_record_time(struct iolatency_grp *iolat,
				  struct bio_issue *issue, u64 now,
				  bool issue_as_root)
{
	struct blk_rq_stat *rq_stat;
	u64 start = bio_issue_time(issue);
	u64 req_time;

	/*
	 * Truncate the completion time the same way the issue time was
	 * truncated so the two timestamps are directly comparable.
	 */
	now = __bio_issue_time(now);

	if (now <= start)
		return;

	req_time = now - start;

	/*
	 * We don't want to count issue_as_root bio's in the cgroups latency
	 * statistics as it could skew the numbers downwards.
	 */
	if (unlikely(issue_as_root && iolat->rq_depth.max_depth != UINT_MAX)) {
		u64 sub = iolat->min_lat_nsec;
		if (req_time < sub)
			blkcg_add_delay(lat_to_blkg(iolat), now, sub - req_time);
		return;
	}

	rq_stat = get_cpu_ptr(iolat->stats);
	blk_rq_stat_add(rq_stat, req_time);
	put_cpu_ptr(rq_stat);
}

#define BLKIOLATENCY_MIN_ADJUST_TIME (500 * NSEC_PER_MSEC)
#define BLKIOLATENCY_MIN_GOOD_SAMPLES 5

static void iolatency_check_latencies(struct iolatency_grp *iolat, u64 now)
{
	struct blkcg_gq *blkg = lat_to_blkg(iolat);
	struct iolatency_grp *parent;
	struct child_latency_info *lat_info;
	struct blk_rq_stat stat;
	unsigned long flags;
	int cpu, exp_idx;

	blk_rq_stat_init(&stat);
	preempt_disable();
	for_each_online_cpu(cpu) {
		struct blk_rq_stat *s;
		s = per_cpu_ptr(iolat->stats, cpu);
		blk_rq_stat_sum(&stat, s);
		blk_rq_stat_init(s);
	}
	preempt_enable();

	parent = blkg_to_lat(blkg->parent);
	if (!parent)
		return;

	lat_info = &parent->child_lat;

	/*
	 * CALC_LOAD takes in a number stored in fixed point representation.
	 * Because we are using this for IO time in ns, the values stored
	 * are significantly larger than the FIXED_1 denominator (2048).
	 * Therefore, rounding errors in the calculation are negligible and
	 * can be ignored.
	 */
	exp_idx = min_t(int, BLKIOLATENCY_NR_EXP_FACTORS - 1,
			div64_u64(iolat->cur_win_nsec,
				  BLKIOLATENCY_EXP_BUCKET_SIZE));
	CALC_LOAD(iolat->lat_avg, iolatency_exp_factors[exp_idx], stat.mean);

	/* Everything is ok and we don't need to adjust the scale. */
	if (stat.mean <= iolat->min_lat_nsec &&
	    atomic_read(&lat_info->scale_cookie) == DEFAULT_SCALE_COOKIE)
		return;

	/* Somebody beat us to the punch, just bail. */
	spin_lock_irqsave(&lat_info->lock, flags);
	lat_info->nr_samples -= iolat->nr_samples;
	lat_info->nr_samples += stat.nr_samples;
	iolat->nr_samples = stat.nr_samples;

	if ((lat_info->last_scale_event >= now ||
	     now - lat_info->last_scale_event < BLKIOLATENCY_MIN_ADJUST_TIME) &&
	    lat_info->scale_lat <= iolat->min_lat_nsec)
		goto out;

	if (stat.mean <= iolat->min_lat_nsec &&
	    stat.nr_samples >= BLKIOLATENCY_MIN_GOOD_SAMPLES) {
		if (lat_info->scale_grp == iolat) {
			lat_info->last_scale_event = now;
			scale_cookie_change(iolat->blkiolat, lat_info, true);
		}
	} else if (stat.mean > iolat->min_lat_nsec) {
		lat_info->last_scale_event = now;
		if (!lat_info->scale_grp ||
		    lat_info->scale_lat > iolat->min_lat_nsec) {
			WRITE_ONCE(lat_info->scale_lat, iolat->min_lat_nsec);
			lat_info->scale_grp = iolat;
		}
		scale_cookie_change(iolat->blkiolat, lat_info, false);
	}
out:
	spin_unlock_irqrestore(&lat_info->lock, flags);
}

static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
{
	struct blkcg_gq *blkg;
	struct rq_wait *rqw;
	struct iolatency_grp *iolat;
	u64 window_start;
	u64 now = ktime_to_ns(ktime_get());
	bool issue_as_root = bio_issue_as_root_blkg(bio);
	bool enabled = false;

	blkg = bio->bi_blkg;
	if (!blkg)
		return;

	iolat = blkg_to_lat(bio->bi_blkg);
	if (!iolat)
		return;

	enabled = blk_iolatency_enabled(iolat->blkiolat);
	while (blkg && blkg->parent) {
		iolat = blkg_to_lat(blkg);
		if (!iolat) {
			blkg = blkg->parent;
			continue;
		}
		rqw = &iolat->rq_wait;

		atomic_dec(&rqw->inflight);
		if (!enabled || iolat->min_lat_nsec == 0)
			goto next;
		iolatency_record_time(iolat, &bio->bi_issue, now,
				      issue_as_root);
		window_start = atomic64_read(&iolat->window_start);
		if (now > window_start &&
		    (now - window_start) >= iolat->cur_win_nsec) {
			if (atomic64_cmpxchg(&iolat->window_start,
					window_start, now) == window_start)
				iolatency_check_latencies(iolat, now);
		}
next:
		wake_up(&rqw->wait);
		blkg = blkg->parent;
	}
}

static void blkcg_iolatency_cleanup(struct rq_qos *rqos, struct bio *bio)
{
	struct blkcg_gq *blkg;

	blkg = bio->bi_blkg;
	while (blkg && blkg->parent) {
		struct rq_wait *rqw;
		struct iolatency_grp *iolat;

		iolat = blkg_to_lat(blkg);
		if (!iolat)
			goto next;

		rqw = &iolat->rq_wait;
		atomic_dec(&rqw->inflight);
		wake_up(&rqw->wait);
next:
		blkg = blkg->parent;
	}
}

static void blkcg_iolatency_exit(struct rq_qos *rqos)
{
	struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);

	del_timer_sync(&blkiolat->timer);
	blkcg_deactivate_policy(rqos->q, &blkcg_policy_iolatency);
	kfree(blkiolat);
}

static struct rq_qos_ops blkcg_iolatency_ops = {
	.throttle = blkcg_iolatency_throttle,
	.cleanup = blkcg_iolatency_cleanup,
	.done_bio = blkcg_iolatency_done_bio,
	.exit = blkcg_iolatency_exit,
};

static void blkiolatency_timer_fn(struct timer_list *t)
{
	struct blk_iolatency *blkiolat = from_timer(blkiolat, t, timer);
	struct blkcg_gq *blkg;
	struct cgroup_subsys_state *pos_css;
	u64 now = ktime_to_ns(ktime_get());

	rcu_read_lock();
	blkg_for_each_descendant_pre(blkg, pos_css,
				     blkiolat->rqos.q->root_blkg) {
		struct iolatency_grp *iolat;
		struct child_latency_info *lat_info;
		unsigned long flags;
		u64 cookie;

		/*
		 * We could be exiting, don't access the pd unless we have a
		 * ref on the blkg.
		 */
		if (!blkg_tryget(blkg))
			continue;

		iolat = blkg_to_lat(blkg);
		if (!iolat)
			goto next;

		lat_info = &iolat->child_lat;
		cookie = atomic_read(&lat_info->scale_cookie);

		if (cookie >= DEFAULT_SCALE_COOKIE)
			goto next;

		spin_lock_irqsave(&lat_info->lock, flags);
		if (lat_info->last_scale_event >= now)
			goto next_lock;

		/*
		 * We scaled down but don't have a scale_grp, scale up and carry
		 * on.
		 */
		if (lat_info->scale_grp == NULL) {
			scale_cookie_change(iolat->blkiolat, lat_info, true);
			goto next_lock;
		}

		/*
		 * It's been 5 seconds since our last scale event, clear the
		 * scale grp in case the group that needed the scale down isn't
		 * doing any IO currently.
		 */
		if (now - lat_info->last_scale_event >=
		    ((u64)NSEC_PER_SEC * 5))
			lat_info->scale_grp = NULL;
next_lock:
		spin_unlock_irqrestore(&lat_info->lock, flags);
next:
		blkg_put(blkg);
	}
	rcu_read_unlock();
}

int blk_iolatency_init(struct request_queue *q)
{
	struct blk_iolatency *blkiolat;
	struct rq_qos *rqos;
	int ret;

	blkiolat = kzalloc(sizeof(*blkiolat), GFP_KERNEL);
	if (!blkiolat)
		return -ENOMEM;

	rqos = &blkiolat->rqos;
	rqos->id = RQ_QOS_CGROUP;
	rqos->ops = &blkcg_iolatency_ops;
	rqos->q = q;

	rq_qos_add(q, rqos);

	ret = blkcg_activate_policy(q, &blkcg_policy_iolatency);
	if (ret) {
		rq_qos_del(q, rqos);
		kfree(blkiolat);
		return ret;
	}

	timer_setup(&blkiolat->timer, blkiolatency_timer_fn, 0);

	return 0;
}

static void iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
{
	struct iolatency_grp *iolat = blkg_to_lat(blkg);
	struct blk_iolatency *blkiolat = iolat->blkiolat;
	u64 oldval = iolat->min_lat_nsec;

	iolat->min_lat_nsec = val;
	iolat->cur_win_nsec = max_t(u64, val << 4, BLKIOLATENCY_MIN_WIN_SIZE);
	iolat->cur_win_nsec = min_t(u64, iolat->cur_win_nsec,
				    BLKIOLATENCY_MAX_WIN_SIZE);

	if (!oldval && val)
		atomic_inc(&blkiolat->enabled);
	if (oldval && !val)
		atomic_dec(&blkiolat->enabled);
}
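
/*
 * Window-size arithmetic, illustrative only (not part of the original file):
 * a 5ms target gives cur_win_nsec = max(5ms << 4, 100ms) = 100ms, while a
 * 100ms target gives min(1.6s, 1s) = 1s, so the sampling window is always
 * clamped to [BLKIOLATENCY_MIN_WIN_SIZE, BLKIOLATENCY_MAX_WIN_SIZE].
 */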

static void iolatency_clear_scaling(struct blkcg_gq *blkg)
{
	if (blkg->parent) {
		struct iolatency_grp *iolat = blkg_to_lat(blkg->parent);
		struct child_latency_info *lat_info;
		if (!iolat)
			return;

		lat_info = &iolat->child_lat;
		spin_lock(&lat_info->lock);
		atomic_set(&lat_info->scale_cookie, DEFAULT_SCALE_COOKIE);
		lat_info->last_scale_event = 0;
		lat_info->scale_grp = NULL;
		lat_info->scale_lat = 0;
		spin_unlock(&lat_info->lock);
	}
}

static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
				   size_t nbytes, loff_t off)
{
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct blkcg_gq *blkg;
	struct blkg_conf_ctx ctx;
	struct iolatency_grp *iolat;
	char *p, *tok;
	u64 lat_val = 0;
	u64 oldval;
	int ret;

	ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, buf, &ctx);
	if (ret)
		return ret;

	iolat = blkg_to_lat(ctx.blkg);
	p = ctx.body;

	ret = -EINVAL;
	while ((tok = strsep(&p, " "))) {
		char key[16];
		char val[21];	/* 18446744073709551616 */

		if (sscanf(tok, "%15[^=]=%20s", key, val) != 2)
			goto out;

		if (!strcmp(key, "target")) {
			u64 v;

			if (!strcmp(val, "max"))
				lat_val = 0;
			else if (sscanf(val, "%llu", &v) == 1)
				lat_val = v * NSEC_PER_USEC;
			else
				goto out;
		} else {
			goto out;
		}
	}

	/* Walk up the tree to see if our new val is lower than it should be. */
	blkg = ctx.blkg;
	oldval = iolat->min_lat_nsec;

	iolatency_set_min_lat_nsec(blkg, lat_val);
	if (oldval != iolat->min_lat_nsec) {
		iolatency_clear_scaling(blkg);
	}

	ret = 0;
out:
	blkg_conf_finish(&ctx);
	return ret ?: nbytes;
}
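
/*
 * Interface sketch, purely illustrative (the device numbers are made up):
 * given the parser above, writing "8:16 target=2000" to this cgroup file
 * sets a 2000usec (2ms) latency target for device 8:16, while
 * "8:16 target=max" sets the target back to 0 and stops accounting for
 * that group.
 */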

static u64 iolatency_prfill_limit(struct seq_file *sf,
				  struct blkg_policy_data *pd, int off)
{
	struct iolatency_grp *iolat = pd_to_lat(pd);
	const char *dname = blkg_dev_name(pd->blkg);

	if (!dname || !iolat->min_lat_nsec)
		return 0;
	seq_printf(sf, "%s target=%llu\n",
		   dname, div_u64(iolat->min_lat_nsec, NSEC_PER_USEC));
	return 0;
}

static int iolatency_print_limit(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  iolatency_prfill_limit,
			  &blkcg_policy_iolatency, seq_cft(sf)->private, false);
	return 0;
}

static size_t iolatency_pd_stat(struct blkg_policy_data *pd, char *buf,
				size_t size)
{
	struct iolatency_grp *iolat = pd_to_lat(pd);
	unsigned long long avg_lat = div64_u64(iolat->lat_avg, NSEC_PER_USEC);
	unsigned long long cur_win = div64_u64(iolat->cur_win_nsec, NSEC_PER_MSEC);

	if (iolat->rq_depth.max_depth == UINT_MAX)
		return scnprintf(buf, size, " depth=max avg_lat=%llu win=%llu",
				 avg_lat, cur_win);

	return scnprintf(buf, size, " depth=%u avg_lat=%llu win=%llu",
			 iolat->rq_depth.max_depth, avg_lat, cur_win);
}
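
/*
 * Example stat output, values invented for illustration: an unthrottled
 * group reports " depth=max avg_lat=320 win=100", a throttled one
 * " depth=12 avg_lat=5400 win=100", with avg_lat in usec and win in msec
 * per the divisions above.
 */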

static struct blkg_policy_data *iolatency_pd_alloc(gfp_t gfp, int node)
{
	struct iolatency_grp *iolat;

	iolat = kzalloc_node(sizeof(*iolat), gfp, node);
	if (!iolat)
		return NULL;
	iolat->stats = __alloc_percpu_gfp(sizeof(struct blk_rq_stat),
					  __alignof__(struct blk_rq_stat), gfp);
	if (!iolat->stats) {
		kfree(iolat);
		return NULL;
	}
	return &iolat->pd;
}

static void iolatency_pd_init(struct blkg_policy_data *pd)
{
	struct iolatency_grp *iolat = pd_to_lat(pd);
	struct blkcg_gq *blkg = lat_to_blkg(iolat);
	struct rq_qos *rqos = blkcg_rq_qos(blkg->q);
	struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
	u64 now = ktime_to_ns(ktime_get());
	int cpu;

	for_each_possible_cpu(cpu) {
		struct blk_rq_stat *stat;
		stat = per_cpu_ptr(iolat->stats, cpu);
		blk_rq_stat_init(stat);
	}

	rq_wait_init(&iolat->rq_wait);
	spin_lock_init(&iolat->child_lat.lock);
	iolat->rq_depth.queue_depth = blkg->q->nr_requests;
	iolat->rq_depth.max_depth = UINT_MAX;
	iolat->rq_depth.default_depth = iolat->rq_depth.queue_depth;
	iolat->blkiolat = blkiolat;
	iolat->cur_win_nsec = 100 * NSEC_PER_MSEC;
	atomic64_set(&iolat->window_start, now);

	/*
	 * We init things in list order, so the pd for the parent may not be
	 * init'ed yet for whatever reason.
	 */
	if (blkg->parent && blkg_to_pd(blkg->parent, &blkcg_policy_iolatency)) {
		struct iolatency_grp *parent = blkg_to_lat(blkg->parent);
		atomic_set(&iolat->scale_cookie,
			   atomic_read(&parent->child_lat.scale_cookie));
	} else {
		atomic_set(&iolat->scale_cookie, DEFAULT_SCALE_COOKIE);
	}

	atomic_set(&iolat->child_lat.scale_cookie, DEFAULT_SCALE_COOKIE);
}

static void iolatency_pd_offline(struct blkg_policy_data *pd)
{
	struct iolatency_grp *iolat = pd_to_lat(pd);
	struct blkcg_gq *blkg = lat_to_blkg(iolat);

	iolatency_set_min_lat_nsec(blkg, 0);
	iolatency_clear_scaling(blkg);
}

static void iolatency_pd_free(struct blkg_policy_data *pd)
{
	struct iolatency_grp *iolat = pd_to_lat(pd);
	free_percpu(iolat->stats);
	kfree(iolat);
}

static struct cftype iolatency_files[] = {
	{
		.name = "latency",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = iolatency_print_limit,
		.write = iolatency_set_limit,
	},
	{}
};

static struct blkcg_policy blkcg_policy_iolatency = {
	.dfl_cftypes = iolatency_files,
	.pd_alloc_fn = iolatency_pd_alloc,
	.pd_init_fn = iolatency_pd_init,
	.pd_offline_fn = iolatency_pd_offline,
	.pd_free_fn = iolatency_pd_free,
	.pd_stat_fn = iolatency_pd_stat,
};

static int __init iolatency_init(void)
{
	return blkcg_policy_register(&blkcg_policy_iolatency);
}

static void __exit iolatency_exit(void)
{
	return blkcg_policy_unregister(&blkcg_policy_iolatency);
}

module_init(iolatency_init);
module_exit(iolatency_exit);