block/blk-throttle.c
blkcg: embed struct blkg_policy_data in policy specific data
1 /*
2 * Interface for controlling IO bandwidth on a request queue
3 *
4 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
5 */
6
7 #include <linux/module.h>
8 #include <linux/slab.h>
9 #include <linux/blkdev.h>
10 #include <linux/bio.h>
11 #include <linux/blktrace_api.h>
12 #include "blk-cgroup.h"
13 #include "blk.h"
14
15 /* Max dispatch from a group in 1 round */
16 static int throtl_grp_quantum = 8;
17
18 /* Total max dispatch from all groups in one round */
19 static int throtl_quantum = 32;
20
21 /* Throttling is performed over 100ms slice and after that slice is renewed */
22 static unsigned long throtl_slice = HZ/10; /* 100 ms */
23
24 static struct blkcg_policy blkcg_policy_throtl;
25
26 /* A workqueue to queue throttle related work */
27 static struct workqueue_struct *kthrotld_workqueue;
28 static void throtl_schedule_delayed_work(struct throtl_data *td,
29 unsigned long delay);
30
31 struct throtl_rb_root {
32 struct rb_root rb;
33 struct rb_node *left;
34 unsigned int count;
35 unsigned long min_disptime;
36 };
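
/*
 * Note: the service tree below is an rbtree keyed by each group's
 * disptime (see tg_service_tree_add()).  ->left caches the leftmost
 * node, i.e. the group with the earliest dispatch time, so
 * throtl_rb_first() avoids a full tree walk in the common case, and
 * ->min_disptime mirrors that group's disptime for scheduling the
 * next dispatch.
 */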
37
38 #define THROTL_RB_ROOT (struct throtl_rb_root) { .rb = RB_ROOT, .left = NULL, \
39 .count = 0, .min_disptime = 0}
40
41 #define rb_entry_tg(node) rb_entry((node), struct throtl_grp, rb_node)
42
43 /* Per-cpu group stats */
44 struct tg_stats_cpu {
45 /* total bytes transferred */
46 struct blkg_rwstat service_bytes;
47 /* total IOs serviced, post merge */
48 struct blkg_rwstat serviced;
49 };
50
51 struct throtl_grp {
52 /* must be the first member */
53 struct blkg_policy_data pd;
54
55 /* active throtl group service_tree member */
56 struct rb_node rb_node;
57
58 /*
59 * Dispatch time in jiffies. This is the estimated time when the group
60 * will unthrottle and be ready to dispatch more bios. It is used as the
61 * key to sort active groups in the service tree.
62 */
63 unsigned long disptime;
64
65 unsigned int flags;
66
67 /* Two lists for READ and WRITE */
68 struct bio_list bio_lists[2];
69
70 /* Number of queued bios on READ and WRITE lists */
71 unsigned int nr_queued[2];
72
73 /* bytes per second rate limits */
74 uint64_t bps[2];
75
76 /* IOPS limits */
77 unsigned int iops[2];
78
79 /* Number of bytes dispatched in current slice */
80 uint64_t bytes_disp[2];
81 /* Number of bios dispatched in current slice */
82 unsigned int io_disp[2];
83
84 /* When did we start a new slice */
85 unsigned long slice_start[2];
86 unsigned long slice_end[2];
87
88 /* Some throttle limits got updated for the group */
89 int limits_changed;
90
91 /* Per cpu stats pointer */
92 struct tg_stats_cpu __percpu *stats_cpu;
93
94 /* List of tgs waiting for per cpu stats memory to be allocated */
95 struct list_head stats_alloc_node;
96 };
97
98 struct throtl_data
99 {
100 /* service tree for active throtl groups */
101 struct throtl_rb_root tg_service_tree;
102
103 struct request_queue *queue;
104
105 /* Total Number of queued bios on READ and WRITE lists */
106 unsigned int nr_queued[2];
107
108 /*
109 * number of total undestroyed groups
110 */
111 unsigned int nr_undestroyed_grps;
112
113 /* Work for dispatching throttled bios */
114 struct delayed_work throtl_work;
115
116 int limits_changed;
117 };
118
119 /* list and work item to allocate percpu group stats */
120 static DEFINE_SPINLOCK(tg_stats_alloc_lock);
121 static LIST_HEAD(tg_stats_alloc_list);
122
123 static void tg_stats_alloc_fn(struct work_struct *);
124 static DECLARE_DELAYED_WORK(tg_stats_alloc_work, tg_stats_alloc_fn);
125
126 static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
127 {
128 return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
129 }
130
131 static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
132 {
133 return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
134 }
135
136 static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
137 {
138 return pd_to_blkg(&tg->pd);
139 }
140
141 static inline struct throtl_grp *td_root_tg(struct throtl_data *td)
142 {
143 return blkg_to_tg(td->queue->root_blkg);
144 }
145
146 enum tg_state_flags {
147 THROTL_TG_FLAG_on_rr = 0, /* on round-robin busy list */
148 };
149
150 #define THROTL_TG_FNS(name) \
151 static inline void throtl_mark_tg_##name(struct throtl_grp *tg) \
152 { \
153 (tg)->flags |= (1 << THROTL_TG_FLAG_##name); \
154 } \
155 static inline void throtl_clear_tg_##name(struct throtl_grp *tg) \
156 { \
157 (tg)->flags &= ~(1 << THROTL_TG_FLAG_##name); \
158 } \
159 static inline int throtl_tg_##name(const struct throtl_grp *tg) \
160 { \
161 return ((tg)->flags & (1 << THROTL_TG_FLAG_##name)) != 0; \
162 }
163
164 THROTL_TG_FNS(on_rr);
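
/*
 * THROTL_TG_FNS(on_rr) above generates three helpers:
 *   throtl_mark_tg_on_rr(tg)  - set THROTL_TG_FLAG_on_rr in tg->flags
 *   throtl_clear_tg_on_rr(tg) - clear the flag
 *   throtl_tg_on_rr(tg)       - test the flag
 * They are used by the enqueue/dequeue helpers below to track whether a
 * group is currently on the round-robin service tree.
 */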
165
166 #define throtl_log_tg(td, tg, fmt, args...) do { \
167 char __pbuf[128]; \
168 \
169 blkg_path(tg_to_blkg(tg), __pbuf, sizeof(__pbuf)); \
170 blk_add_trace_msg((td)->queue, "throtl %s " fmt, __pbuf, ##args); \
171 } while (0)
172
173 #define throtl_log(td, fmt, args...) \
174 blk_add_trace_msg((td)->queue, "throtl " fmt, ##args)
175
176 static inline unsigned int total_nr_queued(struct throtl_data *td)
177 {
178 return td->nr_queued[0] + td->nr_queued[1];
179 }
180
181 /*
182 * Worker for allocating per cpu stat for tgs. This is scheduled on the
183 * system_nrt_wq once there are some groups on the alloc_list waiting for
184 * allocation.
185 */
186 static void tg_stats_alloc_fn(struct work_struct *work)
187 {
188 static struct tg_stats_cpu *stats_cpu; /* this fn is non-reentrant */
189 struct delayed_work *dwork = to_delayed_work(work);
190 bool empty = false;
191
192 alloc_stats:
193 if (!stats_cpu) {
194 stats_cpu = alloc_percpu(struct tg_stats_cpu);
195 if (!stats_cpu) {
196 /* allocation failed, try again after some time */
197 queue_delayed_work(system_nrt_wq, dwork,
198 msecs_to_jiffies(10));
199 return;
200 }
201 }
202
203 spin_lock_irq(&tg_stats_alloc_lock);
204
205 if (!list_empty(&tg_stats_alloc_list)) {
206 struct throtl_grp *tg = list_first_entry(&tg_stats_alloc_list,
207 struct throtl_grp,
208 stats_alloc_node);
209 swap(tg->stats_cpu, stats_cpu);
210 list_del_init(&tg->stats_alloc_node);
211 }
212
213 empty = list_empty(&tg_stats_alloc_list);
214 spin_unlock_irq(&tg_stats_alloc_lock);
215 if (!empty)
216 goto alloc_stats;
217 }
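
/*
 * Note: stats_cpu above is function-static, so a percpu allocation that
 * was not handed to any group (because the list turned out to be empty)
 * is kept across invocations and reused rather than leaked.
 */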
218
219 static void throtl_pd_init(struct blkcg_gq *blkg)
220 {
221 struct throtl_grp *tg = blkg_to_tg(blkg);
222
223 RB_CLEAR_NODE(&tg->rb_node);
224 bio_list_init(&tg->bio_lists[0]);
225 bio_list_init(&tg->bio_lists[1]);
226 tg->limits_changed = false;
227
228 tg->bps[READ] = -1;
229 tg->bps[WRITE] = -1;
230 tg->iops[READ] = -1;
231 tg->iops[WRITE] = -1;
232
233 /*
234 * Ugh... We need to perform per-cpu allocation for tg->stats_cpu
235 * but percpu allocator can't be called from IO path. Queue tg on
236 * tg_stats_alloc_list and allocate from work item.
237 */
238 spin_lock(&tg_stats_alloc_lock);
239 list_add(&tg->stats_alloc_node, &tg_stats_alloc_list);
240 queue_delayed_work(system_nrt_wq, &tg_stats_alloc_work, 0);
241 spin_unlock(&tg_stats_alloc_lock);
242 }
243
244 static void throtl_pd_exit(struct blkcg_gq *blkg)
245 {
246 struct throtl_grp *tg = blkg_to_tg(blkg);
247
248 spin_lock(&tg_stats_alloc_lock);
249 list_del_init(&tg->stats_alloc_node);
250 spin_unlock(&tg_stats_alloc_lock);
251
252 free_percpu(tg->stats_cpu);
253 }
254
255 static void throtl_pd_reset_stats(struct blkcg_gq *blkg)
256 {
257 struct throtl_grp *tg = blkg_to_tg(blkg);
258 int cpu;
259
260 if (tg->stats_cpu == NULL)
261 return;
262
263 for_each_possible_cpu(cpu) {
264 struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);
265
266 blkg_rwstat_reset(&sc->service_bytes);
267 blkg_rwstat_reset(&sc->serviced);
268 }
269 }
270
271 static struct throtl_grp *throtl_lookup_tg(struct throtl_data *td,
272 struct blkcg *blkcg)
273 {
274 /*
275 * This is the common case when there are no blkcgs. Avoid lookup
276 * in this case
277 */
278 if (blkcg == &blkcg_root)
279 return td_root_tg(td);
280
281 return blkg_to_tg(blkg_lookup(blkcg, td->queue));
282 }
283
284 static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
285 struct blkcg *blkcg)
286 {
287 struct request_queue *q = td->queue;
288 struct throtl_grp *tg = NULL;
289
290 /*
291 * This is the common case when there are no blkcgs. Avoid lookup
292 * in this case
293 */
294 if (blkcg == &blkcg_root) {
295 tg = td_root_tg(td);
296 } else {
297 struct blkcg_gq *blkg;
298
299 blkg = blkg_lookup_create(blkcg, q);
300
301 /* if %NULL and @q is alive, fall back to root_tg */
302 if (!IS_ERR(blkg))
303 tg = blkg_to_tg(blkg);
304 else if (!blk_queue_dead(q))
305 tg = td_root_tg(td);
306 }
307
308 return tg;
309 }
310
311 static struct throtl_grp *throtl_rb_first(struct throtl_rb_root *root)
312 {
313 /* Service tree is empty */
314 if (!root->count)
315 return NULL;
316
317 if (!root->left)
318 root->left = rb_first(&root->rb);
319
320 if (root->left)
321 return rb_entry_tg(root->left);
322
323 return NULL;
324 }
325
326 static void rb_erase_init(struct rb_node *n, struct rb_root *root)
327 {
328 rb_erase(n, root);
329 RB_CLEAR_NODE(n);
330 }
331
332 static void throtl_rb_erase(struct rb_node *n, struct throtl_rb_root *root)
333 {
334 if (root->left == n)
335 root->left = NULL;
336 rb_erase_init(n, &root->rb);
337 --root->count;
338 }
339
340 static void update_min_dispatch_time(struct throtl_rb_root *st)
341 {
342 struct throtl_grp *tg;
343
344 tg = throtl_rb_first(st);
345 if (!tg)
346 return;
347
348 st->min_disptime = tg->disptime;
349 }
350
351 static void
352 tg_service_tree_add(struct throtl_rb_root *st, struct throtl_grp *tg)
353 {
354 struct rb_node **node = &st->rb.rb_node;
355 struct rb_node *parent = NULL;
356 struct throtl_grp *__tg;
357 unsigned long key = tg->disptime;
358 int left = 1;
359
360 while (*node != NULL) {
361 parent = *node;
362 __tg = rb_entry_tg(parent);
363
364 if (time_before(key, __tg->disptime))
365 node = &parent->rb_left;
366 else {
367 node = &parent->rb_right;
368 left = 0;
369 }
370 }
371
372 if (left)
373 st->left = &tg->rb_node;
374
375 rb_link_node(&tg->rb_node, parent, node);
376 rb_insert_color(&tg->rb_node, &st->rb);
377 }
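
/*
 * Example: if the tree already holds groups with disptimes 100, 200 and
 * 300, inserting disptime 150 takes a right turn somewhere along the
 * walk, so 'left' ends up 0 and st->left is untouched; inserting
 * disptime 50 takes only left turns, so st->left is repointed at the
 * new node and throtl_rb_first() keeps O(1) access to the earliest
 * group.
 */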
378
379 static void __throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
380 {
381 struct throtl_rb_root *st = &td->tg_service_tree;
382
383 tg_service_tree_add(st, tg);
384 throtl_mark_tg_on_rr(tg);
385 st->count++;
386 }
387
388 static void throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
389 {
390 if (!throtl_tg_on_rr(tg))
391 __throtl_enqueue_tg(td, tg);
392 }
393
394 static void __throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
395 {
396 throtl_rb_erase(&tg->rb_node, &td->tg_service_tree);
397 throtl_clear_tg_on_rr(tg);
398 }
399
400 static void throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
401 {
402 if (throtl_tg_on_rr(tg))
403 __throtl_dequeue_tg(td, tg);
404 }
405
406 static void throtl_schedule_next_dispatch(struct throtl_data *td)
407 {
408 struct throtl_rb_root *st = &td->tg_service_tree;
409
410 /*
411 * If there are more bios pending, schedule more work; nothing to do otherwise.
412 */
413 if (!total_nr_queued(td))
414 return;
415
416 BUG_ON(!st->count);
417
418 update_min_dispatch_time(st);
419
420 if (time_before_eq(st->min_disptime, jiffies))
421 throtl_schedule_delayed_work(td, 0);
422 else
423 throtl_schedule_delayed_work(td, (st->min_disptime - jiffies));
424 }
425
426 static inline void
427 throtl_start_new_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
428 {
429 tg->bytes_disp[rw] = 0;
430 tg->io_disp[rw] = 0;
431 tg->slice_start[rw] = jiffies;
432 tg->slice_end[rw] = jiffies + throtl_slice;
433 throtl_log_tg(td, tg, "[%c] new slice start=%lu end=%lu jiffies=%lu",
434 rw == READ ? 'R' : 'W', tg->slice_start[rw],
435 tg->slice_end[rw], jiffies);
436 }
437
438 static inline void throtl_set_slice_end(struct throtl_data *td,
439 struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
440 {
441 tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
442 }
443
444 static inline void throtl_extend_slice(struct throtl_data *td,
445 struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
446 {
447 tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
448 throtl_log_tg(td, tg, "[%c] extend slice start=%lu end=%lu jiffies=%lu",
449 rw == READ ? 'R' : 'W', tg->slice_start[rw],
450 tg->slice_end[rw], jiffies);
451 }
452
453 /* Determine if previously allocated or extended slice is complete or not */
454 static bool
455 throtl_slice_used(struct throtl_data *td, struct throtl_grp *tg, bool rw)
456 {
457 if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
458 return 0;
459
460 return 1;
461 }
462
463 /* Trim the used slices and adjust slice start accordingly */
464 static inline void
465 throtl_trim_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
466 {
467 unsigned long nr_slices, time_elapsed, io_trim;
468 u64 bytes_trim, tmp;
469
470 BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));
471
472 /*
473 * If bps are unlimited (-1), the time slice doesn't get
474 * renewed. Don't try to trim the slice if the slice has expired. A
475 * new slice will start when appropriate.
476 */
477 if (throtl_slice_used(td, tg, rw))
478 return;
479
480 /*
481 * A bio has been dispatched. Also adjust slice_end. It might happen
482 * that initially the cgroup limit was very low, resulting in a high
483 * slice_end, but later the limit was bumped up and the bio was dispatched
484 * sooner; then we need to reduce slice_end. A bogus high slice_end
485 * is bad because it does not allow a new slice to start.
486 */
487
488 throtl_set_slice_end(td, tg, rw, jiffies + throtl_slice);
489
490 time_elapsed = jiffies - tg->slice_start[rw];
491
492 nr_slices = time_elapsed / throtl_slice;
493
494 if (!nr_slices)
495 return;
496 tmp = tg->bps[rw] * throtl_slice * nr_slices;
497 do_div(tmp, HZ);
498 bytes_trim = tmp;
499
500 io_trim = (tg->iops[rw] * throtl_slice * nr_slices)/HZ;
501
502 if (!bytes_trim && !io_trim)
503 return;
504
505 if (tg->bytes_disp[rw] >= bytes_trim)
506 tg->bytes_disp[rw] -= bytes_trim;
507 else
508 tg->bytes_disp[rw] = 0;
509
510 if (tg->io_disp[rw] >= io_trim)
511 tg->io_disp[rw] -= io_trim;
512 else
513 tg->io_disp[rw] = 0;
514
515 tg->slice_start[rw] += nr_slices * throtl_slice;
516
517 throtl_log_tg(td, tg, "[%c] trim slice nr=%lu bytes=%llu io=%lu"
518 " start=%lu end=%lu jiffies=%lu",
519 rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
520 tg->slice_start[rw], tg->slice_end[rw], jiffies);
521 }
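
/*
 * Worked example for the trim math above (assuming HZ=1000, so
 * throtl_slice is 100 jiffies): a group with bps=1048576 (1 MiB/s)
 * whose slice started 250 jiffies ago gets nr_slices = 250/100 = 2 and
 *   bytes_trim = 1048576 * 100 * 2 / 1000 = 209715 bytes,
 * i.e. roughly two slices' worth of budget is subtracted from
 * bytes_disp and slice_start advances by 200 jiffies.
 */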
522
523 static bool tg_with_in_iops_limit(struct throtl_data *td, struct throtl_grp *tg,
524 struct bio *bio, unsigned long *wait)
525 {
526 bool rw = bio_data_dir(bio);
527 unsigned int io_allowed;
528 unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
529 u64 tmp;
530
531 jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
532
533 /* Slice has just started. Consider one slice interval */
534 if (!jiffy_elapsed)
535 jiffy_elapsed_rnd = throtl_slice;
536
537 jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);
538
539 /*
540 * jiffy_elapsed_rnd should not be a big value: the minimum iops can be
541 * 1, so at most the elapsed jiffies should be equivalent to 1 second, as
542 * we will allow dispatch after 1 second and after that the slice should
543 * have been trimmed.
544 */
545
546 tmp = (u64)tg->iops[rw] * jiffy_elapsed_rnd;
547 do_div(tmp, HZ);
548
549 if (tmp > UINT_MAX)
550 io_allowed = UINT_MAX;
551 else
552 io_allowed = tmp;
553
554 if (tg->io_disp[rw] + 1 <= io_allowed) {
555 if (wait)
556 *wait = 0;
557 return 1;
558 }
559
560 /* Calc approx time to dispatch */
561 jiffy_wait = ((tg->io_disp[rw] + 1) * HZ)/tg->iops[rw] + 1;
562
563 if (jiffy_wait > jiffy_elapsed)
564 jiffy_wait = jiffy_wait - jiffy_elapsed;
565 else
566 jiffy_wait = 1;
567
568 if (wait)
569 *wait = jiffy_wait;
570 return 0;
571 }
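
/*
 * Example (assuming HZ=1000): with iops=100 and io_disp=10 right at the
 * start of a slice, jiffy_elapsed_rnd is taken as one throtl_slice (100
 * jiffies), so io_allowed = 100 * 100 / 1000 = 10.  An 11th bio is over
 * the limit and waits jiffy_wait = (11 * 1000)/100 + 1 = 111 jiffies,
 * roughly the point at which 11 IOs fit within the rate.
 */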
572
573 static bool tg_with_in_bps_limit(struct throtl_data *td, struct throtl_grp *tg,
574 struct bio *bio, unsigned long *wait)
575 {
576 bool rw = bio_data_dir(bio);
577 u64 bytes_allowed, extra_bytes, tmp;
578 unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
579
580 jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
581
582 /* Slice has just started. Consider one slice interval */
583 if (!jiffy_elapsed)
584 jiffy_elapsed_rnd = throtl_slice;
585
586 jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);
587
588 tmp = tg->bps[rw] * jiffy_elapsed_rnd;
589 do_div(tmp, HZ);
590 bytes_allowed = tmp;
591
592 if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed) {
593 if (wait)
594 *wait = 0;
595 return 1;
596 }
597
598 /* Calc approx time to dispatch */
599 extra_bytes = tg->bytes_disp[rw] + bio->bi_size - bytes_allowed;
600 jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);
601
602 if (!jiffy_wait)
603 jiffy_wait = 1;
604
605 /*
606 * This wait time does not take into consideration the rounding
607 * up we did. Add that time as well.
608 */
609 jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
610 if (wait)
611 *wait = jiffy_wait;
612 return 0;
613 }
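
/*
 * Example (assuming HZ=1000): with bps=1048576 and one full slice (100
 * jiffies) elapsed, bytes_allowed = 1048576 * 100 / 1000 = 104857.  A
 * 512 KiB bio arriving with bytes_disp=0 is over by 524288 - 104857 =
 * 419431 bytes, giving jiffy_wait = 419431 * 1000 / 1048576 = 400
 * jiffies before it fits within the rate.
 */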
614
615 static bool tg_no_rule_group(struct throtl_grp *tg, bool rw) {
616 if (tg->bps[rw] == -1 && tg->iops[rw] == -1)
617 return 1;
618 return 0;
619 }
620
621 /*
622 * Returns whether one can dispatch a bio or not. Also returns the approx number
623 * of jiffies to wait before this bio is within the IO rate and can be dispatched.
624 */
625 static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
626 struct bio *bio, unsigned long *wait)
627 {
628 bool rw = bio_data_dir(bio);
629 unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;
630
631 /*
632 * Currently the whole state machine of the group depends on the first
633 * bio queued in the group's bio list. So one should not be calling
634 * this function with a different bio if there are other bios
635 * queued.
636 */
637 BUG_ON(tg->nr_queued[rw] && bio != bio_list_peek(&tg->bio_lists[rw]));
638
639 /* If tg->bps = -1, then BW is unlimited */
640 if (tg->bps[rw] == -1 && tg->iops[rw] == -1) {
641 if (wait)
642 *wait = 0;
643 return 1;
644 }
645
646 /*
647 * If the previous slice expired, start a new one; otherwise renew/extend
648 * the existing slice to make sure it is at least one throtl_slice interval
649 * long from now.
650 */
651 if (throtl_slice_used(td, tg, rw))
652 throtl_start_new_slice(td, tg, rw);
653 else {
654 if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
655 throtl_extend_slice(td, tg, rw, jiffies + throtl_slice);
656 }
657
658 if (tg_with_in_bps_limit(td, tg, bio, &bps_wait)
659 && tg_with_in_iops_limit(td, tg, bio, &iops_wait)) {
660 if (wait)
661 *wait = 0;
662 return 1;
663 }
664
665 max_wait = max(bps_wait, iops_wait);
666
667 if (wait)
668 *wait = max_wait;
669
670 if (time_before(tg->slice_end[rw], jiffies + max_wait))
671 throtl_extend_slice(td, tg, rw, jiffies + max_wait);
672
673 return 0;
674 }
675
676 static void throtl_update_dispatch_stats(struct blkcg_gq *blkg, u64 bytes,
677 int rw)
678 {
679 struct throtl_grp *tg = blkg_to_tg(blkg);
680 struct tg_stats_cpu *stats_cpu;
681 unsigned long flags;
682
683 /* If per cpu stats are not allocated yet, don't do any accounting. */
684 if (tg->stats_cpu == NULL)
685 return;
686
687 /*
688 * Disabling interrupts to provide mutual exclusion between two
689 * writes on the same cpu. It probably is not needed for 64bit. Not
690 * optimizing that case yet.
691 */
692 local_irq_save(flags);
693
694 stats_cpu = this_cpu_ptr(tg->stats_cpu);
695
696 blkg_rwstat_add(&stats_cpu->serviced, rw, 1);
697 blkg_rwstat_add(&stats_cpu->service_bytes, rw, bytes);
698
699 local_irq_restore(flags);
700 }
701
702 static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
703 {
704 bool rw = bio_data_dir(bio);
705
706 /* Charge the bio to the group */
707 tg->bytes_disp[rw] += bio->bi_size;
708 tg->io_disp[rw]++;
709
710 throtl_update_dispatch_stats(tg_to_blkg(tg), bio->bi_size, bio->bi_rw);
711 }
712
713 static void throtl_add_bio_tg(struct throtl_data *td, struct throtl_grp *tg,
714 struct bio *bio)
715 {
716 bool rw = bio_data_dir(bio);
717
718 bio_list_add(&tg->bio_lists[rw], bio);
719 /* Take a bio reference on tg */
720 blkg_get(tg_to_blkg(tg));
721 tg->nr_queued[rw]++;
722 td->nr_queued[rw]++;
723 throtl_enqueue_tg(td, tg);
724 }
725
726 static void tg_update_disptime(struct throtl_data *td, struct throtl_grp *tg)
727 {
728 unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
729 struct bio *bio;
730
731 if ((bio = bio_list_peek(&tg->bio_lists[READ])))
732 tg_may_dispatch(td, tg, bio, &read_wait);
733
734 if ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
735 tg_may_dispatch(td, tg, bio, &write_wait);
736
737 min_wait = min(read_wait, write_wait);
738 disptime = jiffies + min_wait;
739
740 /* Update dispatch time */
741 throtl_dequeue_tg(td, tg);
742 tg->disptime = disptime;
743 throtl_enqueue_tg(td, tg);
744 }
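
/*
 * Note: read_wait/write_wait start at (unsigned long)-1, so a direction
 * with no bio queued never wins the min() above and the group's
 * disptime is driven by the direction that actually has work pending.
 */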
745
746 static void tg_dispatch_one_bio(struct throtl_data *td, struct throtl_grp *tg,
747 bool rw, struct bio_list *bl)
748 {
749 struct bio *bio;
750
751 bio = bio_list_pop(&tg->bio_lists[rw]);
752 tg->nr_queued[rw]--;
753 /* Drop bio reference on blkg */
754 blkg_put(tg_to_blkg(tg));
755
756 BUG_ON(td->nr_queued[rw] <= 0);
757 td->nr_queued[rw]--;
758
759 throtl_charge_bio(tg, bio);
760 bio_list_add(bl, bio);
761 bio->bi_rw |= REQ_THROTTLED;
762
763 throtl_trim_slice(td, tg, rw);
764 }
765
766 static int throtl_dispatch_tg(struct throtl_data *td, struct throtl_grp *tg,
767 struct bio_list *bl)
768 {
769 unsigned int nr_reads = 0, nr_writes = 0;
770 unsigned int max_nr_reads = throtl_grp_quantum*3/4;
771 unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
772 struct bio *bio;
773
774 /* Try to dispatch 75% READS and 25% WRITES */
775
776 while ((bio = bio_list_peek(&tg->bio_lists[READ]))
777 && tg_may_dispatch(td, tg, bio, NULL)) {
778
779 tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
780 nr_reads++;
781
782 if (nr_reads >= max_nr_reads)
783 break;
784 }
785
786 while ((bio = bio_list_peek(&tg->bio_lists[WRITE]))
787 && tg_may_dispatch(td, tg, bio, NULL)) {
788
789 tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
790 nr_writes++;
791
792 if (nr_writes >= max_nr_writes)
793 break;
794 }
795
796 return nr_reads + nr_writes;
797 }
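
/*
 * With the default throtl_grp_quantum of 8 the limits above work out to
 * at most 6 reads and 2 writes per group per round, i.e. the 75%/25%
 * split mentioned in the comment.
 */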
798
799 static int throtl_select_dispatch(struct throtl_data *td, struct bio_list *bl)
800 {
801 unsigned int nr_disp = 0;
802 struct throtl_grp *tg;
803 struct throtl_rb_root *st = &td->tg_service_tree;
804
805 while (1) {
806 tg = throtl_rb_first(st);
807
808 if (!tg)
809 break;
810
811 if (time_before(jiffies, tg->disptime))
812 break;
813
814 throtl_dequeue_tg(td, tg);
815
816 nr_disp += throtl_dispatch_tg(td, tg, bl);
817
818 if (tg->nr_queued[0] || tg->nr_queued[1]) {
819 tg_update_disptime(td, tg);
820 throtl_enqueue_tg(td, tg);
821 }
822
823 if (nr_disp >= throtl_quantum)
824 break;
825 }
826
827 return nr_disp;
828 }
829
830 static void throtl_process_limit_change(struct throtl_data *td)
831 {
832 struct request_queue *q = td->queue;
833 struct blkcg_gq *blkg, *n;
834
835 if (!td->limits_changed)
836 return;
837
838 xchg(&td->limits_changed, false);
839
840 throtl_log(td, "limits changed");
841
842 list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
843 struct throtl_grp *tg = blkg_to_tg(blkg);
844
845 if (!tg->limits_changed)
846 continue;
847
848 if (!xchg(&tg->limits_changed, false))
849 continue;
850
851 throtl_log_tg(td, tg, "limit change rbps=%llu wbps=%llu"
852 " riops=%u wiops=%u", tg->bps[READ], tg->bps[WRITE],
853 tg->iops[READ], tg->iops[WRITE]);
854
855 /*
856 * Restart the slices for both READ and WRITE. It
857 * might happen that a group's limits are dropped
858 * suddenly and we don't want to account recently
859 * dispatched IO at the new low rate.
860 */
861 throtl_start_new_slice(td, tg, 0);
862 throtl_start_new_slice(td, tg, 1);
863
864 if (throtl_tg_on_rr(tg))
865 tg_update_disptime(td, tg);
866 }
867 }
868
869 /* Dispatch throttled bios. Should be called without queue lock held. */
870 static int throtl_dispatch(struct request_queue *q)
871 {
872 struct throtl_data *td = q->td;
873 unsigned int nr_disp = 0;
874 struct bio_list bio_list_on_stack;
875 struct bio *bio;
876 struct blk_plug plug;
877
878 spin_lock_irq(q->queue_lock);
879
880 throtl_process_limit_change(td);
881
882 if (!total_nr_queued(td))
883 goto out;
884
885 bio_list_init(&bio_list_on_stack);
886
887 throtl_log(td, "dispatch nr_queued=%u read=%u write=%u",
888 total_nr_queued(td), td->nr_queued[READ],
889 td->nr_queued[WRITE]);
890
891 nr_disp = throtl_select_dispatch(td, &bio_list_on_stack);
892
893 if (nr_disp)
894 throtl_log(td, "bios disp=%u", nr_disp);
895
896 throtl_schedule_next_dispatch(td);
897 out:
898 spin_unlock_irq(q->queue_lock);
899
900 /*
901 * If we dispatched some requests, unplug the queue to ensure
902 * immediate dispatch.
903 */
904 if (nr_disp) {
905 blk_start_plug(&plug);
906 while((bio = bio_list_pop(&bio_list_on_stack)))
907 generic_make_request(bio);
908 blk_finish_plug(&plug);
909 }
910 return nr_disp;
911 }
912
913 void blk_throtl_work(struct work_struct *work)
914 {
915 struct throtl_data *td = container_of(work, struct throtl_data,
916 throtl_work.work);
917 struct request_queue *q = td->queue;
918
919 throtl_dispatch(q);
920 }
921
922 /* Call with queue lock held */
923 static void
924 throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
925 {
926
927 struct delayed_work *dwork = &td->throtl_work;
928
929 /* schedule work if limits changed even if no bio is queued */
930 if (total_nr_queued(td) || td->limits_changed) {
931 /*
932 * We might have a work scheduled to be executed in future.
933 * Cancel that and schedule a new one.
934 */
935 __cancel_delayed_work(dwork);
936 queue_delayed_work(kthrotld_workqueue, dwork, delay);
937 throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
938 delay, jiffies);
939 }
940 }
941
942 static u64 tg_prfill_cpu_rwstat(struct seq_file *sf,
943 struct blkg_policy_data *pd, int off)
944 {
945 struct throtl_grp *tg = pd_to_tg(pd);
946 struct blkg_rwstat rwstat = { }, tmp;
947 int i, cpu;
948
949 for_each_possible_cpu(cpu) {
950 struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);
951
952 tmp = blkg_rwstat_read((void *)sc + off);
953 for (i = 0; i < BLKG_RWSTAT_NR; i++)
954 rwstat.cnt[i] += tmp.cnt[i];
955 }
956
957 return __blkg_prfill_rwstat(sf, pd, &rwstat);
958 }
959
960 static int tg_print_cpu_rwstat(struct cgroup *cgrp, struct cftype *cft,
961 struct seq_file *sf)
962 {
963 struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
964
965 blkcg_print_blkgs(sf, blkcg, tg_prfill_cpu_rwstat, &blkcg_policy_throtl,
966 cft->private, true);
967 return 0;
968 }
969
970 static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
971 int off)
972 {
973 struct throtl_grp *tg = pd_to_tg(pd);
974 u64 v = *(u64 *)((void *)tg + off);
975
976 if (v == -1)
977 return 0;
978 return __blkg_prfill_u64(sf, pd, v);
979 }
980
981 static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
982 int off)
983 {
984 struct throtl_grp *tg = pd_to_tg(pd);
985 unsigned int v = *(unsigned int *)((void *)tg + off);
986
987 if (v == -1)
988 return 0;
989 return __blkg_prfill_u64(sf, pd, v);
990 }
991
992 static int tg_print_conf_u64(struct cgroup *cgrp, struct cftype *cft,
993 struct seq_file *sf)
994 {
995 blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_u64,
996 &blkcg_policy_throtl, cft->private, false);
997 return 0;
998 }
999
1000 static int tg_print_conf_uint(struct cgroup *cgrp, struct cftype *cft,
1001 struct seq_file *sf)
1002 {
1003 blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_uint,
1004 &blkcg_policy_throtl, cft->private, false);
1005 return 0;
1006 }
1007
1008 static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf,
1009 bool is_u64)
1010 {
1011 struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
1012 struct blkg_conf_ctx ctx;
1013 struct throtl_grp *tg;
1014 struct throtl_data *td;
1015 int ret;
1016
1017 ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
1018 if (ret)
1019 return ret;
1020
1021 tg = blkg_to_tg(ctx.blkg);
1022 td = ctx.blkg->q->td;
1023
1024 if (!ctx.v)
1025 ctx.v = -1;
1026
1027 if (is_u64)
1028 *(u64 *)((void *)tg + cft->private) = ctx.v;
1029 else
1030 *(unsigned int *)((void *)tg + cft->private) = ctx.v;
1031
1032 /* XXX: we don't need the following deferred processing */
1033 xchg(&tg->limits_changed, true);
1034 xchg(&td->limits_changed, true);
1035 throtl_schedule_delayed_work(td, 0);
1036
1037 blkg_conf_finish(&ctx);
1038 return 0;
1039 }
1040
1041 static int tg_set_conf_u64(struct cgroup *cgrp, struct cftype *cft,
1042 const char *buf)
1043 {
1044 return tg_set_conf(cgrp, cft, buf, true);
1045 }
1046
1047 static int tg_set_conf_uint(struct cgroup *cgrp, struct cftype *cft,
1048 const char *buf)
1049 {
1050 return tg_set_conf(cgrp, cft, buf, false);
1051 }
1052
1053 static struct cftype throtl_files[] = {
1054 {
1055 .name = "throttle.read_bps_device",
1056 .private = offsetof(struct throtl_grp, bps[READ]),
1057 .read_seq_string = tg_print_conf_u64,
1058 .write_string = tg_set_conf_u64,
1059 .max_write_len = 256,
1060 },
1061 {
1062 .name = "throttle.write_bps_device",
1063 .private = offsetof(struct throtl_grp, bps[WRITE]),
1064 .read_seq_string = tg_print_conf_u64,
1065 .write_string = tg_set_conf_u64,
1066 .max_write_len = 256,
1067 },
1068 {
1069 .name = "throttle.read_iops_device",
1070 .private = offsetof(struct throtl_grp, iops[READ]),
1071 .read_seq_string = tg_print_conf_uint,
1072 .write_string = tg_set_conf_uint,
1073 .max_write_len = 256,
1074 },
1075 {
1076 .name = "throttle.write_iops_device",
1077 .private = offsetof(struct throtl_grp, iops[WRITE]),
1078 .read_seq_string = tg_print_conf_uint,
1079 .write_string = tg_set_conf_uint,
1080 .max_write_len = 256,
1081 },
1082 {
1083 .name = "throttle.io_service_bytes",
1084 .private = offsetof(struct tg_stats_cpu, service_bytes),
1085 .read_seq_string = tg_print_cpu_rwstat,
1086 },
1087 {
1088 .name = "throttle.io_serviced",
1089 .private = offsetof(struct tg_stats_cpu, serviced),
1090 .read_seq_string = tg_print_cpu_rwstat,
1091 },
1092 { } /* terminate */
1093 };
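
/*
 * These files show up under the blkio cgroup hierarchy, e.g.
 * /sys/fs/cgroup/blkio/<group>/blkio.throttle.read_bps_device.  Each
 * write is of the form "<major>:<minor> <value>"; for example, assuming
 * the target disk is device 8:16:
 *
 *   echo "8:16 1048576" > blkio.throttle.read_bps_device
 *
 * limits the group's reads from that device to 1 MiB/s, and writing a
 * value of 0 clears the limit (stored internally as -1, unlimited; see
 * tg_set_conf() above).
 */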
1094
1095 static void throtl_shutdown_wq(struct request_queue *q)
1096 {
1097 struct throtl_data *td = q->td;
1098
1099 cancel_delayed_work_sync(&td->throtl_work);
1100 }
1101
1102 static struct blkcg_policy blkcg_policy_throtl = {
1103 .ops = {
1104 .pd_init_fn = throtl_pd_init,
1105 .pd_exit_fn = throtl_pd_exit,
1106 .pd_reset_stats_fn = throtl_pd_reset_stats,
1107 },
1108 .pd_size = sizeof(struct throtl_grp),
1109 .cftypes = throtl_files,
1110 };
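
/*
 * ->pd_size tells the blkcg core to allocate a whole struct throtl_grp
 * for each (blkcg, request_queue) pair; because ->pd is the first member
 * of throtl_grp, pd_to_tg() and tg_to_blkg() above reduce to simple
 * container_of() walks.
 */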
1111
1112 bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
1113 {
1114 struct throtl_data *td = q->td;
1115 struct throtl_grp *tg;
1116 bool rw = bio_data_dir(bio), update_disptime = true;
1117 struct blkcg *blkcg;
1118 bool throttled = false;
1119
1120 if (bio->bi_rw & REQ_THROTTLED) {
1121 bio->bi_rw &= ~REQ_THROTTLED;
1122 goto out;
1123 }
1124
1125 /* bio_associate_current() needs ioc, try creating */
1126 create_io_context(GFP_ATOMIC, q->node);
1127
1128 /*
1129 * A throtl_grp pointer retrieved under rcu can be used to access
1130 * basic fields like stats and io rates. If a group has no rules,
1131 * just update the dispatch stats in a lockless manner and return.
1132 */
1133 rcu_read_lock();
1134 blkcg = bio_blkcg(bio);
1135 tg = throtl_lookup_tg(td, blkcg);
1136 if (tg) {
1137 if (tg_no_rule_group(tg, rw)) {
1138 throtl_update_dispatch_stats(tg_to_blkg(tg),
1139 bio->bi_size, bio->bi_rw);
1140 goto out_unlock_rcu;
1141 }
1142 }
1143
1144 /*
1145 * Either the group has not been allocated yet or it is not an
1146 * unlimited IO group.
1147 */
1148 spin_lock_irq(q->queue_lock);
1149 tg = throtl_lookup_create_tg(td, blkcg);
1150 if (unlikely(!tg))
1151 goto out_unlock;
1152
1153 if (tg->nr_queued[rw]) {
1154 /*
1155 * There is already another bio queued in the same direction. No
1156 * need to update dispatch time.
1157 */
1158 update_disptime = false;
1159 goto queue_bio;
1160
1161 }
1162
1163 /* Bio is within the rate limit of the group */
1164 if (tg_may_dispatch(td, tg, bio, NULL)) {
1165 throtl_charge_bio(tg, bio);
1166
1167 /*
1168 * We need to trim the slice even when bios are not being queued,
1169 * otherwise it might happen that a bio is not queued for
1170 * a long time and the slice keeps on extending and trim is not
1171 * called for a long time. Now if limits are reduced suddenly
1172 * we take into account all the IO dispatched so far at the new
1173 * low rate and newly queued IO gets a really long dispatch
1174 * time.
1175 *
1176 * So keep on trimming slice even if bio is not queued.
1177 */
1178 throtl_trim_slice(td, tg, rw);
1179 goto out_unlock;
1180 }
1181
1182 queue_bio:
1183 throtl_log_tg(td, tg, "[%c] bio. bdisp=%llu sz=%u bps=%llu"
1184 " iodisp=%u iops=%u queued=%d/%d",
1185 rw == READ ? 'R' : 'W',
1186 tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
1187 tg->io_disp[rw], tg->iops[rw],
1188 tg->nr_queued[READ], tg->nr_queued[WRITE]);
1189
1190 bio_associate_current(bio);
1191 throtl_add_bio_tg(q->td, tg, bio);
1192 throttled = true;
1193
1194 if (update_disptime) {
1195 tg_update_disptime(td, tg);
1196 throtl_schedule_next_dispatch(td);
1197 }
1198
1199 out_unlock:
1200 spin_unlock_irq(q->queue_lock);
1201 out_unlock_rcu:
1202 rcu_read_unlock();
1203 out:
1204 return throttled;
1205 }
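
/*
 * The return value tells the caller what to do with the bio: true means
 * it has been queued here for later dispatch and must not be issued now,
 * false means it is within its limits (or throttling does not apply) and
 * the caller should go ahead and submit it.
 */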
1206
1207 /**
1208 * blk_throtl_drain - drain throttled bios
1209 * @q: request_queue to drain throttled bios for
1210 *
1211 * Dispatch all currently throttled bios on @q through ->make_request_fn().
1212 */
1213 void blk_throtl_drain(struct request_queue *q)
1214 __releases(q->queue_lock) __acquires(q->queue_lock)
1215 {
1216 struct throtl_data *td = q->td;
1217 struct throtl_rb_root *st = &td->tg_service_tree;
1218 struct throtl_grp *tg;
1219 struct bio_list bl;
1220 struct bio *bio;
1221
1222 WARN_ON_ONCE(!queue_is_locked(q));
1223
1224 bio_list_init(&bl);
1225
1226 while ((tg = throtl_rb_first(st))) {
1227 throtl_dequeue_tg(td, tg);
1228
1229 while ((bio = bio_list_peek(&tg->bio_lists[READ])))
1230 tg_dispatch_one_bio(td, tg, bio_data_dir(bio), &bl);
1231 while ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
1232 tg_dispatch_one_bio(td, tg, bio_data_dir(bio), &bl);
1233 }
1234 spin_unlock_irq(q->queue_lock);
1235
1236 while ((bio = bio_list_pop(&bl)))
1237 generic_make_request(bio);
1238
1239 spin_lock_irq(q->queue_lock);
1240 }
1241
1242 int blk_throtl_init(struct request_queue *q)
1243 {
1244 struct throtl_data *td;
1245 int ret;
1246
1247 td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
1248 if (!td)
1249 return -ENOMEM;
1250
1251 td->tg_service_tree = THROTL_RB_ROOT;
1252 td->limits_changed = false;
1253 INIT_DELAYED_WORK(&td->throtl_work, blk_throtl_work);
1254
1255 q->td = td;
1256 td->queue = q;
1257
1258 /* activate policy */
1259 ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
1260 if (ret)
1261 kfree(td);
1262 return ret;
1263 }
1264
1265 void blk_throtl_exit(struct request_queue *q)
1266 {
1267 BUG_ON(!q->td);
1268 throtl_shutdown_wq(q);
1269 blkcg_deactivate_policy(q, &blkcg_policy_throtl);
1270 kfree(q->td);
1271 }
1272
1273 static int __init throtl_init(void)
1274 {
1275 kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
1276 if (!kthrotld_workqueue)
1277 panic("Failed to create kthrotld\n");
1278
1279 return blkcg_policy_register(&blkcg_policy_throtl);
1280 }
1281
1282 module_init(throtl_init);