block/blk-throttle.c (blame at "blk-throttle: clean up blkg_policy_data alloc/init/exit/free methods")

/*
 * Interface for controlling IO bandwidth on a request queue
 *
 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blktrace_api.h>
#include <linux/blk-cgroup.h>
#include "blk.h"

/* Max dispatch from a group in 1 round */
static int throtl_grp_quantum = 8;

/* Total max dispatch from all groups in one round */
static int throtl_quantum = 32;

/* Throttling is performed over a 100ms slice and after that the slice is renewed */
static unsigned long throtl_slice = HZ/10;	/* 100 ms */

static struct blkcg_policy blkcg_policy_throtl;

/* A workqueue to queue throttle related work */
static struct workqueue_struct *kthrotld_workqueue;

/*
 * To implement hierarchical throttling, throtl_grps form a tree and bios
 * are dispatched upwards level by level until they reach the top and get
 * issued.  When dispatching bios from the children and local group at each
 * level, if the bios are dispatched into a single bio_list, there's a risk
 * that a local or child group which queues many bios at once can fill up
 * the list and starve the others.
 *
 * To avoid such starvation, dispatched bios are queued separately
 * according to where they came from.  When they are again dispatched to
 * the parent, they're popped in round-robin order so that no single source
 * hogs the dispatch window.
 *
 * throtl_qnode is used to keep the queued bios separated by their sources.
 * Bios are queued to throtl_qnode which in turn is queued to
 * throtl_service_queue and then dispatched in round-robin order.
 *
 * It's also used to track the reference counts on blkg's.  A qnode always
 * belongs to a throtl_grp and gets queued on itself or the parent, so
 * incrementing the reference of the associated throtl_grp when a qnode is
 * queued and decrementing when dequeued is enough to keep the whole blkg
 * tree pinned while bios are in flight.
 */
struct throtl_qnode {
	struct list_head	node;		/* service_queue->queued[] */
	struct bio_list		bios;		/* queued bios */
	struct throtl_grp	*tg;		/* tg this qnode belongs to */
};
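
/*
 * An illustrative (non-normative) example of the round-robin behavior:
 * if child groups A and B each have three bios queued on the parent's
 * queued[READ] list, the pops come out A, B, A, B, A, B rather than
 * A, A, A, B, B, B, because throtl_pop_queued() moves a qnode whose
 * bio_list is still non-empty to the tail of the list after each pop.
 */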

struct throtl_service_queue {
	struct throtl_service_queue *parent_sq;	/* the parent service_queue */

	/*
	 * Bios queued directly to this service_queue or dispatched from
	 * children throtl_grp's.
	 */
	struct list_head	queued[2];	/* throtl_qnode [READ/WRITE] */
	unsigned int		nr_queued[2];	/* number of queued bios */

	/*
	 * RB tree of active children throtl_grp's, which are sorted by
	 * their ->disptime.
	 */
	struct rb_root		pending_tree;	/* RB tree of active tgs */
	struct rb_node		*first_pending;	/* first node in the tree */
	unsigned int		nr_pending;	/* # queued in the tree */
	unsigned long		first_pending_disptime;	/* disptime of the first tg */
	struct timer_list	pending_timer;	/* fires on first_pending_disptime */
};

enum tg_state_flags {
	THROTL_TG_PENDING	= 1 << 0,	/* on parent's pending tree */
	THROTL_TG_WAS_EMPTY	= 1 << 1,	/* bio_lists[] became non-empty */
};

#define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)

/* Per-cpu group stats */
struct tg_stats_cpu {
	/* total bytes transferred */
	struct blkg_rwstat		service_bytes;
	/* total IOs serviced, post merge */
	struct blkg_rwstat		serviced;
};

struct throtl_grp {
	/* must be the first member */
	struct blkg_policy_data pd;

	/* active throtl group service_queue member */
	struct rb_node rb_node;

	/* throtl_data this group belongs to */
	struct throtl_data *td;

	/* this group's service queue */
	struct throtl_service_queue service_queue;

	/*
	 * qnode_on_self is used when bios are directly queued to this
	 * throtl_grp so that local bios compete fairly with bios
	 * dispatched from children.  qnode_on_parent is used when bios are
	 * dispatched from this throtl_grp into its parent and will compete
	 * with the sibling qnode_on_parents and the parent's
	 * qnode_on_self.
	 */
	struct throtl_qnode qnode_on_self[2];
	struct throtl_qnode qnode_on_parent[2];

	/*
	 * Dispatch time in jiffies.  This is the estimated time when the
	 * group will unthrottle and be ready to dispatch more bios.  It is
	 * used as the key to sort active groups in the service tree.
	 */
	unsigned long disptime;

	unsigned int flags;

	/* are there any throtl rules between this group and td? */
	bool has_rules[2];

	/* bytes per second rate limits */
	uint64_t bps[2];

	/* IOPS limits */
	unsigned int iops[2];

	/* Number of bytes dispatched in the current slice */
	uint64_t bytes_disp[2];
	/* Number of bios dispatched in the current slice */
	unsigned int io_disp[2];

	/* When did we start a new slice */
	unsigned long slice_start[2];
	unsigned long slice_end[2];

	/* Per cpu stats pointer */
	struct tg_stats_cpu __percpu *stats_cpu;
};

struct throtl_data
{
	/* service tree for active throtl groups */
	struct throtl_service_queue service_queue;

	struct request_queue *queue;

	/* Total number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	/* number of total undestroyed groups */
	unsigned int nr_undestroyed_grps;

	/* Work for dispatching throttled bios */
	struct work_struct dispatch_work;
};

static void throtl_pending_timer_fn(unsigned long arg);

static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
}

static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
{
	return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
}

static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
{
	return pd_to_blkg(&tg->pd);
}

static inline struct throtl_grp *td_root_tg(struct throtl_data *td)
{
	return blkg_to_tg(td->queue->root_blkg);
}

/**
 * sq_to_tg - return the throtl_grp the specified service queue belongs to
 * @sq: the throtl_service_queue of interest
 *
 * Return the throtl_grp @sq belongs to.  If @sq is the top-level one
 * embedded in throtl_data, %NULL is returned.
 */
static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq)
{
	if (sq && sq->parent_sq)
		return container_of(sq, struct throtl_grp, service_queue);
	else
		return NULL;
}

/**
 * sq_to_td - return the throtl_data the specified service queue belongs to
 * @sq: the throtl_service_queue of interest
 *
 * A service_queue can be embedded in either a throtl_grp or throtl_data.
 * Determine the associated throtl_data accordingly and return it.
 */
static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
{
	struct throtl_grp *tg = sq_to_tg(sq);

	if (tg)
		return tg->td;
	else
		return container_of(sq, struct throtl_data, service_queue);
}

/**
 * throtl_log - log debug message via blktrace
 * @sq: the service_queue being reported
 * @fmt: printf format string
 * @args: printf args
 *
 * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
 * throtl_grp; otherwise, just "throtl".
 *
 * TODO: this should be made a function and name formatting should happen
 * after testing whether blktrace is enabled.
 */
#define throtl_log(sq, fmt, args...)	do {				\
	struct throtl_grp *__tg = sq_to_tg((sq));			\
	struct throtl_data *__td = sq_to_td((sq));			\
									\
	(void)__td;							\
	if ((__tg)) {							\
		char __pbuf[128];					\
									\
		blkg_path(tg_to_blkg(__tg), __pbuf, sizeof(__pbuf));	\
		blk_add_trace_msg(__td->queue, "throtl %s " fmt, __pbuf, ##args); \
	} else {							\
		blk_add_trace_msg(__td->queue, "throtl " fmt, ##args);	\
	}								\
} while (0)
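
/*
 * Example usage, as seen throughout this file:
 *
 *	throtl_log(&tg->service_queue, "[%c] new slice start=%lu end=%lu",
 *		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
 *		   tg->slice_end[rw]);
 *
 * With blktrace enabled this emits "throtl <blkg path> [R] new slice ..."
 * for a throtl_grp's service_queue and a plain "throtl ..." message for
 * the top-level service_queue embedded in throtl_data.
 */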

static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
{
	INIT_LIST_HEAD(&qn->node);
	bio_list_init(&qn->bios);
	qn->tg = tg;
}

/**
 * throtl_qnode_add_bio - add a bio to a throtl_qnode and activate it
 * @bio: bio being added
 * @qn: qnode to add bio to
 * @queued: the service_queue->queued[] list @qn belongs to
 *
 * Add @bio to @qn and put @qn on @queued if it's not already on.
 * @qn->tg's reference count is bumped when @qn is activated.  See the
 * comment on top of throtl_qnode definition for details.
 */
static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,
				 struct list_head *queued)
{
	bio_list_add(&qn->bios, bio);
	if (list_empty(&qn->node)) {
		list_add_tail(&qn->node, queued);
		blkg_get(tg_to_blkg(qn->tg));
	}
}

/**
 * throtl_peek_queued - peek the first bio on a qnode list
 * @queued: the qnode list to peek
 */
static struct bio *throtl_peek_queued(struct list_head *queued)
{
	struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node);
	struct bio *bio;

	if (list_empty(queued))
		return NULL;

	bio = bio_list_peek(&qn->bios);
	WARN_ON_ONCE(!bio);
	return bio;
}

/**
 * throtl_pop_queued - pop the first bio from a qnode list
 * @queued: the qnode list to pop a bio from
 * @tg_to_put: optional out argument for throtl_grp to put
 *
 * Pop the first bio from the qnode list @queued.  After popping, the first
 * qnode is removed from @queued if empty or moved to the end of @queued so
 * that the popping order is round-robin.
 *
 * When the first qnode is removed, its associated throtl_grp should be put
 * too.  If @tg_to_put is NULL, this function automatically puts it;
 * otherwise, *@tg_to_put is set to the throtl_grp to put and the caller is
 * responsible for putting it.
 */
static struct bio *throtl_pop_queued(struct list_head *queued,
				     struct throtl_grp **tg_to_put)
{
	struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node);
	struct bio *bio;

	if (list_empty(queued))
		return NULL;

	bio = bio_list_pop(&qn->bios);
	WARN_ON_ONCE(!bio);

	if (bio_list_empty(&qn->bios)) {
		list_del_init(&qn->node);
		if (tg_to_put)
			*tg_to_put = qn->tg;
		else
			blkg_put(tg_to_blkg(qn->tg));
	} else {
		list_move_tail(&qn->node, queued);
	}

	return bio;
}

/* init a service_queue, assumes the caller zeroed it */
static void throtl_service_queue_init(struct throtl_service_queue *sq)
{
	INIT_LIST_HEAD(&sq->queued[0]);
	INIT_LIST_HEAD(&sq->queued[1]);
	sq->pending_tree = RB_ROOT;
	setup_timer(&sq->pending_timer, throtl_pending_timer_fn,
		    (unsigned long)sq);
}

static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp, int node)
{
	struct throtl_grp *tg;
	int rw, cpu;

	tg = kzalloc_node(sizeof(*tg), gfp, node);
	if (!tg)
		return NULL;

	tg->stats_cpu = alloc_percpu_gfp(struct tg_stats_cpu, gfp);
	if (!tg->stats_cpu) {
		kfree(tg);
		return NULL;
	}

	throtl_service_queue_init(&tg->service_queue);

	for (rw = READ; rw <= WRITE; rw++) {
		throtl_qnode_init(&tg->qnode_on_self[rw], tg);
		throtl_qnode_init(&tg->qnode_on_parent[rw], tg);
	}

	RB_CLEAR_NODE(&tg->rb_node);
	tg->bps[READ] = -1;
	tg->bps[WRITE] = -1;
	tg->iops[READ] = -1;
	tg->iops[WRITE] = -1;

	for_each_possible_cpu(cpu) {
		struct tg_stats_cpu *stats_cpu = per_cpu_ptr(tg->stats_cpu, cpu);

		blkg_rwstat_init(&stats_cpu->service_bytes);
		blkg_rwstat_init(&stats_cpu->serviced);
	}

	return &tg->pd;
}

static void throtl_pd_init(struct blkcg_gq *blkg)
{
	struct throtl_grp *tg = blkg_to_tg(blkg);
	struct throtl_data *td = blkg->q->td;
	struct throtl_service_queue *sq = &tg->service_queue;

	/*
	 * If on the default hierarchy, we switch to properly hierarchical
	 * behavior where limits on a given throtl_grp are applied to the
	 * whole subtree rather than just the group itself.  e.g. If 16M
	 * read_bps limit is set on the root group, the whole system can't
	 * exceed 16M for the device.
	 *
	 * If not on the default hierarchy, the broken flat hierarchy
	 * behavior is retained where all throtl_grps are treated as if
	 * they're all separate root groups right below throtl_data.
	 * Limits of a group don't interact with limits of other groups
	 * regardless of the position of the group in the hierarchy.
	 */
	sq->parent_sq = &td->service_queue;
	if (cgroup_on_dfl(blkg->blkcg->css.cgroup) && blkg->parent)
		sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
	tg->td = td;
}

/*
 * Set has_rules[] if @tg or any of its parents have limits configured.
 * This doesn't require walking up to the top of the hierarchy as the
 * parent's has_rules[] is guaranteed to be correct.
 */
static void tg_update_has_rules(struct throtl_grp *tg)
{
	struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq);
	int rw;

	for (rw = READ; rw <= WRITE; rw++)
		tg->has_rules[rw] = (parent_tg && parent_tg->has_rules[rw]) ||
				    (tg->bps[rw] != -1 || tg->iops[rw] != -1);
}

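/*
 * An illustrative example: for a hierarchy root -> A -> B where only A
 * has a bps limit configured, A's has_rules[rw] is true because of its
 * own limit and B's has_rules[rw] is true because it inherits from A,
 * even though B itself has no limit.  The root group's has_rules[] stays
 * false, which is what lets bios that only traverse unlimited groups
 * bypass blk-throttle entirely (see blk_throtl_bio()).
 */
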
static void throtl_pd_online(struct blkcg_gq *blkg)
{
	/*
	 * We don't want new groups to escape the limits of their ancestors.
	 * Update has_rules[] after a new group is brought online.
	 */
	tg_update_has_rules(blkg_to_tg(blkg));
}

static void throtl_pd_free(struct blkg_policy_data *pd)
{
	struct throtl_grp *tg = pd_to_tg(pd);

	del_timer_sync(&tg->service_queue.pending_timer);
	free_percpu(tg->stats_cpu);
	kfree(tg);
}

static void throtl_pd_reset_stats(struct blkcg_gq *blkg)
{
	struct throtl_grp *tg = blkg_to_tg(blkg);
	int cpu;

	for_each_possible_cpu(cpu) {
		struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);

		blkg_rwstat_reset(&sc->service_bytes);
		blkg_rwstat_reset(&sc->serviced);
	}
}

static struct throtl_grp *throtl_lookup_tg(struct throtl_data *td,
					   struct blkcg *blkcg)
{
	/*
	 * This is the common case when there are no blkcgs.  Avoid lookup
	 * in this case
	 */
	if (blkcg == &blkcg_root)
		return td_root_tg(td);

	return blkg_to_tg(blkg_lookup(blkcg, td->queue));
}

static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
						  struct blkcg *blkcg)
{
	struct request_queue *q = td->queue;
	struct throtl_grp *tg = NULL;

	/*
	 * This is the common case when there are no blkcgs.  Avoid lookup
	 * in this case
	 */
	if (blkcg == &blkcg_root) {
		tg = td_root_tg(td);
	} else {
		struct blkcg_gq *blkg;

		blkg = blkg_lookup_create(blkcg, q);

		/* if %NULL and @q is alive, fall back to root_tg */
		if (!IS_ERR(blkg))
			tg = blkg_to_tg(blkg);
		else if (!blk_queue_dying(q))
			tg = td_root_tg(td);
	}

	return tg;
}

static struct throtl_grp *
throtl_rb_first(struct throtl_service_queue *parent_sq)
{
	/* Service tree is empty */
	if (!parent_sq->nr_pending)
		return NULL;

	if (!parent_sq->first_pending)
		parent_sq->first_pending = rb_first(&parent_sq->pending_tree);

	if (parent_sq->first_pending)
		return rb_entry_tg(parent_sq->first_pending);

	return NULL;
}

static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
	rb_erase(n, root);
	RB_CLEAR_NODE(n);
}

static void throtl_rb_erase(struct rb_node *n,
			    struct throtl_service_queue *parent_sq)
{
	if (parent_sq->first_pending == n)
		parent_sq->first_pending = NULL;
	rb_erase_init(n, &parent_sq->pending_tree);
	--parent_sq->nr_pending;
}

static void update_min_dispatch_time(struct throtl_service_queue *parent_sq)
{
	struct throtl_grp *tg;

	tg = throtl_rb_first(parent_sq);
	if (!tg)
		return;

	parent_sq->first_pending_disptime = tg->disptime;
}

static void tg_service_queue_add(struct throtl_grp *tg)
{
	struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq;
	struct rb_node **node = &parent_sq->pending_tree.rb_node;
	struct rb_node *parent = NULL;
	struct throtl_grp *__tg;
	unsigned long key = tg->disptime;
	int left = 1;

	while (*node != NULL) {
		parent = *node;
		__tg = rb_entry_tg(parent);

		if (time_before(key, __tg->disptime))
			node = &parent->rb_left;
		else {
			node = &parent->rb_right;
			left = 0;
		}
	}

	if (left)
		parent_sq->first_pending = &tg->rb_node;

	rb_link_node(&tg->rb_node, parent, node);
	rb_insert_color(&tg->rb_node, &parent_sq->pending_tree);
}

static void __throtl_enqueue_tg(struct throtl_grp *tg)
{
	tg_service_queue_add(tg);
	tg->flags |= THROTL_TG_PENDING;
	tg->service_queue.parent_sq->nr_pending++;
}

static void throtl_enqueue_tg(struct throtl_grp *tg)
{
	if (!(tg->flags & THROTL_TG_PENDING))
		__throtl_enqueue_tg(tg);
}

static void __throtl_dequeue_tg(struct throtl_grp *tg)
{
	throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq);
	tg->flags &= ~THROTL_TG_PENDING;
}

static void throtl_dequeue_tg(struct throtl_grp *tg)
{
	if (tg->flags & THROTL_TG_PENDING)
		__throtl_dequeue_tg(tg);
}

/* Call with queue lock held */
static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
					  unsigned long expires)
{
	mod_timer(&sq->pending_timer, expires);
	throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu",
		   expires - jiffies, jiffies);
}

/**
 * throtl_schedule_next_dispatch - schedule the next dispatch cycle
 * @sq: the service_queue to schedule dispatch for
 * @force: force scheduling
 *
 * Arm @sq->pending_timer so that the next dispatch cycle starts on the
 * dispatch time of the first pending child.  Returns %true if either the
 * timer is armed or there's no pending child left; %false if the current
 * dispatch window is still open and the caller should continue
 * dispatching.
 *
 * If @force is %true, the dispatch timer is always scheduled and this
 * function is guaranteed to return %true.  This is to be used when the
 * caller can't dispatch itself and needs to invoke pending_timer
 * unconditionally.  Note that forced scheduling is likely to induce a
 * short delay before dispatch starts even if @sq->first_pending_disptime
 * is not in the future and thus shouldn't be used in hot paths.
 */
static bool throtl_schedule_next_dispatch(struct throtl_service_queue *sq,
					  bool force)
{
	/* any pending children left? */
	if (!sq->nr_pending)
		return true;

	update_min_dispatch_time(sq);

	/* is the next dispatch time in the future? */
	if (force || time_after(sq->first_pending_disptime, jiffies)) {
		throtl_schedule_pending_timer(sq, sq->first_pending_disptime);
		return true;
	}

	/* tell the caller to continue dispatching */
	return false;
}
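
/*
 * A sketch of the typical caller pattern (this is what
 * throtl_pending_timer_fn() does under the queue lock):
 *
 *	while (true) {
 *		throtl_select_dispatch(sq);
 *		if (throtl_schedule_next_dispatch(sq, false))
 *			break;
 *	}
 *
 * i.e. keep dispatching while the window is open and stop once the timer
 * has been armed or no pending children remain.
 */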

static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
						      bool rw, unsigned long start)
{
	tg->bytes_disp[rw] = 0;
	tg->io_disp[rw] = 0;

	/*
	 * Previous slice has expired.  We must have trimmed it after last
	 * bio dispatch.  That means since start of last slice, we never used
	 * that bandwidth.  Do try to make use of that bandwidth while giving
	 * credit.
	 */
	if (time_after_eq(start, tg->slice_start[rw]))
		tg->slice_start[rw] = start;

	tg->slice_end[rw] = jiffies + throtl_slice;
	throtl_log(&tg->service_queue,
		   "[%c] new slice with credit start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
		   tg->slice_end[rw], jiffies);
}

static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
{
	tg->bytes_disp[rw] = 0;
	tg->io_disp[rw] = 0;
	tg->slice_start[rw] = jiffies;
	tg->slice_end[rw] = jiffies + throtl_slice;
	throtl_log(&tg->service_queue,
		   "[%c] new slice start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
		   tg->slice_end[rw], jiffies);
}

static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
					unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
}

static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
				       unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
	throtl_log(&tg->service_queue,
		   "[%c] extend slice start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
		   tg->slice_end[rw], jiffies);
}

/* Determine if previously allocated or extended slice is complete or not */
static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
{
	if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
		return false;

	return true;
}

/* Trim the used slices and adjust slice start accordingly */
static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
{
	unsigned long nr_slices, time_elapsed, io_trim;
	u64 bytes_trim, tmp;

	BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));

	/*
	 * If bps are unlimited (-1), then the time slice doesn't get
	 * renewed.  Don't try to trim the slice if the slice is used.  A new
	 * slice will start when appropriate.
	 */
	if (throtl_slice_used(tg, rw))
		return;

	/*
	 * A bio has been dispatched.  Also adjust slice_end.  It might happen
	 * that initially the cgroup limit was very low resulting in a high
	 * slice_end, but later the limit was bumped up and the bio was
	 * dispatched sooner, then we need to reduce slice_end.  A high bogus
	 * slice_end is bad because it does not allow a new slice to start.
	 */

	throtl_set_slice_end(tg, rw, jiffies + throtl_slice);

	time_elapsed = jiffies - tg->slice_start[rw];

	nr_slices = time_elapsed / throtl_slice;

	if (!nr_slices)
		return;
	tmp = tg->bps[rw] * throtl_slice * nr_slices;
	do_div(tmp, HZ);
	bytes_trim = tmp;

	io_trim = (tg->iops[rw] * throtl_slice * nr_slices)/HZ;

	if (!bytes_trim && !io_trim)
		return;

	if (tg->bytes_disp[rw] >= bytes_trim)
		tg->bytes_disp[rw] -= bytes_trim;
	else
		tg->bytes_disp[rw] = 0;

	if (tg->io_disp[rw] >= io_trim)
		tg->io_disp[rw] -= io_trim;
	else
		tg->io_disp[rw] = 0;

	tg->slice_start[rw] += nr_slices * throtl_slice;

	throtl_log(&tg->service_queue,
		   "[%c] trim slice nr=%lu bytes=%llu io=%lu start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
		   tg->slice_start[rw], tg->slice_end[rw], jiffies);
}
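
/*
 * A worked example with illustrative numbers: with HZ=1000,
 * throtl_slice=100 jiffies, bps[rw]=1048576 (1MiB/s) and
 * time_elapsed=250 jiffies, nr_slices = 250/100 = 2 and
 * bytes_trim = 1048576 * 100 * 2 / 1000 = 209715, i.e. roughly the
 * budget of the two fully elapsed 100ms slices is subtracted from
 * bytes_disp[rw] while slice_start[rw] advances by 200 jiffies.
 */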

static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
				  unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned int io_allowed;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
	u64 tmp;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started.  Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

	/*
	 * jiffy_elapsed_rnd should not be a big value: the minimum iops can
	 * be 1, so at most jiffy_elapsed should be equivalent to 1 second,
	 * as we allow dispatch after 1 second and by then the slice should
	 * have been trimmed.
	 */

	tmp = (u64)tg->iops[rw] * jiffy_elapsed_rnd;
	do_div(tmp, HZ);

	if (tmp > UINT_MAX)
		io_allowed = UINT_MAX;
	else
		io_allowed = tmp;

	if (tg->io_disp[rw] + 1 <= io_allowed) {
		if (wait)
			*wait = 0;
		return true;
	}

	/* Calc approx time to dispatch */
	jiffy_wait = ((tg->io_disp[rw] + 1) * HZ)/tg->iops[rw] + 1;

	if (jiffy_wait > jiffy_elapsed)
		jiffy_wait = jiffy_wait - jiffy_elapsed;
	else
		jiffy_wait = 1;

	if (wait)
		*wait = jiffy_wait;
	return false;
}
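
/*
 * A worked example with illustrative numbers: with HZ=1000, iops[rw]=10
 * and jiffy_elapsed_rnd=100, io_allowed = 10 * 100 / 1000 = 1, so the
 * first bio in the slice dispatches immediately.  A second bio must wait
 * jiffy_wait = (1 + 1) * 1000 / 10 + 1 = 201 jiffies, minus the time
 * already elapsed in the slice.
 */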

static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
				 unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	u64 bytes_allowed, extra_bytes, tmp;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started.  Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

	tmp = tg->bps[rw] * jiffy_elapsed_rnd;
	do_div(tmp, HZ);
	bytes_allowed = tmp;

	if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) {
		if (wait)
			*wait = 0;
		return true;
	}

	/* Calc approx time to dispatch */
	extra_bytes = tg->bytes_disp[rw] + bio->bi_iter.bi_size - bytes_allowed;
	jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);

	if (!jiffy_wait)
		jiffy_wait = 1;

	/*
	 * This wait time is without taking into consideration the rounding
	 * up we did.  Add that time also.
	 */
	jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
	if (wait)
		*wait = jiffy_wait;
	return false;
}
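
/*
 * A worked example with illustrative numbers: with HZ=1000,
 * bps[rw]=1048576 (1MiB/s) and jiffy_elapsed_rnd=100, bytes_allowed =
 * 1048576 * 100 / 1000 = 104857.  A 256KiB (262144 byte) bio arriving
 * with bytes_disp[rw]=0 exceeds that, so extra_bytes = 262144 - 104857 =
 * 157287 and jiffy_wait = 157287 * 1000 / 1048576 = 150 jiffies, plus
 * the rounding-up adjustment.
 */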

/*
 * Returns whether one can dispatch a bio or not.  Also returns the approx
 * number of jiffies to wait before this bio is within the IO rate and can
 * be dispatched.
 */
static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
			    unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;

	/*
	 * Currently the whole state machine of the group depends on the
	 * first bio queued in the group's bio list.  So one should not be
	 * calling this function with a different bio if there are other
	 * bios queued.
	 */
	BUG_ON(tg->service_queue.nr_queued[rw] &&
	       bio != throtl_peek_queued(&tg->service_queue.queued[rw]));

	/* If tg->bps = -1, then BW is unlimited */
	if (tg->bps[rw] == -1 && tg->iops[rw] == -1) {
		if (wait)
			*wait = 0;
		return true;
	}

	/*
	 * If the previous slice expired, start a new one, otherwise
	 * renew/extend the existing slice to make sure it is at least
	 * throtl_slice interval long since now.
	 */
	if (throtl_slice_used(tg, rw))
		throtl_start_new_slice(tg, rw);
	else {
		if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
			throtl_extend_slice(tg, rw, jiffies + throtl_slice);
	}

	if (tg_with_in_bps_limit(tg, bio, &bps_wait) &&
	    tg_with_in_iops_limit(tg, bio, &iops_wait)) {
		if (wait)
			*wait = 0;
		return true;
	}

	max_wait = max(bps_wait, iops_wait);

	if (wait)
		*wait = max_wait;

	if (time_before(tg->slice_end[rw], jiffies + max_wait))
		throtl_extend_slice(tg, rw, jiffies + max_wait);

	return false;
}

static void throtl_update_dispatch_stats(struct blkcg_gq *blkg, u64 bytes,
					 int rw)
{
	struct throtl_grp *tg = blkg_to_tg(blkg);
	struct tg_stats_cpu *stats_cpu;
	unsigned long flags;

	/*
	 * Disabling interrupts to provide mutual exclusion between two
	 * writes on same cpu.  It probably is not needed for 64bit.  Not
	 * optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(tg->stats_cpu);

	blkg_rwstat_add(&stats_cpu->serviced, rw, 1);
	blkg_rwstat_add(&stats_cpu->service_bytes, rw, bytes);

	local_irq_restore(flags);
}

static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
	bool rw = bio_data_dir(bio);

	/* Charge the bio to the group */
	tg->bytes_disp[rw] += bio->bi_iter.bi_size;
	tg->io_disp[rw]++;

	/*
	 * REQ_THROTTLED is used to prevent the same bio from being
	 * throttled more than once, as a throttled bio will go through
	 * blk-throtl a second time when it eventually gets issued.  Set it
	 * when a bio is being charged to a tg.
	 *
	 * Dispatch stats aren't recursive and each @bio should only be
	 * accounted by the @tg it was originally associated with.  Let's
	 * update the stats when setting REQ_THROTTLED for the first time
	 * which is guaranteed to be for the @bio's original tg.
	 */
	if (!(bio->bi_rw & REQ_THROTTLED)) {
		bio->bi_rw |= REQ_THROTTLED;
		throtl_update_dispatch_stats(tg_to_blkg(tg),
					     bio->bi_iter.bi_size, bio->bi_rw);
	}
}

/**
 * throtl_add_bio_tg - add a bio to the specified throtl_grp
 * @bio: bio to add
 * @qn: qnode to use
 * @tg: the target throtl_grp
 *
 * Add @bio to @tg's service_queue using @qn.  If @qn is not specified,
 * tg->qnode_on_self[] is used.
 */
static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn,
			      struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	bool rw = bio_data_dir(bio);

	if (!qn)
		qn = &tg->qnode_on_self[rw];

	/*
	 * If @tg doesn't currently have any bios queued in the same
	 * direction, queueing @bio can change when @tg should be
	 * dispatched.  Mark that @tg was empty.  This is automatically
	 * cleared on the next tg_update_disptime().
	 */
	if (!sq->nr_queued[rw])
		tg->flags |= THROTL_TG_WAS_EMPTY;

	throtl_qnode_add_bio(bio, qn, &sq->queued[rw]);

	sq->nr_queued[rw]++;
	throtl_enqueue_tg(tg);
}

static void tg_update_disptime(struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
	struct bio *bio;

	if ((bio = throtl_peek_queued(&sq->queued[READ])))
		tg_may_dispatch(tg, bio, &read_wait);

	if ((bio = throtl_peek_queued(&sq->queued[WRITE])))
		tg_may_dispatch(tg, bio, &write_wait);

	min_wait = min(read_wait, write_wait);
	disptime = jiffies + min_wait;

	/* Update dispatch time */
	throtl_dequeue_tg(tg);
	tg->disptime = disptime;
	throtl_enqueue_tg(tg);

	/* see throtl_add_bio_tg() */
	tg->flags &= ~THROTL_TG_WAS_EMPTY;
}

static void start_parent_slice_with_credit(struct throtl_grp *child_tg,
					   struct throtl_grp *parent_tg, bool rw)
{
	if (throtl_slice_used(parent_tg, rw)) {
		throtl_start_new_slice_with_credit(parent_tg, rw,
						   child_tg->slice_start[rw]);
	}
}

static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	struct throtl_service_queue *parent_sq = sq->parent_sq;
	struct throtl_grp *parent_tg = sq_to_tg(parent_sq);
	struct throtl_grp *tg_to_put = NULL;
	struct bio *bio;

	/*
	 * @bio is being transferred from @tg to @parent_sq.  Popping a bio
	 * from @tg may put its reference and @parent_sq might end up
	 * getting released prematurely.  Remember the tg to put and put it
	 * after @bio is transferred to @parent_sq.
	 */
	bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
	sq->nr_queued[rw]--;

	throtl_charge_bio(tg, bio);

	/*
	 * If our parent is another tg, we just need to transfer @bio to
	 * the parent using throtl_add_bio_tg().  If our parent is
	 * @td->service_queue, @bio is ready to be issued.  Put it on its
	 * bio_lists[] and decrease the total number queued.  The caller is
	 * responsible for issuing these bios.
	 */
	if (parent_tg) {
		throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
		start_parent_slice_with_credit(tg, parent_tg, rw);
	} else {
		throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
				     &parent_sq->queued[rw]);
		BUG_ON(tg->td->nr_queued[rw] <= 0);
		tg->td->nr_queued[rw]--;
	}

	throtl_trim_slice(tg, rw);

	if (tg_to_put)
		blkg_put(tg_to_blkg(tg_to_put));
}

static int throtl_dispatch_tg(struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	unsigned int nr_reads = 0, nr_writes = 0;
	unsigned int max_nr_reads = throtl_grp_quantum*3/4;
	unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
	struct bio *bio;

	/* Try to dispatch 75% READS and 25% WRITES */

	while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
	       tg_may_dispatch(tg, bio, NULL)) {

		tg_dispatch_one_bio(tg, bio_data_dir(bio));
		nr_reads++;

		if (nr_reads >= max_nr_reads)
			break;
	}

	while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
	       tg_may_dispatch(tg, bio, NULL)) {

		tg_dispatch_one_bio(tg, bio_data_dir(bio));
		nr_writes++;

		if (nr_writes >= max_nr_writes)
			break;
	}

	return nr_reads + nr_writes;
}
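
/*
 * With the default throtl_grp_quantum of 8, max_nr_reads = 8 * 3 / 4 = 6
 * and max_nr_writes = 8 - 6 = 2, which yields the 75%/25% read/write
 * split noted above.  throtl_select_dispatch() below additionally caps a
 * whole round at throtl_quantum (32) bios across all groups.
 */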

static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
{
	unsigned int nr_disp = 0;

	while (1) {
		struct throtl_grp *tg = throtl_rb_first(parent_sq);
		struct throtl_service_queue *sq;

		if (!tg)
			break;

		if (time_before(jiffies, tg->disptime))
			break;

		throtl_dequeue_tg(tg);

		nr_disp += throtl_dispatch_tg(tg);

		sq = &tg->service_queue;
		if (sq->nr_queued[0] || sq->nr_queued[1])
			tg_update_disptime(tg);

		if (nr_disp >= throtl_quantum)
			break;
	}

	return nr_disp;
}

/**
 * throtl_pending_timer_fn - timer function for service_queue->pending_timer
 * @arg: the throtl_service_queue being serviced
 *
 * This timer is armed when a child throtl_grp with active bios becomes
 * pending and queued on the service_queue's pending_tree and expires when
 * the first child throtl_grp should be dispatched.  This function
 * dispatches bios from the children throtl_grps to the parent
 * service_queue.
 *
 * If the parent's parent is another throtl_grp, dispatching is propagated
 * by either arming its pending_timer or repeating dispatch directly.  If
 * the top-level service_tree is reached, throtl_data->dispatch_work is
 * kicked so that the ready bios are issued.
 */
static void throtl_pending_timer_fn(unsigned long arg)
{
	struct throtl_service_queue *sq = (void *)arg;
	struct throtl_grp *tg = sq_to_tg(sq);
	struct throtl_data *td = sq_to_td(sq);
	struct request_queue *q = td->queue;
	struct throtl_service_queue *parent_sq;
	bool dispatched;
	int ret;

	spin_lock_irq(q->queue_lock);
again:
	parent_sq = sq->parent_sq;
	dispatched = false;

	while (true) {
		throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u",
			   sq->nr_queued[READ] + sq->nr_queued[WRITE],
			   sq->nr_queued[READ], sq->nr_queued[WRITE]);

		ret = throtl_select_dispatch(sq);
		if (ret) {
			throtl_log(sq, "bios disp=%u", ret);
			dispatched = true;
		}

		if (throtl_schedule_next_dispatch(sq, false))
			break;

		/* this dispatch window is still open, relax and repeat */
		spin_unlock_irq(q->queue_lock);
		cpu_relax();
		spin_lock_irq(q->queue_lock);
	}

	if (!dispatched)
		goto out_unlock;

	if (parent_sq) {
		/* @parent_sq is another throtl_grp, propagate dispatch */
		if (tg->flags & THROTL_TG_WAS_EMPTY) {
			tg_update_disptime(tg);
			if (!throtl_schedule_next_dispatch(parent_sq, false)) {
				/* window is already open, repeat dispatching */
				sq = parent_sq;
				tg = sq_to_tg(sq);
				goto again;
			}
		}
	} else {
		/* reached the top-level, queue issuing */
		queue_work(kthrotld_workqueue, &td->dispatch_work);
	}
out_unlock:
	spin_unlock_irq(q->queue_lock);
}

/**
 * blk_throtl_dispatch_work_fn - work function for throtl_data->dispatch_work
 * @work: work item being executed
 *
 * This function is queued for execution when bios reach the bio_lists[]
 * of throtl_data->service_queue.  Those bios are ready and issued by this
 * function.
 */
static void blk_throtl_dispatch_work_fn(struct work_struct *work)
{
	struct throtl_data *td = container_of(work, struct throtl_data,
					      dispatch_work);
	struct throtl_service_queue *td_sq = &td->service_queue;
	struct request_queue *q = td->queue;
	struct bio_list bio_list_on_stack;
	struct bio *bio;
	struct blk_plug plug;
	int rw;

	bio_list_init(&bio_list_on_stack);

	spin_lock_irq(q->queue_lock);
	for (rw = READ; rw <= WRITE; rw++)
		while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
			bio_list_add(&bio_list_on_stack, bio);
	spin_unlock_irq(q->queue_lock);

	if (!bio_list_empty(&bio_list_on_stack)) {
		blk_start_plug(&plug);
		while ((bio = bio_list_pop(&bio_list_on_stack)))
			generic_make_request(bio);
		blk_finish_plug(&plug);
	}
}

static u64 tg_prfill_cpu_rwstat(struct seq_file *sf,
				struct blkg_policy_data *pd, int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	struct blkg_rwstat rwstat = { }, tmp;
	int i, cpu;

	for_each_possible_cpu(cpu) {
		struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);

		tmp = blkg_rwstat_read((void *)sc + off);
		for (i = 0; i < BLKG_RWSTAT_NR; i++)
			rwstat.cnt[i] += tmp.cnt[i];
	}

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}

static int tg_print_cpu_rwstat(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_cpu_rwstat,
			  &blkcg_policy_throtl, seq_cft(sf)->private, true);
	return 0;
}

static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
			      int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	u64 v = *(u64 *)((void *)tg + off);

	if (v == -1)
		return 0;
	return __blkg_prfill_u64(sf, pd, v);
}

static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
			       int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	unsigned int v = *(unsigned int *)((void *)tg + off);

	if (v == -1)
		return 0;
	return __blkg_prfill_u64(sf, pd, v);
}

static int tg_print_conf_u64(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_u64,
			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
	return 0;
}

static int tg_print_conf_uint(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_uint,
			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
	return 0;
}

static ssize_t tg_set_conf(struct kernfs_open_file *of,
			   char *buf, size_t nbytes, loff_t off, bool is_u64)
{
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct blkg_conf_ctx ctx;
	struct throtl_grp *tg;
	struct throtl_service_queue *sq;
	struct blkcg_gq *blkg;
	struct cgroup_subsys_state *pos_css;
	int ret;

	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
	if (ret)
		return ret;

	tg = blkg_to_tg(ctx.blkg);
	sq = &tg->service_queue;

	if (!ctx.v)
		ctx.v = -1;

	if (is_u64)
		*(u64 *)((void *)tg + of_cft(of)->private) = ctx.v;
	else
		*(unsigned int *)((void *)tg + of_cft(of)->private) = ctx.v;

	throtl_log(&tg->service_queue,
		   "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
		   tg->bps[READ], tg->bps[WRITE],
		   tg->iops[READ], tg->iops[WRITE]);

	/*
	 * Update has_rules[] flags for the updated tg's subtree.  A tg is
	 * considered to have rules if either the tg itself or any of its
	 * ancestors has rules.  This identifies groups without any
	 * restrictions in the whole hierarchy and allows them to bypass
	 * blk-throttle.
	 */
	blkg_for_each_descendant_pre(blkg, pos_css, ctx.blkg)
		tg_update_has_rules(blkg_to_tg(blkg));

	/*
	 * We're already holding queue_lock and know @tg is valid.  Let's
	 * apply the new config directly.
	 *
	 * Restart the slices for both READ and WRITES.  It might happen
	 * that a group's limits are dropped suddenly and we don't want to
	 * account recently dispatched IO with the new low rate.
	 */
	throtl_start_new_slice(tg, 0);
	throtl_start_new_slice(tg, 1);

	if (tg->flags & THROTL_TG_PENDING) {
		tg_update_disptime(tg);
		throtl_schedule_next_dispatch(sq->parent_sq, true);
	}

	blkg_conf_finish(&ctx);
	return nbytes;
}

static ssize_t tg_set_conf_u64(struct kernfs_open_file *of,
			       char *buf, size_t nbytes, loff_t off)
{
	return tg_set_conf(of, buf, nbytes, off, true);
}

static ssize_t tg_set_conf_uint(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	return tg_set_conf(of, buf, nbytes, off, false);
}

static struct cftype throtl_files[] = {
	{
		.name = "throttle.read_bps_device",
		.private = offsetof(struct throtl_grp, bps[READ]),
		.seq_show = tg_print_conf_u64,
		.write = tg_set_conf_u64,
	},
	{
		.name = "throttle.write_bps_device",
		.private = offsetof(struct throtl_grp, bps[WRITE]),
		.seq_show = tg_print_conf_u64,
		.write = tg_set_conf_u64,
	},
	{
		.name = "throttle.read_iops_device",
		.private = offsetof(struct throtl_grp, iops[READ]),
		.seq_show = tg_print_conf_uint,
		.write = tg_set_conf_uint,
	},
	{
		.name = "throttle.write_iops_device",
		.private = offsetof(struct throtl_grp, iops[WRITE]),
		.seq_show = tg_print_conf_uint,
		.write = tg_set_conf_uint,
	},
	{
		.name = "throttle.io_service_bytes",
		.private = offsetof(struct tg_stats_cpu, service_bytes),
		.seq_show = tg_print_cpu_rwstat,
	},
	{
		.name = "throttle.io_serviced",
		.private = offsetof(struct tg_stats_cpu, serviced),
		.seq_show = tg_print_cpu_rwstat,
	},
	{ }	/* terminate */
};

static void throtl_shutdown_wq(struct request_queue *q)
{
	struct throtl_data *td = q->td;

	cancel_work_sync(&td->dispatch_work);
}

static struct blkcg_policy blkcg_policy_throtl = {
	.cftypes		= throtl_files,

	.pd_alloc_fn		= throtl_pd_alloc,
	.pd_init_fn		= throtl_pd_init,
	.pd_online_fn		= throtl_pd_online,
	.pd_free_fn		= throtl_pd_free,
	.pd_reset_stats_fn	= throtl_pd_reset_stats,
};

bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
{
	struct throtl_data *td = q->td;
	struct throtl_qnode *qn = NULL;
	struct throtl_grp *tg;
	struct throtl_service_queue *sq;
	bool rw = bio_data_dir(bio);
	struct blkcg *blkcg;
	bool throttled = false;

	/* see throtl_charge_bio() */
	if (bio->bi_rw & REQ_THROTTLED)
		goto out;

	/*
	 * A throtl_grp pointer retrieved under rcu can be used to access
	 * basic fields like stats and io rates.  If a group has no rules,
	 * just update the dispatch stats in a lockless manner and return.
	 */
	rcu_read_lock();
	blkcg = bio_blkcg(bio);
	tg = throtl_lookup_tg(td, blkcg);
	if (tg) {
		if (!tg->has_rules[rw]) {
			throtl_update_dispatch_stats(tg_to_blkg(tg),
						     bio->bi_iter.bi_size, bio->bi_rw);
			goto out_unlock_rcu;
		}
	}

	/*
	 * Either the group has not been allocated yet or it is not an
	 * unlimited IO group.
	 */
	spin_lock_irq(q->queue_lock);
	tg = throtl_lookup_create_tg(td, blkcg);
	if (unlikely(!tg))
		goto out_unlock;

	sq = &tg->service_queue;

	while (true) {
		/* throtl is FIFO - if bios are already queued, should queue */
		if (sq->nr_queued[rw])
			break;

		/* if above limits, break to queue */
		if (!tg_may_dispatch(tg, bio, NULL))
			break;

		/* within limits, let's charge and dispatch directly */
		throtl_charge_bio(tg, bio);

		/*
		 * We need to trim the slice even when bios are not being
		 * queued, otherwise it might happen that a bio is not queued
		 * for a long time and the slice keeps on extending and trim
		 * is not called for a long time.  Now if limits are reduced
		 * suddenly we take into account all the IO dispatched so far
		 * at the new low rate and newly queued IO gets a really long
		 * dispatch time.
		 *
		 * So keep on trimming the slice even if a bio is not queued.
		 */
		throtl_trim_slice(tg, rw);

		/*
		 * @bio passed through this layer without being throttled.
		 * Climb up the ladder.  If we're already at the top, it
		 * can be executed directly.
		 */
		qn = &tg->qnode_on_parent[rw];
		sq = sq->parent_sq;
		tg = sq_to_tg(sq);
		if (!tg)
			goto out_unlock;
	}

	/* out-of-limit, queue to @tg */
	throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
		   rw == READ ? 'R' : 'W',
		   tg->bytes_disp[rw], bio->bi_iter.bi_size, tg->bps[rw],
		   tg->io_disp[rw], tg->iops[rw],
		   sq->nr_queued[READ], sq->nr_queued[WRITE]);

	bio_associate_current(bio);
	tg->td->nr_queued[rw]++;
	throtl_add_bio_tg(bio, qn, tg);
	throttled = true;

	/*
	 * Update @tg's dispatch time and force schedule dispatch if @tg
	 * was empty before @bio.  The forced scheduling isn't likely to
	 * cause undue delay as @bio is likely to be dispatched directly if
	 * its @tg's disptime is not in the future.
	 */
	if (tg->flags & THROTL_TG_WAS_EMPTY) {
		tg_update_disptime(tg);
		throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true);
	}

out_unlock:
	spin_unlock_irq(q->queue_lock);
out_unlock_rcu:
	rcu_read_unlock();
out:
	/*
	 * As multiple blk-throtls may stack in the same issue path, we
	 * don't want bios to leave with the flag set.  Clear the flag if
	 * being issued.
	 */
	if (!throttled)
		bio->bi_rw &= ~REQ_THROTTLED;
	return throttled;
}
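
/*
 * An illustrative walk-through of the climb in blk_throtl_bio(): for a
 * hierarchy root -> A -> B with limits on both A and B, a bio submitted
 * in B that is within B's limit is charged to B, then the loop climbs to
 * A via B's qnode_on_parent.  If the bio exceeds A's limit, it is queued
 * on A's service_queue; if it stays within every limit up to the top,
 * tg becomes NULL and the bio is issued directly.
 */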

/*
 * Dispatch all bios from all children tg's queued on @parent_sq.  On
 * return, @parent_sq is guaranteed to not have any active children tg's
 * and all bios from previously active tg's are on @parent_sq->bio_lists[].
 */
static void tg_drain_bios(struct throtl_service_queue *parent_sq)
{
	struct throtl_grp *tg;

	while ((tg = throtl_rb_first(parent_sq))) {
		struct throtl_service_queue *sq = &tg->service_queue;
		struct bio *bio;

		throtl_dequeue_tg(tg);

		while ((bio = throtl_peek_queued(&sq->queued[READ])))
			tg_dispatch_one_bio(tg, bio_data_dir(bio));
		while ((bio = throtl_peek_queued(&sq->queued[WRITE])))
			tg_dispatch_one_bio(tg, bio_data_dir(bio));
	}
}

/**
 * blk_throtl_drain - drain throttled bios
 * @q: request_queue to drain throttled bios for
 *
 * Dispatch all currently throttled bios on @q through ->make_request_fn().
 */
void blk_throtl_drain(struct request_queue *q)
	__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct throtl_data *td = q->td;
	struct blkcg_gq *blkg;
	struct cgroup_subsys_state *pos_css;
	struct bio *bio;
	int rw;

	queue_lockdep_assert_held(q);
	rcu_read_lock();

	/*
	 * Drain each tg while doing post-order walk on the blkg tree, so
	 * that all bios are propagated to td->service_queue.  It'd be
	 * better to walk the service_queue tree directly but the blkg walk
	 * is easier.
	 */
	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg)
		tg_drain_bios(&blkg_to_tg(blkg)->service_queue);

	/* finally, transfer bios from top-level tg's into the td */
	tg_drain_bios(&td->service_queue);

	rcu_read_unlock();
	spin_unlock_irq(q->queue_lock);

	/* all bios now should be in td->service_queue, issue them */
	for (rw = READ; rw <= WRITE; rw++)
		while ((bio = throtl_pop_queued(&td->service_queue.queued[rw],
						NULL)))
			generic_make_request(bio);

	spin_lock_irq(q->queue_lock);
}

int blk_throtl_init(struct request_queue *q)
{
	struct throtl_data *td;
	int ret;

	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
	if (!td)
		return -ENOMEM;

	INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
	throtl_service_queue_init(&td->service_queue);

	q->td = td;
	td->queue = q;

	/* activate policy */
	ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
	if (ret)
		kfree(td);
	return ret;
}

void blk_throtl_exit(struct request_queue *q)
{
	BUG_ON(!q->td);
	throtl_shutdown_wq(q);
	blkcg_deactivate_policy(q, &blkcg_policy_throtl);
	kfree(q->td);
}

static int __init throtl_init(void)
{
	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
	if (!kthrotld_workqueue)
		panic("Failed to create kthrotld\n");

	return blkcg_policy_register(&blkcg_policy_throtl);
}

module_init(throtl_init);