e43473b7
VG
1/*
2 * Interface for controlling IO bandwidth on a request queue
3 *
4 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
5 */
6
7#include <linux/module.h>
8#include <linux/slab.h>
9#include <linux/blkdev.h>
10#include <linux/bio.h>
11#include <linux/blktrace_api.h>
eea8f41c 12#include <linux/blk-cgroup.h>
bc9fcbf9 13#include "blk.h"
e43473b7
VG
14
15/* Max dispatch from a group in 1 round */
16static int throtl_grp_quantum = 8;
17
18/* Total max dispatch from all groups in one round */
19static int throtl_quantum = 32;
20
 21/* Throttling is performed over a 100ms slice and after that the slice is renewed */
22static unsigned long throtl_slice = HZ/10; /* 100 ms */
23
3c798398 24static struct blkcg_policy blkcg_policy_throtl;
0381411e 25
450adcbe
VG
26/* A workqueue to queue throttle related work */
27static struct workqueue_struct *kthrotld_workqueue;
450adcbe 28
c5cc2070
TH
29/*
30 * To implement hierarchical throttling, throtl_grps form a tree and bios
31 * are dispatched upwards level by level until they reach the top and get
32 * issued. When dispatching bios from the children and local group at each
33 * level, if the bios are dispatched into a single bio_list, there's a risk
 34 * that a local or child group which can queue many bios at once fills
 35 * up the list, starving the others.
36 *
37 * To avoid such starvation, dispatched bios are queued separately
38 * according to where they came from. When they are again dispatched to
39 * the parent, they're popped in round-robin order so that no single source
40 * hogs the dispatch window.
41 *
42 * throtl_qnode is used to keep the queued bios separated by their sources.
43 * Bios are queued to throtl_qnode which in turn is queued to
44 * throtl_service_queue and then dispatched in round-robin order.
45 *
46 * It's also used to track the reference counts on blkg's. A qnode always
47 * belongs to a throtl_grp and gets queued on itself or the parent, so
48 * incrementing the reference of the associated throtl_grp when a qnode is
49 * queued and decrementing when dequeued is enough to keep the whole blkg
50 * tree pinned while bios are in flight.
51 */
52struct throtl_qnode {
53 struct list_head node; /* service_queue->queued[] */
54 struct bio_list bios; /* queued bios */
55 struct throtl_grp *tg; /* tg this qnode belongs to */
56};
57
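/*
 * Illustrative sketch (not part of the original source): with two child
 * groups A and B feeding the same parent, each child's bios sit on that
 * child's qnode, and the parent's queued[] list holds the qnodes:
 *
 *	parent_sq->queued[READ]: qnode_A -> qnode_B
 *	qnode_A->bios: a1, a2, a3	qnode_B->bios: b1, b2
 *
 * Round-robin popping (see throtl_pop_queued() below) then yields
 * a1, b1, a2, b2, a3 instead of draining A before B gets a turn.
 */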
c9e0332e 58struct throtl_service_queue {
77216b04
TH
59 struct throtl_service_queue *parent_sq; /* the parent service_queue */
60
73f0d49a
TH
61 /*
62 * Bios queued directly to this service_queue or dispatched from
63 * children throtl_grp's.
64 */
c5cc2070 65 struct list_head queued[2]; /* throtl_qnode [READ/WRITE] */
73f0d49a
TH
66 unsigned int nr_queued[2]; /* number of queued bios */
67
68 /*
69 * RB tree of active children throtl_grp's, which are sorted by
70 * their ->disptime.
71 */
c9e0332e
TH
72 struct rb_root pending_tree; /* RB tree of active tgs */
73 struct rb_node *first_pending; /* first node in the tree */
74 unsigned int nr_pending; /* # queued in the tree */
75 unsigned long first_pending_disptime; /* disptime of the first tg */
69df0ab0 76 struct timer_list pending_timer; /* fires on first_pending_disptime */
e43473b7
VG
77};
78
5b2c16aa
TH
79enum tg_state_flags {
80 THROTL_TG_PENDING = 1 << 0, /* on parent's pending tree */
0e9f4164 81 THROTL_TG_WAS_EMPTY = 1 << 1, /* bio_lists[] became non-empty */
5b2c16aa
TH
82};
83
e43473b7
VG
84#define rb_entry_tg(node) rb_entry((node), struct throtl_grp, rb_node)
85
86struct throtl_grp {
f95a04af
TH
87 /* must be the first member */
88 struct blkg_policy_data pd;
89
c9e0332e 90 /* active throtl group service_queue member */
e43473b7
VG
91 struct rb_node rb_node;
92
0f3457f6
TH
93 /* throtl_data this group belongs to */
94 struct throtl_data *td;
95
49a2f1e3
TH
96 /* this group's service queue */
97 struct throtl_service_queue service_queue;
98
c5cc2070
TH
99 /*
100 * qnode_on_self is used when bios are directly queued to this
101 * throtl_grp so that local bios compete fairly with bios
102 * dispatched from children. qnode_on_parent is used when bios are
103 * dispatched from this throtl_grp into its parent and will compete
104 * with the sibling qnode_on_parents and the parent's
105 * qnode_on_self.
106 */
107 struct throtl_qnode qnode_on_self[2];
108 struct throtl_qnode qnode_on_parent[2];
109
e43473b7
VG
110 /*
 111 * Dispatch time in jiffies. This is the estimated time when the group
 112 * will unthrottle and be ready to dispatch more bios. It is used as
 113 * the key to sort active groups in the service tree.
114 */
115 unsigned long disptime;
116
e43473b7
VG
117 unsigned int flags;
118
693e751e
TH
119 /* are there any throtl rules between this group and td? */
120 bool has_rules[2];
121
e43473b7
VG
122 /* bytes per second rate limits */
123 uint64_t bps[2];
124
8e89d13f
VG
125 /* IOPS limits */
126 unsigned int iops[2];
127
e43473b7
VG
 128 /* Number of bytes dispatched in current slice */
129 uint64_t bytes_disp[2];
8e89d13f
VG
130 /* Number of bio's dispatched in current slice */
131 unsigned int io_disp[2];
e43473b7
VG
132
133 /* When did we start a new slice */
134 unsigned long slice_start[2];
135 unsigned long slice_end[2];
fe071437 136
24bdb8ef
TH
137 /* total bytes transferred */
138 struct blkg_rwstat service_bytes;
139 /* total IOs serviced, post merge */
140 struct blkg_rwstat serviced;
e43473b7
VG
141};
142
143struct throtl_data
144{
e43473b7 145 /* service tree for active throtl groups */
c9e0332e 146 struct throtl_service_queue service_queue;
e43473b7 147
e43473b7
VG
148 struct request_queue *queue;
149
150 /* Total Number of queued bios on READ and WRITE lists */
151 unsigned int nr_queued[2];
152
153 /*
02977e4a 154 * number of total undestroyed groups
e43473b7
VG
155 */
156 unsigned int nr_undestroyed_grps;
157
158 /* Work for dispatching throttled bios */
69df0ab0 159 struct work_struct dispatch_work;
e43473b7
VG
160};
161
69df0ab0
TH
162static void throtl_pending_timer_fn(unsigned long arg);
163
f95a04af
TH
164static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
165{
166 return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
167}
168
3c798398 169static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
0381411e 170{
f95a04af 171 return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
0381411e
TH
172}
173
3c798398 174static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
0381411e 175{
f95a04af 176 return pd_to_blkg(&tg->pd);
0381411e
TH
177}
178
fda6f272
TH
179/**
 180 * sq_to_tg - return the throtl_grp the specified service queue belongs to
181 * @sq: the throtl_service_queue of interest
182 *
183 * Return the throtl_grp @sq belongs to. If @sq is the top-level one
184 * embedded in throtl_data, %NULL is returned.
185 */
186static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq)
187{
188 if (sq && sq->parent_sq)
189 return container_of(sq, struct throtl_grp, service_queue);
190 else
191 return NULL;
192}
193
194/**
195 * sq_to_td - return throtl_data the specified service queue belongs to
196 * @sq: the throtl_service_queue of interest
197 *
 198 * A service_queue can be embedded in either a throtl_grp or throtl_data.
199 * Determine the associated throtl_data accordingly and return it.
200 */
201static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
202{
203 struct throtl_grp *tg = sq_to_tg(sq);
204
205 if (tg)
206 return tg->td;
207 else
208 return container_of(sq, struct throtl_data, service_queue);
209}
210
211/**
212 * throtl_log - log debug message via blktrace
213 * @sq: the service_queue being reported
214 * @fmt: printf format string
215 * @args: printf args
216 *
217 * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
218 * throtl_grp; otherwise, just "throtl".
219 *
220 * TODO: this should be made a function and name formatting should happen
221 * after testing whether blktrace is enabled.
222 */
223#define throtl_log(sq, fmt, args...) do { \
224 struct throtl_grp *__tg = sq_to_tg((sq)); \
225 struct throtl_data *__td = sq_to_td((sq)); \
226 \
227 (void)__td; \
228 if ((__tg)) { \
229 char __pbuf[128]; \
54e7ed12 230 \
fda6f272
TH
231 blkg_path(tg_to_blkg(__tg), __pbuf, sizeof(__pbuf)); \
232 blk_add_trace_msg(__td->queue, "throtl %s " fmt, __pbuf, ##args); \
233 } else { \
234 blk_add_trace_msg(__td->queue, "throtl " fmt, ##args); \
235 } \
54e7ed12 236} while (0)
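/*
 * Example use of throtl_log() (a sketch mirroring calls made later in this
 * file): pass the service_queue of interest plus a printf-style format; the
 * "throtl <blkg path>" prefix is added automatically when @sq belongs to a
 * throtl_grp.
 *
 *	throtl_log(&tg->service_queue, "[%c] new slice start=%lu end=%lu",
 *		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
 *		   tg->slice_end[rw]);
 */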
e43473b7 237
c5cc2070
TH
238static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
239{
240 INIT_LIST_HEAD(&qn->node);
241 bio_list_init(&qn->bios);
242 qn->tg = tg;
243}
244
245/**
246 * throtl_qnode_add_bio - add a bio to a throtl_qnode and activate it
247 * @bio: bio being added
248 * @qn: qnode to add bio to
249 * @queued: the service_queue->queued[] list @qn belongs to
250 *
251 * Add @bio to @qn and put @qn on @queued if it's not already on.
252 * @qn->tg's reference count is bumped when @qn is activated. See the
253 * comment on top of throtl_qnode definition for details.
254 */
255static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,
256 struct list_head *queued)
257{
258 bio_list_add(&qn->bios, bio);
259 if (list_empty(&qn->node)) {
260 list_add_tail(&qn->node, queued);
261 blkg_get(tg_to_blkg(qn->tg));
262 }
263}
264
265/**
266 * throtl_peek_queued - peek the first bio on a qnode list
267 * @queued: the qnode list to peek
268 */
269static struct bio *throtl_peek_queued(struct list_head *queued)
270{
271 struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node);
272 struct bio *bio;
273
274 if (list_empty(queued))
275 return NULL;
276
277 bio = bio_list_peek(&qn->bios);
278 WARN_ON_ONCE(!bio);
279 return bio;
280}
281
282/**
 283 * throtl_pop_queued - pop the first bio from a qnode list
284 * @queued: the qnode list to pop a bio from
285 * @tg_to_put: optional out argument for throtl_grp to put
286 *
287 * Pop the first bio from the qnode list @queued. After popping, the first
288 * qnode is removed from @queued if empty or moved to the end of @queued so
289 * that the popping order is round-robin.
290 *
291 * When the first qnode is removed, its associated throtl_grp should be put
292 * too. If @tg_to_put is NULL, this function automatically puts it;
293 * otherwise, *@tg_to_put is set to the throtl_grp to put and the caller is
294 * responsible for putting it.
295 */
296static struct bio *throtl_pop_queued(struct list_head *queued,
297 struct throtl_grp **tg_to_put)
298{
299 struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node);
300 struct bio *bio;
301
302 if (list_empty(queued))
303 return NULL;
304
305 bio = bio_list_pop(&qn->bios);
306 WARN_ON_ONCE(!bio);
307
308 if (bio_list_empty(&qn->bios)) {
309 list_del_init(&qn->node);
310 if (tg_to_put)
311 *tg_to_put = qn->tg;
312 else
313 blkg_put(tg_to_blkg(qn->tg));
314 } else {
315 list_move_tail(&qn->node, queued);
316 }
317
318 return bio;
319}
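/*
 * Usage sketch (assumed caller pattern, matching tg_dispatch_one_bio() and
 * blk_throtl_dispatch_work_fn() below): bios are added through a qnode and
 * later popped from the owning list in round-robin order.
 *
 *	throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
 *			     &parent_sq->queued[rw]);
 *	...
 *	while ((bio = throtl_pop_queued(&parent_sq->queued[rw], NULL)))
 *		generic_make_request(bio);
 */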
320
49a2f1e3 321/* init a service_queue, assumes the caller zeroed it */
b2ce2643 322static void throtl_service_queue_init(struct throtl_service_queue *sq)
49a2f1e3 323{
c5cc2070
TH
324 INIT_LIST_HEAD(&sq->queued[0]);
325 INIT_LIST_HEAD(&sq->queued[1]);
49a2f1e3 326 sq->pending_tree = RB_ROOT;
69df0ab0
TH
327 setup_timer(&sq->pending_timer, throtl_pending_timer_fn,
328 (unsigned long)sq);
329}
330
001bea73
TH
331static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp, int node)
332{
4fb72036 333 struct throtl_grp *tg;
24bdb8ef 334 int rw;
4fb72036
TH
335
336 tg = kzalloc_node(sizeof(*tg), gfp, node);
337 if (!tg)
24bdb8ef 338 goto err;
4fb72036 339
24bdb8ef
TH
340 if (blkg_rwstat_init(&tg->service_bytes, gfp) ||
341 blkg_rwstat_init(&tg->serviced, gfp))
342 goto err_free_tg;
4fb72036 343
b2ce2643
TH
344 throtl_service_queue_init(&tg->service_queue);
345
346 for (rw = READ; rw <= WRITE; rw++) {
347 throtl_qnode_init(&tg->qnode_on_self[rw], tg);
348 throtl_qnode_init(&tg->qnode_on_parent[rw], tg);
349 }
350
351 RB_CLEAR_NODE(&tg->rb_node);
352 tg->bps[READ] = -1;
353 tg->bps[WRITE] = -1;
354 tg->iops[READ] = -1;
355 tg->iops[WRITE] = -1;
356
4fb72036 357 return &tg->pd;
24bdb8ef
TH
358
359err_free_tg:
360 blkg_rwstat_exit(&tg->serviced);
361 blkg_rwstat_exit(&tg->service_bytes);
362 kfree(tg);
363err:
364 return NULL;
001bea73
TH
365}
366
a9520cd6 367static void throtl_pd_init(struct blkg_policy_data *pd)
a29a171e 368{
a9520cd6
TH
369 struct throtl_grp *tg = pd_to_tg(pd);
370 struct blkcg_gq *blkg = tg_to_blkg(tg);
77216b04 371 struct throtl_data *td = blkg->q->td;
b2ce2643 372 struct throtl_service_queue *sq = &tg->service_queue;
cd1604fa 373
9138125b 374 /*
aa6ec29b 375 * If on the default hierarchy, we switch to properly hierarchical
9138125b
TH
376 * behavior where limits on a given throtl_grp are applied to the
377 * whole subtree rather than just the group itself. e.g. If 16M
378 * read_bps limit is set on the root group, the whole system can't
379 * exceed 16M for the device.
380 *
aa6ec29b 381 * If not on the default hierarchy, the broken flat hierarchy
9138125b
TH
382 * behavior is retained where all throtl_grps are treated as if
383 * they're all separate root groups right below throtl_data.
384 * Limits of a group don't interact with limits of other groups
385 * regardless of the position of the group in the hierarchy.
386 */
b2ce2643 387 sq->parent_sq = &td->service_queue;
aa6ec29b 388 if (cgroup_on_dfl(blkg->blkcg->css.cgroup) && blkg->parent)
b2ce2643 389 sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
77216b04 390 tg->td = td;
8a3d2615
TH
391}
392
693e751e
TH
393/*
394 * Set has_rules[] if @tg or any of its parents have limits configured.
395 * This doesn't require walking up to the top of the hierarchy as the
396 * parent's has_rules[] is guaranteed to be correct.
397 */
398static void tg_update_has_rules(struct throtl_grp *tg)
399{
400 struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq);
401 int rw;
402
403 for (rw = READ; rw <= WRITE; rw++)
404 tg->has_rules[rw] = (parent_tg && parent_tg->has_rules[rw]) ||
405 (tg->bps[rw] != -1 || tg->iops[rw] != -1);
406}
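/*
 * Example (illustrative, not from the source): if a parent group has
 * read_bps_device set and the child has no limits of its own, the child
 * still ends up with has_rules[READ] == true via parent_tg->has_rules[READ],
 * so bios submitted in the child cannot bypass blk-throttle.
 */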
407
a9520cd6 408static void throtl_pd_online(struct blkg_policy_data *pd)
693e751e
TH
409{
410 /*
411 * We don't want new groups to escape the limits of its ancestors.
412 * Update has_rules[] after a new group is brought online.
413 */
a9520cd6 414 tg_update_has_rules(pd_to_tg(pd));
693e751e
TH
415}
416
001bea73
TH
417static void throtl_pd_free(struct blkg_policy_data *pd)
418{
4fb72036
TH
419 struct throtl_grp *tg = pd_to_tg(pd);
420
b2ce2643 421 del_timer_sync(&tg->service_queue.pending_timer);
24bdb8ef
TH
422 blkg_rwstat_exit(&tg->serviced);
423 blkg_rwstat_exit(&tg->service_bytes);
4fb72036 424 kfree(tg);
001bea73
TH
425}
426
a9520cd6 427static void throtl_pd_reset_stats(struct blkg_policy_data *pd)
8a3d2615 428{
a9520cd6 429 struct throtl_grp *tg = pd_to_tg(pd);
8a3d2615 430
24bdb8ef
TH
431 blkg_rwstat_reset(&tg->service_bytes);
432 blkg_rwstat_reset(&tg->serviced);
a29a171e
VG
433}
434
0049af73
TH
435static struct throtl_grp *
436throtl_rb_first(struct throtl_service_queue *parent_sq)
e43473b7
VG
437{
438 /* Service tree is empty */
0049af73 439 if (!parent_sq->nr_pending)
e43473b7
VG
440 return NULL;
441
0049af73
TH
442 if (!parent_sq->first_pending)
443 parent_sq->first_pending = rb_first(&parent_sq->pending_tree);
e43473b7 444
0049af73
TH
445 if (parent_sq->first_pending)
446 return rb_entry_tg(parent_sq->first_pending);
e43473b7
VG
447
448 return NULL;
449}
450
451static void rb_erase_init(struct rb_node *n, struct rb_root *root)
452{
453 rb_erase(n, root);
454 RB_CLEAR_NODE(n);
455}
456
0049af73
TH
457static void throtl_rb_erase(struct rb_node *n,
458 struct throtl_service_queue *parent_sq)
e43473b7 459{
0049af73
TH
460 if (parent_sq->first_pending == n)
461 parent_sq->first_pending = NULL;
462 rb_erase_init(n, &parent_sq->pending_tree);
463 --parent_sq->nr_pending;
e43473b7
VG
464}
465
0049af73 466static void update_min_dispatch_time(struct throtl_service_queue *parent_sq)
e43473b7
VG
467{
468 struct throtl_grp *tg;
469
0049af73 470 tg = throtl_rb_first(parent_sq);
e43473b7
VG
471 if (!tg)
472 return;
473
0049af73 474 parent_sq->first_pending_disptime = tg->disptime;
e43473b7
VG
475}
476
77216b04 477static void tg_service_queue_add(struct throtl_grp *tg)
e43473b7 478{
77216b04 479 struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq;
0049af73 480 struct rb_node **node = &parent_sq->pending_tree.rb_node;
e43473b7
VG
481 struct rb_node *parent = NULL;
482 struct throtl_grp *__tg;
483 unsigned long key = tg->disptime;
484 int left = 1;
485
486 while (*node != NULL) {
487 parent = *node;
488 __tg = rb_entry_tg(parent);
489
490 if (time_before(key, __tg->disptime))
491 node = &parent->rb_left;
492 else {
493 node = &parent->rb_right;
494 left = 0;
495 }
496 }
497
498 if (left)
0049af73 499 parent_sq->first_pending = &tg->rb_node;
e43473b7
VG
500
501 rb_link_node(&tg->rb_node, parent, node);
0049af73 502 rb_insert_color(&tg->rb_node, &parent_sq->pending_tree);
e43473b7
VG
503}
504
77216b04 505static void __throtl_enqueue_tg(struct throtl_grp *tg)
e43473b7 506{
77216b04 507 tg_service_queue_add(tg);
5b2c16aa 508 tg->flags |= THROTL_TG_PENDING;
77216b04 509 tg->service_queue.parent_sq->nr_pending++;
e43473b7
VG
510}
511
77216b04 512static void throtl_enqueue_tg(struct throtl_grp *tg)
e43473b7 513{
5b2c16aa 514 if (!(tg->flags & THROTL_TG_PENDING))
77216b04 515 __throtl_enqueue_tg(tg);
e43473b7
VG
516}
517
77216b04 518static void __throtl_dequeue_tg(struct throtl_grp *tg)
e43473b7 519{
77216b04 520 throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq);
5b2c16aa 521 tg->flags &= ~THROTL_TG_PENDING;
e43473b7
VG
522}
523
77216b04 524static void throtl_dequeue_tg(struct throtl_grp *tg)
e43473b7 525{
5b2c16aa 526 if (tg->flags & THROTL_TG_PENDING)
77216b04 527 __throtl_dequeue_tg(tg);
e43473b7
VG
528}
529
a9131a27 530/* Call with queue lock held */
69df0ab0
TH
531static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
532 unsigned long expires)
a9131a27 533{
69df0ab0
TH
534 mod_timer(&sq->pending_timer, expires);
535 throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu",
536 expires - jiffies, jiffies);
a9131a27
TH
537}
538
7f52f98c
TH
539/**
540 * throtl_schedule_next_dispatch - schedule the next dispatch cycle
541 * @sq: the service_queue to schedule dispatch for
542 * @force: force scheduling
543 *
544 * Arm @sq->pending_timer so that the next dispatch cycle starts on the
545 * dispatch time of the first pending child. Returns %true if either timer
546 * is armed or there's no pending child left. %false if the current
547 * dispatch window is still open and the caller should continue
548 * dispatching.
549 *
550 * If @force is %true, the dispatch timer is always scheduled and this
551 * function is guaranteed to return %true. This is to be used when the
552 * caller can't dispatch itself and needs to invoke pending_timer
553 * unconditionally. Note that forced scheduling is likely to induce short
554 * delay before dispatch starts even if @sq->first_pending_disptime is not
555 * in the future and thus shouldn't be used in hot paths.
556 */
557static bool throtl_schedule_next_dispatch(struct throtl_service_queue *sq,
558 bool force)
e43473b7 559{
6a525600 560 /* any pending children left? */
c9e0332e 561 if (!sq->nr_pending)
7f52f98c 562 return true;
e43473b7 563
c9e0332e 564 update_min_dispatch_time(sq);
e43473b7 565
69df0ab0 566 /* is the next dispatch time in the future? */
7f52f98c 567 if (force || time_after(sq->first_pending_disptime, jiffies)) {
69df0ab0 568 throtl_schedule_pending_timer(sq, sq->first_pending_disptime);
7f52f98c 569 return true;
69df0ab0
TH
570 }
571
7f52f98c
TH
572 /* tell the caller to continue dispatching */
573 return false;
e43473b7
VG
574}
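/*
 * Typical caller pattern (sketch based on throtl_pending_timer_fn() below):
 * keep dispatching while the window is open and stop once the timer has
 * been armed or nothing is pending.
 *
 *	while (true) {
 *		throtl_select_dispatch(sq);
 *		if (throtl_schedule_next_dispatch(sq, false))
 *			break;
 *		(window still open: unlock, cpu_relax(), relock, repeat)
 *	}
 */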
575
32ee5bc4
VG
576static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
577 bool rw, unsigned long start)
578{
579 tg->bytes_disp[rw] = 0;
580 tg->io_disp[rw] = 0;
581
582 /*
583 * Previous slice has expired. We must have trimmed it after last
584 * bio dispatch. That means since start of last slice, we never used
585 * that bandwidth. Do try to make use of that bandwidth while giving
586 * credit.
587 */
588 if (time_after_eq(start, tg->slice_start[rw]))
589 tg->slice_start[rw] = start;
590
591 tg->slice_end[rw] = jiffies + throtl_slice;
592 throtl_log(&tg->service_queue,
593 "[%c] new slice with credit start=%lu end=%lu jiffies=%lu",
594 rw == READ ? 'R' : 'W', tg->slice_start[rw],
595 tg->slice_end[rw], jiffies);
596}
597
0f3457f6 598static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
e43473b7
VG
599{
600 tg->bytes_disp[rw] = 0;
8e89d13f 601 tg->io_disp[rw] = 0;
e43473b7
VG
602 tg->slice_start[rw] = jiffies;
603 tg->slice_end[rw] = jiffies + throtl_slice;
fda6f272
TH
604 throtl_log(&tg->service_queue,
605 "[%c] new slice start=%lu end=%lu jiffies=%lu",
606 rw == READ ? 'R' : 'W', tg->slice_start[rw],
607 tg->slice_end[rw], jiffies);
e43473b7
VG
608}
609
0f3457f6
TH
610static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
611 unsigned long jiffy_end)
d1ae8ffd
VG
612{
613 tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
614}
615
0f3457f6
TH
616static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
617 unsigned long jiffy_end)
e43473b7
VG
618{
619 tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
fda6f272
TH
620 throtl_log(&tg->service_queue,
621 "[%c] extend slice start=%lu end=%lu jiffies=%lu",
622 rw == READ ? 'R' : 'W', tg->slice_start[rw],
623 tg->slice_end[rw], jiffies);
e43473b7
VG
624}
625
626/* Determine if previously allocated or extended slice is complete or not */
0f3457f6 627static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
e43473b7
VG
628{
629 if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
5cf8c227 630 return false;
e43473b7
VG
631
 632 return true;
633}
634
635/* Trim the used slices and adjust slice start accordingly */
0f3457f6 636static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
e43473b7 637{
3aad5d3e
VG
638 unsigned long nr_slices, time_elapsed, io_trim;
639 u64 bytes_trim, tmp;
e43473b7
VG
640
641 BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));
642
643 /*
 644 * If bps are unlimited (-1), then the time slice doesn't get
 645 * renewed. Don't try to trim the slice if the slice has expired. A new
 646 * slice will start when appropriate.
647 */
0f3457f6 648 if (throtl_slice_used(tg, rw))
e43473b7
VG
649 return;
650
d1ae8ffd
VG
651 /*
 652 * A bio has been dispatched. Also adjust slice_end. It might happen
 653 * that initially the cgroup limit was very low, resulting in a high
 654 * slice_end, but later the limit was bumped up and the bio was dispatched
 655 * sooner; then we need to reduce slice_end. A high bogus slice_end
 656 * is bad because it does not allow a new slice to start.
657 */
658
0f3457f6 659 throtl_set_slice_end(tg, rw, jiffies + throtl_slice);
d1ae8ffd 660
e43473b7
VG
661 time_elapsed = jiffies - tg->slice_start[rw];
662
663 nr_slices = time_elapsed / throtl_slice;
664
665 if (!nr_slices)
666 return;
3aad5d3e
VG
667 tmp = tg->bps[rw] * throtl_slice * nr_slices;
668 do_div(tmp, HZ);
669 bytes_trim = tmp;
e43473b7 670
8e89d13f 671 io_trim = (tg->iops[rw] * throtl_slice * nr_slices)/HZ;
e43473b7 672
8e89d13f 673 if (!bytes_trim && !io_trim)
e43473b7
VG
674 return;
675
676 if (tg->bytes_disp[rw] >= bytes_trim)
677 tg->bytes_disp[rw] -= bytes_trim;
678 else
679 tg->bytes_disp[rw] = 0;
680
8e89d13f
VG
681 if (tg->io_disp[rw] >= io_trim)
682 tg->io_disp[rw] -= io_trim;
683 else
684 tg->io_disp[rw] = 0;
685
e43473b7
VG
686 tg->slice_start[rw] += nr_slices * throtl_slice;
687
fda6f272
TH
688 throtl_log(&tg->service_queue,
689 "[%c] trim slice nr=%lu bytes=%llu io=%lu start=%lu end=%lu jiffies=%lu",
690 rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
691 tg->slice_start[rw], tg->slice_end[rw], jiffies);
e43473b7
VG
692}
693
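/*
 * Worked example (illustrative numbers, HZ assumed to be 1000 so
 * throtl_slice is 100 jiffies): with bps[READ] = 1048576 and 300 jiffies
 * elapsed since slice_start, nr_slices = 3 and
 * bytes_trim = 1048576 * 100 * 3 / 1000 = 314572, i.e. roughly 300ms worth
 * of budget is subtracted from bytes_disp[READ] and slice_start[READ]
 * advances by 3 * throtl_slice.
 */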
0f3457f6
TH
694static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
695 unsigned long *wait)
e43473b7
VG
696{
697 bool rw = bio_data_dir(bio);
8e89d13f 698 unsigned int io_allowed;
e43473b7 699 unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
c49c06e4 700 u64 tmp;
e43473b7 701
8e89d13f 702 jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
e43473b7 703
8e89d13f
VG
704 /* Slice has just started. Consider one slice interval */
705 if (!jiffy_elapsed)
706 jiffy_elapsed_rnd = throtl_slice;
707
708 jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);
709
c49c06e4
VG
710 /*
 711 * jiffy_elapsed_rnd should not be a big value: the minimum iops can be
 712 * 1, so at most the elapsed jiffies should be equivalent to 1 second, as
 713 * we will allow dispatch after 1 second and after that the slice should
 714 * have been trimmed.
715 */
716
717 tmp = (u64)tg->iops[rw] * jiffy_elapsed_rnd;
718 do_div(tmp, HZ);
719
720 if (tmp > UINT_MAX)
721 io_allowed = UINT_MAX;
722 else
723 io_allowed = tmp;
8e89d13f
VG
724
725 if (tg->io_disp[rw] + 1 <= io_allowed) {
e43473b7
VG
726 if (wait)
727 *wait = 0;
5cf8c227 728 return true;
e43473b7
VG
729 }
730
8e89d13f
VG
731 /* Calc approx time to dispatch */
732 jiffy_wait = ((tg->io_disp[rw] + 1) * HZ)/tg->iops[rw] + 1;
733
734 if (jiffy_wait > jiffy_elapsed)
735 jiffy_wait = jiffy_wait - jiffy_elapsed;
736 else
737 jiffy_wait = 1;
738
739 if (wait)
740 *wait = jiffy_wait;
 741 return false;
742}
743
0f3457f6
TH
744static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
745 unsigned long *wait)
8e89d13f
VG
746{
747 bool rw = bio_data_dir(bio);
3aad5d3e 748 u64 bytes_allowed, extra_bytes, tmp;
8e89d13f 749 unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
e43473b7
VG
750
751 jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
752
753 /* Slice has just started. Consider one slice interval */
754 if (!jiffy_elapsed)
755 jiffy_elapsed_rnd = throtl_slice;
756
757 jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);
758
5e901a2b
VG
759 tmp = tg->bps[rw] * jiffy_elapsed_rnd;
760 do_div(tmp, HZ);
3aad5d3e 761 bytes_allowed = tmp;
e43473b7 762
4f024f37 763 if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) {
e43473b7
VG
764 if (wait)
765 *wait = 0;
5cf8c227 766 return true;
e43473b7
VG
767 }
768
769 /* Calc approx time to dispatch */
4f024f37 770 extra_bytes = tg->bytes_disp[rw] + bio->bi_iter.bi_size - bytes_allowed;
e43473b7
VG
771 jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);
772
773 if (!jiffy_wait)
774 jiffy_wait = 1;
775
776 /*
777 * This wait time is without taking into consideration the rounding
778 * up we did. Add that time also.
779 */
780 jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
e43473b7
VG
781 if (wait)
782 *wait = jiffy_wait;
8e89d13f
VG
 783 return false;
784}
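/*
 * Worked example (illustrative, HZ assumed to be 1000): with
 * bps[rw] = 1048576 and jiffy_elapsed_rnd = 100, bytes_allowed =
 * 1048576 * 100 / 1000 = 104857. A 256KB bio with bytes_disp[rw] = 0 gives
 * extra_bytes = 262144 - 104857 = 157287 and jiffy_wait =
 * 157287 * 1000 / 1048576 = 150, so the bio waits roughly 150ms (plus the
 * rounding adjustment) before it fits within the configured rate.
 */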
785
786/*
787 * Returns whether one can dispatch a bio or not. Also returns approx number
 788 * of jiffies to wait before this bio is within the IO rate and can be dispatched
789 */
0f3457f6
TH
790static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
791 unsigned long *wait)
8e89d13f
VG
792{
793 bool rw = bio_data_dir(bio);
794 unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;
795
796 /*
 797 * Currently the whole state machine of the group depends on the first
 798 * bio queued in the group's bio list. So one should not be calling
799 * this function with a different bio if there are other bios
800 * queued.
801 */
73f0d49a 802 BUG_ON(tg->service_queue.nr_queued[rw] &&
c5cc2070 803 bio != throtl_peek_queued(&tg->service_queue.queued[rw]));
e43473b7 804
8e89d13f
VG
805 /* If tg->bps = -1, then BW is unlimited */
806 if (tg->bps[rw] == -1 && tg->iops[rw] == -1) {
807 if (wait)
808 *wait = 0;
5cf8c227 809 return true;
8e89d13f
VG
810 }
811
812 /*
813 * If previous slice expired, start a new one otherwise renew/extend
814 * existing slice to make sure it is at least throtl_slice interval
815 * long since now.
816 */
0f3457f6
TH
817 if (throtl_slice_used(tg, rw))
818 throtl_start_new_slice(tg, rw);
8e89d13f
VG
819 else {
820 if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
0f3457f6 821 throtl_extend_slice(tg, rw, jiffies + throtl_slice);
8e89d13f
VG
822 }
823
0f3457f6
TH
824 if (tg_with_in_bps_limit(tg, bio, &bps_wait) &&
825 tg_with_in_iops_limit(tg, bio, &iops_wait)) {
8e89d13f
VG
826 if (wait)
827 *wait = 0;
 828 return true;
829 }
830
831 max_wait = max(bps_wait, iops_wait);
832
833 if (wait)
834 *wait = max_wait;
835
836 if (time_before(tg->slice_end[rw], jiffies + max_wait))
0f3457f6 837 throtl_extend_slice(tg, rw, jiffies + max_wait);
e43473b7
VG
838
 839 return false;
840}
841
3c798398 842static void throtl_update_dispatch_stats(struct blkcg_gq *blkg, u64 bytes,
629ed0b1
TH
843 int rw)
844{
8a3d2615 845 struct throtl_grp *tg = blkg_to_tg(blkg);
629ed0b1
TH
846 unsigned long flags;
847
629ed0b1
TH
848 /*
849 * Disabling interrupts to provide mutual exclusion between two
850 * writes on same cpu. It probably is not needed for 64bit. Not
851 * optimizing that case yet.
852 */
853 local_irq_save(flags);
854
24bdb8ef
TH
855 blkg_rwstat_add(&tg->serviced, rw, 1);
856 blkg_rwstat_add(&tg->service_bytes, rw, bytes);
629ed0b1
TH
857
858 local_irq_restore(flags);
859}
860
e43473b7
VG
861static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
862{
863 bool rw = bio_data_dir(bio);
e43473b7
VG
864
865 /* Charge the bio to the group */
4f024f37 866 tg->bytes_disp[rw] += bio->bi_iter.bi_size;
8e89d13f 867 tg->io_disp[rw]++;
e43473b7 868
2a0f61e6
TH
869 /*
 870 * REQ_THROTTLED is used to prevent the same bio from being throttled
871 * more than once as a throttled bio will go through blk-throtl the
872 * second time when it eventually gets issued. Set it when a bio
873 * is being charged to a tg.
874 *
875 * Dispatch stats aren't recursive and each @bio should only be
876 * accounted by the @tg it was originally associated with. Let's
877 * update the stats when setting REQ_THROTTLED for the first time
878 * which is guaranteed to be for the @bio's original tg.
879 */
880 if (!(bio->bi_rw & REQ_THROTTLED)) {
881 bio->bi_rw |= REQ_THROTTLED;
4f024f37
KO
882 throtl_update_dispatch_stats(tg_to_blkg(tg),
883 bio->bi_iter.bi_size, bio->bi_rw);
2a0f61e6 884 }
e43473b7
VG
885}
886
c5cc2070
TH
887/**
888 * throtl_add_bio_tg - add a bio to the specified throtl_grp
889 * @bio: bio to add
890 * @qn: qnode to use
891 * @tg: the target throtl_grp
892 *
893 * Add @bio to @tg's service_queue using @qn. If @qn is not specified,
894 * tg->qnode_on_self[] is used.
895 */
896static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn,
897 struct throtl_grp *tg)
e43473b7 898{
73f0d49a 899 struct throtl_service_queue *sq = &tg->service_queue;
e43473b7
VG
900 bool rw = bio_data_dir(bio);
901
c5cc2070
TH
902 if (!qn)
903 qn = &tg->qnode_on_self[rw];
904
0e9f4164
TH
905 /*
906 * If @tg doesn't currently have any bios queued in the same
907 * direction, queueing @bio can change when @tg should be
908 * dispatched. Mark that @tg was empty. This is automatically
 909 * cleared on the next tg_update_disptime().
910 */
911 if (!sq->nr_queued[rw])
912 tg->flags |= THROTL_TG_WAS_EMPTY;
913
c5cc2070
TH
914 throtl_qnode_add_bio(bio, qn, &sq->queued[rw]);
915
73f0d49a 916 sq->nr_queued[rw]++;
77216b04 917 throtl_enqueue_tg(tg);
e43473b7
VG
918}
919
77216b04 920static void tg_update_disptime(struct throtl_grp *tg)
e43473b7 921{
73f0d49a 922 struct throtl_service_queue *sq = &tg->service_queue;
e43473b7
VG
923 unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
924 struct bio *bio;
925
c5cc2070 926 if ((bio = throtl_peek_queued(&sq->queued[READ])))
0f3457f6 927 tg_may_dispatch(tg, bio, &read_wait);
e43473b7 928
c5cc2070 929 if ((bio = throtl_peek_queued(&sq->queued[WRITE])))
0f3457f6 930 tg_may_dispatch(tg, bio, &write_wait);
e43473b7
VG
931
932 min_wait = min(read_wait, write_wait);
933 disptime = jiffies + min_wait;
934
e43473b7 935 /* Update dispatch time */
77216b04 936 throtl_dequeue_tg(tg);
e43473b7 937 tg->disptime = disptime;
77216b04 938 throtl_enqueue_tg(tg);
0e9f4164
TH
939
940 /* see throtl_add_bio_tg() */
941 tg->flags &= ~THROTL_TG_WAS_EMPTY;
e43473b7
VG
942}
943
32ee5bc4
VG
944static void start_parent_slice_with_credit(struct throtl_grp *child_tg,
945 struct throtl_grp *parent_tg, bool rw)
946{
947 if (throtl_slice_used(parent_tg, rw)) {
948 throtl_start_new_slice_with_credit(parent_tg, rw,
949 child_tg->slice_start[rw]);
950 }
951
952}
953
77216b04 954static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
e43473b7 955{
73f0d49a 956 struct throtl_service_queue *sq = &tg->service_queue;
6bc9c2b4
TH
957 struct throtl_service_queue *parent_sq = sq->parent_sq;
958 struct throtl_grp *parent_tg = sq_to_tg(parent_sq);
c5cc2070 959 struct throtl_grp *tg_to_put = NULL;
e43473b7
VG
960 struct bio *bio;
961
c5cc2070
TH
962 /*
963 * @bio is being transferred from @tg to @parent_sq. Popping a bio
964 * from @tg may put its reference and @parent_sq might end up
965 * getting released prematurely. Remember the tg to put and put it
966 * after @bio is transferred to @parent_sq.
967 */
968 bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
73f0d49a 969 sq->nr_queued[rw]--;
e43473b7
VG
970
971 throtl_charge_bio(tg, bio);
6bc9c2b4
TH
972
973 /*
974 * If our parent is another tg, we just need to transfer @bio to
975 * the parent using throtl_add_bio_tg(). If our parent is
976 * @td->service_queue, @bio is ready to be issued. Put it on its
977 * bio_lists[] and decrease total number queued. The caller is
978 * responsible for issuing these bios.
979 */
980 if (parent_tg) {
c5cc2070 981 throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
32ee5bc4 982 start_parent_slice_with_credit(tg, parent_tg, rw);
6bc9c2b4 983 } else {
c5cc2070
TH
984 throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
985 &parent_sq->queued[rw]);
6bc9c2b4
TH
986 BUG_ON(tg->td->nr_queued[rw] <= 0);
987 tg->td->nr_queued[rw]--;
988 }
e43473b7 989
0f3457f6 990 throtl_trim_slice(tg, rw);
6bc9c2b4 991
c5cc2070
TH
992 if (tg_to_put)
993 blkg_put(tg_to_blkg(tg_to_put));
e43473b7
VG
994}
995
77216b04 996static int throtl_dispatch_tg(struct throtl_grp *tg)
e43473b7 997{
73f0d49a 998 struct throtl_service_queue *sq = &tg->service_queue;
e43473b7
VG
999 unsigned int nr_reads = 0, nr_writes = 0;
1000 unsigned int max_nr_reads = throtl_grp_quantum*3/4;
c2f6805d 1001 unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
e43473b7
VG
1002 struct bio *bio;
1003
1004 /* Try to dispatch 75% READS and 25% WRITES */
1005
c5cc2070 1006 while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
0f3457f6 1007 tg_may_dispatch(tg, bio, NULL)) {
e43473b7 1008
77216b04 1009 tg_dispatch_one_bio(tg, bio_data_dir(bio));
e43473b7
VG
1010 nr_reads++;
1011
1012 if (nr_reads >= max_nr_reads)
1013 break;
1014 }
1015
c5cc2070 1016 while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
0f3457f6 1017 tg_may_dispatch(tg, bio, NULL)) {
e43473b7 1018
77216b04 1019 tg_dispatch_one_bio(tg, bio_data_dir(bio));
e43473b7
VG
1020 nr_writes++;
1021
1022 if (nr_writes >= max_nr_writes)
1023 break;
1024 }
1025
1026 return nr_reads + nr_writes;
1027}
1028
651930bc 1029static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
e43473b7
VG
1030{
1031 unsigned int nr_disp = 0;
e43473b7
VG
1032
1033 while (1) {
73f0d49a
TH
1034 struct throtl_grp *tg = throtl_rb_first(parent_sq);
1035 struct throtl_service_queue *sq = &tg->service_queue;
e43473b7
VG
1036
1037 if (!tg)
1038 break;
1039
1040 if (time_before(jiffies, tg->disptime))
1041 break;
1042
77216b04 1043 throtl_dequeue_tg(tg);
e43473b7 1044
77216b04 1045 nr_disp += throtl_dispatch_tg(tg);
e43473b7 1046
73f0d49a 1047 if (sq->nr_queued[0] || sq->nr_queued[1])
77216b04 1048 tg_update_disptime(tg);
e43473b7
VG
1049
1050 if (nr_disp >= throtl_quantum)
1051 break;
1052 }
1053
1054 return nr_disp;
1055}
1056
6e1a5704
TH
1057/**
1058 * throtl_pending_timer_fn - timer function for service_queue->pending_timer
1059 * @arg: the throtl_service_queue being serviced
1060 *
 1061 * This timer is armed when a child throtl_grp with active bio's becomes
1062 * pending and queued on the service_queue's pending_tree and expires when
1063 * the first child throtl_grp should be dispatched. This function
2e48a530
TH
1064 * dispatches bio's from the children throtl_grps to the parent
1065 * service_queue.
1066 *
1067 * If the parent's parent is another throtl_grp, dispatching is propagated
1068 * by either arming its pending_timer or repeating dispatch directly. If
1069 * the top-level service_tree is reached, throtl_data->dispatch_work is
1070 * kicked so that the ready bio's are issued.
6e1a5704 1071 */
69df0ab0
TH
1072static void throtl_pending_timer_fn(unsigned long arg)
1073{
1074 struct throtl_service_queue *sq = (void *)arg;
2e48a530 1075 struct throtl_grp *tg = sq_to_tg(sq);
69df0ab0 1076 struct throtl_data *td = sq_to_td(sq);
cb76199c 1077 struct request_queue *q = td->queue;
2e48a530
TH
1078 struct throtl_service_queue *parent_sq;
1079 bool dispatched;
6e1a5704 1080 int ret;
e43473b7
VG
1081
1082 spin_lock_irq(q->queue_lock);
2e48a530
TH
1083again:
1084 parent_sq = sq->parent_sq;
1085 dispatched = false;
e43473b7 1086
7f52f98c
TH
1087 while (true) {
1088 throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u",
2e48a530
TH
1089 sq->nr_queued[READ] + sq->nr_queued[WRITE],
1090 sq->nr_queued[READ], sq->nr_queued[WRITE]);
7f52f98c
TH
1091
1092 ret = throtl_select_dispatch(sq);
1093 if (ret) {
7f52f98c
TH
1094 throtl_log(sq, "bios disp=%u", ret);
1095 dispatched = true;
1096 }
e43473b7 1097
7f52f98c
TH
1098 if (throtl_schedule_next_dispatch(sq, false))
1099 break;
e43473b7 1100
7f52f98c
TH
 1101 /* this dispatch window is still open, relax and repeat */
1102 spin_unlock_irq(q->queue_lock);
1103 cpu_relax();
1104 spin_lock_irq(q->queue_lock);
651930bc 1105 }
e43473b7 1106
2e48a530
TH
1107 if (!dispatched)
1108 goto out_unlock;
6e1a5704 1109
2e48a530
TH
1110 if (parent_sq) {
 1111 /* @parent_sq is another throtl_grp, propagate dispatch */
1112 if (tg->flags & THROTL_TG_WAS_EMPTY) {
1113 tg_update_disptime(tg);
1114 if (!throtl_schedule_next_dispatch(parent_sq, false)) {
1115 /* window is already open, repeat dispatching */
1116 sq = parent_sq;
1117 tg = sq_to_tg(sq);
1118 goto again;
1119 }
1120 }
1121 } else {
 1122 /* reached the top-level, queue issuing */
1123 queue_work(kthrotld_workqueue, &td->dispatch_work);
1124 }
1125out_unlock:
e43473b7 1126 spin_unlock_irq(q->queue_lock);
6e1a5704 1127}
e43473b7 1128
6e1a5704
TH
1129/**
1130 * blk_throtl_dispatch_work_fn - work function for throtl_data->dispatch_work
1131 * @work: work item being executed
1132 *
1133 * This function is queued for execution when bio's reach the bio_lists[]
1134 * of throtl_data->service_queue. Those bio's are ready and issued by this
1135 * function.
1136 */
8876e140 1137static void blk_throtl_dispatch_work_fn(struct work_struct *work)
6e1a5704
TH
1138{
1139 struct throtl_data *td = container_of(work, struct throtl_data,
1140 dispatch_work);
1141 struct throtl_service_queue *td_sq = &td->service_queue;
1142 struct request_queue *q = td->queue;
1143 struct bio_list bio_list_on_stack;
1144 struct bio *bio;
1145 struct blk_plug plug;
1146 int rw;
1147
1148 bio_list_init(&bio_list_on_stack);
1149
1150 spin_lock_irq(q->queue_lock);
c5cc2070
TH
1151 for (rw = READ; rw <= WRITE; rw++)
1152 while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
1153 bio_list_add(&bio_list_on_stack, bio);
6e1a5704
TH
1154 spin_unlock_irq(q->queue_lock);
1155
1156 if (!bio_list_empty(&bio_list_on_stack)) {
69d60eb9 1157 blk_start_plug(&plug);
e43473b7
VG
1158 while((bio = bio_list_pop(&bio_list_on_stack)))
1159 generic_make_request(bio);
69d60eb9 1160 blk_finish_plug(&plug);
e43473b7 1161 }
e43473b7
VG
1162}
1163
24bdb8ef 1164static int tg_print_rwstat(struct seq_file *sf, void *v)
41b38b6d 1165{
24bdb8ef 1166 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
2da8ca82 1167 &blkcg_policy_throtl, seq_cft(sf)->private, true);
41b38b6d
TH
1168 return 0;
1169}
1170
f95a04af
TH
1171static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
1172 int off)
60c2bc2d 1173{
f95a04af
TH
1174 struct throtl_grp *tg = pd_to_tg(pd);
1175 u64 v = *(u64 *)((void *)tg + off);
60c2bc2d 1176
af133ceb 1177 if (v == -1)
60c2bc2d 1178 return 0;
f95a04af 1179 return __blkg_prfill_u64(sf, pd, v);
60c2bc2d
TH
1180}
1181
f95a04af
TH
1182static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
1183 int off)
e43473b7 1184{
f95a04af
TH
1185 struct throtl_grp *tg = pd_to_tg(pd);
1186 unsigned int v = *(unsigned int *)((void *)tg + off);
fe071437 1187
af133ceb
TH
1188 if (v == -1)
1189 return 0;
f95a04af 1190 return __blkg_prfill_u64(sf, pd, v);
e43473b7
VG
1191}
1192
2da8ca82 1193static int tg_print_conf_u64(struct seq_file *sf, void *v)
8e89d13f 1194{
2da8ca82
TH
1195 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_u64,
1196 &blkcg_policy_throtl, seq_cft(sf)->private, false);
af133ceb 1197 return 0;
8e89d13f
VG
1198}
1199
2da8ca82 1200static int tg_print_conf_uint(struct seq_file *sf, void *v)
8e89d13f 1201{
2da8ca82
TH
1202 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_uint,
1203 &blkcg_policy_throtl, seq_cft(sf)->private, false);
af133ceb 1204 return 0;
60c2bc2d
TH
1205}
1206
451af504
TH
1207static ssize_t tg_set_conf(struct kernfs_open_file *of,
1208 char *buf, size_t nbytes, loff_t off, bool is_u64)
60c2bc2d 1209{
451af504 1210 struct blkcg *blkcg = css_to_blkcg(of_css(of));
60c2bc2d 1211 struct blkg_conf_ctx ctx;
af133ceb 1212 struct throtl_grp *tg;
69df0ab0 1213 struct throtl_service_queue *sq;
693e751e 1214 struct blkcg_gq *blkg;
492eb21b 1215 struct cgroup_subsys_state *pos_css;
60c2bc2d
TH
1216 int ret;
1217
3c798398 1218 ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
60c2bc2d
TH
1219 if (ret)
1220 return ret;
1221
af133ceb 1222 tg = blkg_to_tg(ctx.blkg);
69df0ab0 1223 sq = &tg->service_queue;
af133ceb 1224
a2b1693b
TH
1225 if (!ctx.v)
1226 ctx.v = -1;
af133ceb 1227
a2b1693b 1228 if (is_u64)
451af504 1229 *(u64 *)((void *)tg + of_cft(of)->private) = ctx.v;
a2b1693b 1230 else
451af504 1231 *(unsigned int *)((void *)tg + of_cft(of)->private) = ctx.v;
af133ceb 1232
fda6f272
TH
1233 throtl_log(&tg->service_queue,
1234 "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
1235 tg->bps[READ], tg->bps[WRITE],
1236 tg->iops[READ], tg->iops[WRITE]);
632b4493 1237
693e751e
TH
1238 /*
1239 * Update has_rules[] flags for the updated tg's subtree. A tg is
1240 * considered to have rules if either the tg itself or any of its
1241 * ancestors has rules. This identifies groups without any
1242 * restrictions in the whole hierarchy and allows them to bypass
1243 * blk-throttle.
1244 */
492eb21b 1245 blkg_for_each_descendant_pre(blkg, pos_css, ctx.blkg)
693e751e
TH
1246 tg_update_has_rules(blkg_to_tg(blkg));
1247
632b4493
TH
1248 /*
1249 * We're already holding queue_lock and know @tg is valid. Let's
1250 * apply the new config directly.
1251 *
 1252 * Restart the slices for both READ and WRITE. It might happen
 1253 * that a group's limits are dropped suddenly and we don't want to
 1254 * account recently dispatched IO against the new low rate.
1255 */
0f3457f6
TH
1256 throtl_start_new_slice(tg, 0);
1257 throtl_start_new_slice(tg, 1);
632b4493 1258
5b2c16aa 1259 if (tg->flags & THROTL_TG_PENDING) {
77216b04 1260 tg_update_disptime(tg);
7f52f98c 1261 throtl_schedule_next_dispatch(sq->parent_sq, true);
632b4493 1262 }
60c2bc2d
TH
1263
1264 blkg_conf_finish(&ctx);
451af504 1265 return nbytes;
8e89d13f
VG
1266}
1267
451af504
TH
1268static ssize_t tg_set_conf_u64(struct kernfs_open_file *of,
1269 char *buf, size_t nbytes, loff_t off)
60c2bc2d 1270{
451af504 1271 return tg_set_conf(of, buf, nbytes, off, true);
60c2bc2d
TH
1272}
1273
451af504
TH
1274static ssize_t tg_set_conf_uint(struct kernfs_open_file *of,
1275 char *buf, size_t nbytes, loff_t off)
60c2bc2d 1276{
451af504 1277 return tg_set_conf(of, buf, nbytes, off, false);
60c2bc2d
TH
1278}
1279
1280static struct cftype throtl_files[] = {
1281 {
1282 .name = "throttle.read_bps_device",
af133ceb 1283 .private = offsetof(struct throtl_grp, bps[READ]),
2da8ca82 1284 .seq_show = tg_print_conf_u64,
451af504 1285 .write = tg_set_conf_u64,
60c2bc2d
TH
1286 },
1287 {
1288 .name = "throttle.write_bps_device",
af133ceb 1289 .private = offsetof(struct throtl_grp, bps[WRITE]),
2da8ca82 1290 .seq_show = tg_print_conf_u64,
451af504 1291 .write = tg_set_conf_u64,
60c2bc2d
TH
1292 },
1293 {
1294 .name = "throttle.read_iops_device",
af133ceb 1295 .private = offsetof(struct throtl_grp, iops[READ]),
2da8ca82 1296 .seq_show = tg_print_conf_uint,
451af504 1297 .write = tg_set_conf_uint,
60c2bc2d
TH
1298 },
1299 {
1300 .name = "throttle.write_iops_device",
af133ceb 1301 .private = offsetof(struct throtl_grp, iops[WRITE]),
2da8ca82 1302 .seq_show = tg_print_conf_uint,
451af504 1303 .write = tg_set_conf_uint,
60c2bc2d
TH
1304 },
1305 {
1306 .name = "throttle.io_service_bytes",
24bdb8ef
TH
1307 .private = offsetof(struct throtl_grp, service_bytes),
1308 .seq_show = tg_print_rwstat,
60c2bc2d
TH
1309 },
1310 {
1311 .name = "throttle.io_serviced",
24bdb8ef
TH
1312 .private = offsetof(struct throtl_grp, serviced),
1313 .seq_show = tg_print_rwstat,
60c2bc2d
TH
1314 },
1315 { } /* terminate */
1316};
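/*
 * Userspace usage sketch (paths are assumptions: the blkio controller
 * mounted at /sys/fs/cgroup/blkio, target device 8:16):
 *
 *	echo "8:16 1048576" > /sys/fs/cgroup/blkio/blkio.throttle.read_bps_device
 *	cat /sys/fs/cgroup/blkio/blkio.throttle.io_service_bytes
 *
 * Writes are parsed by tg_set_conf() above via blkg_conf_prep(); reads go
 * through tg_print_conf_u64()/tg_print_rwstat().
 */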
1317
da527770 1318static void throtl_shutdown_wq(struct request_queue *q)
e43473b7
VG
1319{
1320 struct throtl_data *td = q->td;
1321
69df0ab0 1322 cancel_work_sync(&td->dispatch_work);
e43473b7
VG
1323}
1324
3c798398 1325static struct blkcg_policy blkcg_policy_throtl = {
f9fcc2d3
TH
1326 .cftypes = throtl_files,
1327
001bea73 1328 .pd_alloc_fn = throtl_pd_alloc,
f9fcc2d3 1329 .pd_init_fn = throtl_pd_init,
693e751e 1330 .pd_online_fn = throtl_pd_online,
001bea73 1331 .pd_free_fn = throtl_pd_free,
f9fcc2d3 1332 .pd_reset_stats_fn = throtl_pd_reset_stats,
e43473b7
VG
1333};
1334
ae118896
TH
1335bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
1336 struct bio *bio)
e43473b7 1337{
c5cc2070 1338 struct throtl_qnode *qn = NULL;
ae118896 1339 struct throtl_grp *tg = blkg_to_tg(blkg ?: q->root_blkg);
73f0d49a 1340 struct throtl_service_queue *sq;
0e9f4164 1341 bool rw = bio_data_dir(bio);
bc16a4f9 1342 bool throttled = false;
e43473b7 1343
ae118896
TH
1344 WARN_ON_ONCE(!rcu_read_lock_held());
1345
2a0f61e6 1346 /* see throtl_charge_bio() */
ae118896 1347 if ((bio->bi_rw & REQ_THROTTLED) || !tg->has_rules[rw])
bc16a4f9 1348 goto out;
e43473b7
VG
1349
1350 spin_lock_irq(q->queue_lock);
c9589f03
TH
1351
1352 if (unlikely(blk_queue_bypass(q)))
bc16a4f9 1353 goto out_unlock;
f469a7b4 1354
73f0d49a
TH
1355 sq = &tg->service_queue;
1356
9e660acf
TH
1357 while (true) {
1358 /* throtl is FIFO - if bios are already queued, should queue */
1359 if (sq->nr_queued[rw])
1360 break;
de701c74 1361
9e660acf
TH
1362 /* if above limits, break to queue */
1363 if (!tg_may_dispatch(tg, bio, NULL))
1364 break;
1365
1366 /* within limits, let's charge and dispatch directly */
e43473b7 1367 throtl_charge_bio(tg, bio);
04521db0
VG
1368
1369 /*
 1370 * We need to trim the slice even when bios are not being queued;
 1371 * otherwise it might happen that a bio is not queued for
 1372 * a long time and the slice keeps on extending and trim is not
 1373 * called for a long time. Now if limits are reduced suddenly
 1374 * we take into account all the IO dispatched so far at the new
 1375 * low rate and newly queued IO gets a really long dispatch
1376 * time.
1377 *
1378 * So keep on trimming slice even if bio is not queued.
1379 */
0f3457f6 1380 throtl_trim_slice(tg, rw);
9e660acf
TH
1381
1382 /*
1383 * @bio passed through this layer without being throttled.
 1384 * Climb up the ladder. If we're already at the top, it
1385 * can be executed directly.
1386 */
c5cc2070 1387 qn = &tg->qnode_on_parent[rw];
9e660acf
TH
1388 sq = sq->parent_sq;
1389 tg = sq_to_tg(sq);
1390 if (!tg)
1391 goto out_unlock;
e43473b7
VG
1392 }
1393
9e660acf 1394 /* out-of-limit, queue to @tg */
fda6f272
TH
1395 throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
1396 rw == READ ? 'R' : 'W',
4f024f37 1397 tg->bytes_disp[rw], bio->bi_iter.bi_size, tg->bps[rw],
fda6f272
TH
1398 tg->io_disp[rw], tg->iops[rw],
1399 sq->nr_queued[READ], sq->nr_queued[WRITE]);
e43473b7 1400
671058fb 1401 bio_associate_current(bio);
6bc9c2b4 1402 tg->td->nr_queued[rw]++;
c5cc2070 1403 throtl_add_bio_tg(bio, qn, tg);
bc16a4f9 1404 throttled = true;
e43473b7 1405
7f52f98c
TH
1406 /*
1407 * Update @tg's dispatch time and force schedule dispatch if @tg
1408 * was empty before @bio. The forced scheduling isn't likely to
1409 * cause undue delay as @bio is likely to be dispatched directly if
1410 * its @tg's disptime is not in the future.
1411 */
0e9f4164 1412 if (tg->flags & THROTL_TG_WAS_EMPTY) {
77216b04 1413 tg_update_disptime(tg);
7f52f98c 1414 throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true);
e43473b7
VG
1415 }
1416
bc16a4f9 1417out_unlock:
e43473b7 1418 spin_unlock_irq(q->queue_lock);
bc16a4f9 1419out:
2a0f61e6
TH
1420 /*
1421 * As multiple blk-throtls may stack in the same issue path, we
1422 * don't want bios to leave with the flag set. Clear the flag if
1423 * being issued.
1424 */
1425 if (!throttled)
1426 bio->bi_rw &= ~REQ_THROTTLED;
bc16a4f9 1427 return throttled;
e43473b7
VG
1428}
1429
2a12f0dc
TH
1430/*
1431 * Dispatch all bios from all children tg's queued on @parent_sq. On
1432 * return, @parent_sq is guaranteed to not have any active children tg's
1433 * and all bios from previously active tg's are on @parent_sq->bio_lists[].
1434 */
1435static void tg_drain_bios(struct throtl_service_queue *parent_sq)
1436{
1437 struct throtl_grp *tg;
1438
1439 while ((tg = throtl_rb_first(parent_sq))) {
1440 struct throtl_service_queue *sq = &tg->service_queue;
1441 struct bio *bio;
1442
1443 throtl_dequeue_tg(tg);
1444
c5cc2070 1445 while ((bio = throtl_peek_queued(&sq->queued[READ])))
2a12f0dc 1446 tg_dispatch_one_bio(tg, bio_data_dir(bio));
c5cc2070 1447 while ((bio = throtl_peek_queued(&sq->queued[WRITE])))
2a12f0dc
TH
1448 tg_dispatch_one_bio(tg, bio_data_dir(bio));
1449 }
1450}
1451
c9a929dd
TH
1452/**
1453 * blk_throtl_drain - drain throttled bios
1454 * @q: request_queue to drain throttled bios for
1455 *
1456 * Dispatch all currently throttled bios on @q through ->make_request_fn().
1457 */
1458void blk_throtl_drain(struct request_queue *q)
1459 __releases(q->queue_lock) __acquires(q->queue_lock)
1460{
1461 struct throtl_data *td = q->td;
2a12f0dc 1462 struct blkcg_gq *blkg;
492eb21b 1463 struct cgroup_subsys_state *pos_css;
c9a929dd 1464 struct bio *bio;
651930bc 1465 int rw;
c9a929dd 1466
8bcb6c7d 1467 queue_lockdep_assert_held(q);
2a12f0dc 1468 rcu_read_lock();
c9a929dd 1469
2a12f0dc
TH
1470 /*
1471 * Drain each tg while doing post-order walk on the blkg tree, so
1472 * that all bios are propagated to td->service_queue. It'd be
1473 * better to walk service_queue tree directly but blkg walk is
1474 * easier.
1475 */
492eb21b 1476 blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg)
2a12f0dc 1477 tg_drain_bios(&blkg_to_tg(blkg)->service_queue);
73f0d49a 1478
2a12f0dc
TH
1479 /* finally, transfer bios from top-level tg's into the td */
1480 tg_drain_bios(&td->service_queue);
1481
1482 rcu_read_unlock();
c9a929dd
TH
1483 spin_unlock_irq(q->queue_lock);
1484
2a12f0dc 1485 /* all bios now should be in td->service_queue, issue them */
651930bc 1486 for (rw = READ; rw <= WRITE; rw++)
c5cc2070
TH
1487 while ((bio = throtl_pop_queued(&td->service_queue.queued[rw],
1488 NULL)))
651930bc 1489 generic_make_request(bio);
c9a929dd
TH
1490
1491 spin_lock_irq(q->queue_lock);
1492}
1493
e43473b7
VG
1494int blk_throtl_init(struct request_queue *q)
1495{
1496 struct throtl_data *td;
a2b1693b 1497 int ret;
e43473b7
VG
1498
1499 td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
1500 if (!td)
1501 return -ENOMEM;
1502
69df0ab0 1503 INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
b2ce2643 1504 throtl_service_queue_init(&td->service_queue);
e43473b7 1505
cd1604fa 1506 q->td = td;
29b12589 1507 td->queue = q;
02977e4a 1508
a2b1693b 1509 /* activate policy */
3c798398 1510 ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
a2b1693b 1511 if (ret)
f51b802c 1512 kfree(td);
a2b1693b 1513 return ret;
e43473b7
VG
1514}
1515
1516void blk_throtl_exit(struct request_queue *q)
1517{
c875f4d0 1518 BUG_ON(!q->td);
da527770 1519 throtl_shutdown_wq(q);
3c798398 1520 blkcg_deactivate_policy(q, &blkcg_policy_throtl);
c9a929dd 1521 kfree(q->td);
e43473b7
VG
1522}
1523
1524static int __init throtl_init(void)
1525{
450adcbe
VG
1526 kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
1527 if (!kthrotld_workqueue)
1528 panic("Failed to create kthrotld\n");
1529
3c798398 1530 return blkcg_policy_register(&blkcg_policy_throtl);
e43473b7
VG
1531}
1532
1533module_init(throtl_init);