/*
 * Interface for controlling IO bandwidth on a request queue
 *
 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blktrace_api.h>
#include <linux/blk-cgroup.h>
/* Max dispatch from a group in 1 round */
static int throtl_grp_quantum = 8;

/* Total max dispatch from all groups in one round */
static int throtl_quantum = 32;

/* Throttling is performed over a 100ms slice and after that the slice is renewed */
static unsigned long throtl_slice = HZ/10;      /* 100 ms */

static struct blkcg_policy blkcg_policy_throtl;

/* A workqueue to queue throttle related work */
static struct workqueue_struct *kthrotld_workqueue;
/*
 * To implement hierarchical throttling, throtl_grps form a tree and bios
 * are dispatched upwards level by level until they reach the top and get
 * issued.  When dispatching bios from the children and local group at each
 * level, if the bios are dispatched into a single bio_list, there's a risk
 * of a local or child group which can queue many bios at once filling up
 * the list starving others.
 *
 * To avoid such starvation, dispatched bios are queued separately
 * according to where they came from.  When they are again dispatched to
 * the parent, they're popped in round-robin order so that no single source
 * hogs the dispatch window.
 *
 * throtl_qnode is used to keep the queued bios separated by their sources.
 * Bios are queued to throtl_qnode which in turn is queued to
 * throtl_service_queue and then dispatched in round-robin order.
 *
 * It's also used to track the reference counts on blkg's.  A qnode always
 * belongs to a throtl_grp and gets queued on itself or the parent, so
 * incrementing the reference of the associated throtl_grp when a qnode is
 * queued and decrementing when dequeued is enough to keep the whole blkg
 * tree pinned while bios are in flight.
 */
struct throtl_qnode {
        struct list_head        node;           /* service_queue->queued[] */
        struct bio_list         bios;           /* queued bios */
        struct throtl_grp       *tg;            /* tg this qnode belongs to */
};

struct throtl_service_queue {
        struct throtl_service_queue *parent_sq; /* the parent service_queue */

        /*
         * Bios queued directly to this service_queue or dispatched from
         * children throtl_grp's.
         */
        struct list_head        queued[2];      /* throtl_qnode [READ/WRITE] */
        unsigned int            nr_queued[2];   /* number of queued bios */

        /*
         * RB tree of active children throtl_grp's, which are sorted by
         * their ->disptime.
         */
        struct rb_root          pending_tree;   /* RB tree of active tgs */
        struct rb_node          *first_pending; /* first node in the tree */
        unsigned int            nr_pending;     /* # queued in the tree */
        unsigned long           first_pending_disptime; /* disptime of the first tg */
        struct timer_list       pending_timer;  /* fires on first_pending_disptime */
};

enum tg_state_flags {
        THROTL_TG_PENDING       = 1 << 0,       /* on parent's pending tree */
        THROTL_TG_WAS_EMPTY     = 1 << 1,       /* bio_lists[] became non-empty */
};

#define rb_entry_tg(node)       rb_entry((node), struct throtl_grp, rb_node)

struct throtl_grp {
        /* must be the first member */
        struct blkg_policy_data pd;

        /* active throtl group service_queue member */
        struct rb_node rb_node;

        /* throtl_data this group belongs to */
        struct throtl_data *td;

        /* this group's service queue */
        struct throtl_service_queue service_queue;

        /*
         * qnode_on_self is used when bios are directly queued to this
         * throtl_grp so that local bios compete fairly with bios
         * dispatched from children.  qnode_on_parent is used when bios are
         * dispatched from this throtl_grp into its parent and will compete
         * with the sibling qnode_on_parents and the parent's
         * qnode_on_self.
         */
        struct throtl_qnode qnode_on_self[2];
        struct throtl_qnode qnode_on_parent[2];

        /*
         * Dispatch time in jiffies. This is the estimated time when group
         * will unthrottle and is ready to dispatch more bio. It is used as
         * key to sort active groups in service tree.
         */
        unsigned long disptime;

        unsigned int flags;

        /* are there any throtl rules between this group and td? */
        bool has_rules[2];

        /* internally used bytes per second rate limits */
        uint64_t bps[2][LIMIT_CNT];
        /* user configured bps limits */
        uint64_t bps_conf[2][LIMIT_CNT];

        /* internally used IOPS limits */
        unsigned int iops[2][LIMIT_CNT];
        /* user configured IOPS limits */
        unsigned int iops_conf[2][LIMIT_CNT];

        /* Number of bytes dispatched in current slice */
        uint64_t bytes_disp[2];
        /* Number of bio's dispatched in current slice */
        unsigned int io_disp[2];

        /* When did we start a new slice */
        unsigned long slice_start[2];
        unsigned long slice_end[2];
};

struct throtl_data {
        /* service tree for active throtl groups */
        struct throtl_service_queue service_queue;

        struct request_queue *queue;

        /* Total Number of queued bios on READ and WRITE lists */
        unsigned int nr_queued[2];

        /* Work for dispatching throttled bios */
        struct work_struct dispatch_work;
        unsigned int limit_index;
        bool limit_valid[LIMIT_CNT];
};
static void throtl_pending_timer_fn(unsigned long arg);

static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
{
        return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
}

static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
{
        return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
}

static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
{
        return pd_to_blkg(&tg->pd);
}
/**
 * sq_to_tg - return the throtl_grp the specified service queue belongs to
 * @sq: the throtl_service_queue of interest
 *
 * Return the throtl_grp @sq belongs to.  If @sq is the top-level one
 * embedded in throtl_data, %NULL is returned.
 */
static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq)
{
        if (sq && sq->parent_sq)
                return container_of(sq, struct throtl_grp, service_queue);
        else
                return NULL;
}
/**
 * sq_to_td - return throtl_data the specified service queue belongs to
 * @sq: the throtl_service_queue of interest
 *
 * A service_queue can be embedded in either a throtl_grp or throtl_data.
 * Determine the associated throtl_data accordingly and return it.
 */
static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
{
        struct throtl_grp *tg = sq_to_tg(sq);

        if (tg)
                return tg->td;
        else
                return container_of(sq, struct throtl_data, service_queue);
}
static uint64_t tg_bps_limit(struct throtl_grp *tg, int rw)
{
        return tg->bps[rw][tg->td->limit_index];
}

static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
{
        return tg->iops[rw][tg->td->limit_index];
}
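/*
 * Note (illustrative, not from the original source): ->bps[rw][] and
 * ->iops[rw][] are indexed by td->limit_index, so flipping that single
 * index between the LIMIT_LOW and LIMIT_MAX rows switches every group on
 * the queue to the other set of limits without touching per-group state.
 * For example, with td->limit_index == LIMIT_MAX, tg_bps_limit(tg, READ)
 * simply reads tg->bps[READ][LIMIT_MAX].
 */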
/**
 * throtl_log - log debug message via blktrace
 * @sq: the service_queue being reported
 * @fmt: printf format string
 *
 * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
 * throtl_grp; otherwise, just "throtl".
 */
#define throtl_log(sq, fmt, args...)    do {                            \
        struct throtl_grp *__tg = sq_to_tg((sq));                       \
        struct throtl_data *__td = sq_to_td((sq));                      \
                                                                        \
        if (likely(!blk_trace_note_message_enabled(__td->queue)))      \
                break;                                                  \
        if ((__tg)) {                                                   \
                char __pbuf[128];                                       \
                                                                        \
                blkg_path(tg_to_blkg(__tg), __pbuf, sizeof(__pbuf));    \
                blk_add_trace_msg(__td->queue, "throtl %s " fmt, __pbuf, ##args); \
        } else {                                                        \
                blk_add_trace_msg(__td->queue, "throtl " fmt, ##args);  \
        }                                                               \
} while (0)
static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
{
        INIT_LIST_HEAD(&qn->node);
        bio_list_init(&qn->bios);
        qn->tg = tg;
}

/**
 * throtl_qnode_add_bio - add a bio to a throtl_qnode and activate it
 * @bio: bio being added
 * @qn: qnode to add bio to
 * @queued: the service_queue->queued[] list @qn belongs to
 *
 * Add @bio to @qn and put @qn on @queued if it's not already on.
 * @qn->tg's reference count is bumped when @qn is activated.  See the
 * comment on top of throtl_qnode definition for details.
 */
static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,
                                 struct list_head *queued)
{
        bio_list_add(&qn->bios, bio);
        if (list_empty(&qn->node)) {
                list_add_tail(&qn->node, queued);
                blkg_get(tg_to_blkg(qn->tg));
        }
}

/**
 * throtl_peek_queued - peek the first bio on a qnode list
 * @queued: the qnode list to peek
 */
static struct bio *throtl_peek_queued(struct list_head *queued)
{
        struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node);
        struct bio *bio;

        if (list_empty(queued))
                return NULL;

        bio = bio_list_peek(&qn->bios);
        WARN_ON_ONCE(!bio);
        return bio;
}
/**
 * throtl_pop_queued - pop the first bio from a qnode list
 * @queued: the qnode list to pop a bio from
 * @tg_to_put: optional out argument for throtl_grp to put
 *
 * Pop the first bio from the qnode list @queued.  After popping, the first
 * qnode is removed from @queued if empty or moved to the end of @queued so
 * that the popping order is round-robin.
 *
 * When the first qnode is removed, its associated throtl_grp should be put
 * too.  If @tg_to_put is NULL, this function automatically puts it;
 * otherwise, *@tg_to_put is set to the throtl_grp to put and the caller is
 * responsible for putting it.
 */
static struct bio *throtl_pop_queued(struct list_head *queued,
                                     struct throtl_grp **tg_to_put)
{
        struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node);
        struct bio *bio;

        if (list_empty(queued))
                return NULL;

        bio = bio_list_pop(&qn->bios);
        WARN_ON_ONCE(!bio);

        if (bio_list_empty(&qn->bios)) {
                list_del_init(&qn->node);
                if (tg_to_put)
                        *tg_to_put = qn->tg;
                else
                        blkg_put(tg_to_blkg(qn->tg));
        } else {
                list_move_tail(&qn->node, queued);
        }

        return bio;
}
/* init a service_queue, assumes the caller zeroed it */
static void throtl_service_queue_init(struct throtl_service_queue *sq)
{
        INIT_LIST_HEAD(&sq->queued[0]);
        INIT_LIST_HEAD(&sq->queued[1]);
        sq->pending_tree = RB_ROOT;
        setup_timer(&sq->pending_timer, throtl_pending_timer_fn,
                    (unsigned long)sq);
}
static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp, int node)
{
        struct throtl_grp *tg;
        int rw;

        tg = kzalloc_node(sizeof(*tg), gfp, node);
        if (!tg)
                return NULL;

        throtl_service_queue_init(&tg->service_queue);

        for (rw = READ; rw <= WRITE; rw++) {
                throtl_qnode_init(&tg->qnode_on_self[rw], tg);
                throtl_qnode_init(&tg->qnode_on_parent[rw], tg);
        }

        RB_CLEAR_NODE(&tg->rb_node);
        tg->bps[READ][LIMIT_MAX] = U64_MAX;
        tg->bps[WRITE][LIMIT_MAX] = U64_MAX;
        tg->iops[READ][LIMIT_MAX] = UINT_MAX;
        tg->iops[WRITE][LIMIT_MAX] = UINT_MAX;
        tg->bps_conf[READ][LIMIT_MAX] = U64_MAX;
        tg->bps_conf[WRITE][LIMIT_MAX] = U64_MAX;
        tg->iops_conf[READ][LIMIT_MAX] = UINT_MAX;
        tg->iops_conf[WRITE][LIMIT_MAX] = UINT_MAX;
        /* LIMIT_LOW will have default value 0 */

        return &tg->pd;
}
static void throtl_pd_init(struct blkg_policy_data *pd)
{
        struct throtl_grp *tg = pd_to_tg(pd);
        struct blkcg_gq *blkg = tg_to_blkg(tg);
        struct throtl_data *td = blkg->q->td;
        struct throtl_service_queue *sq = &tg->service_queue;

        /*
         * If on the default hierarchy, we switch to properly hierarchical
         * behavior where limits on a given throtl_grp are applied to the
         * whole subtree rather than just the group itself.  e.g. If 16M
         * read_bps limit is set on the root group, the whole system can't
         * exceed 16M for the device.
         *
         * If not on the default hierarchy, the broken flat hierarchy
         * behavior is retained where all throtl_grps are treated as if
         * they're all separate root groups right below throtl_data.
         * Limits of a group don't interact with limits of other groups
         * regardless of the position of the group in the hierarchy.
         */
        sq->parent_sq = &td->service_queue;
        if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent)
                sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
        tg->td = td;
}
/*
 * Set has_rules[] if @tg or any of its parents have limits configured.
 * This doesn't require walking up to the top of the hierarchy as the
 * parent's has_rules[] is guaranteed to be correct.
 */
static void tg_update_has_rules(struct throtl_grp *tg)
{
        struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq);
        struct throtl_data *td = tg->td;
        int rw;

        for (rw = READ; rw <= WRITE; rw++)
                tg->has_rules[rw] = (parent_tg && parent_tg->has_rules[rw]) ||
                        (td->limit_valid[td->limit_index] &&
                         (tg_bps_limit(tg, rw) != U64_MAX ||
                          tg_iops_limit(tg, rw) != UINT_MAX));
}
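/*
 * Worked example (illustrative, not from the original source): if a parent
 * group has a read bps limit of 1048576 configured and its child has no
 * limits of its own, the child still ends up with has_rules[READ] == true
 * because the parent's has_rules[READ] is true, so the child's bios cannot
 * bypass blk_throtl_bio() and escape the ancestor's limit.
 */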
static void throtl_pd_online(struct blkg_policy_data *pd)
{
        /*
         * We don't want new groups to escape the limits of their ancestors.
         * Update has_rules[] after a new group is brought online.
         */
        tg_update_has_rules(pd_to_tg(pd));
}

static void blk_throtl_update_limit_valid(struct throtl_data *td)
{
        struct cgroup_subsys_state *pos_css;
        struct blkcg_gq *blkg;
        bool low_valid = false;

        rcu_read_lock();
        blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
                struct throtl_grp *tg = blkg_to_tg(blkg);

                if (tg->bps[READ][LIMIT_LOW] || tg->bps[WRITE][LIMIT_LOW] ||
                    tg->iops[READ][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW])
                        low_valid = true;
        }
        rcu_read_unlock();

        td->limit_valid[LIMIT_LOW] = low_valid;
}
static void throtl_pd_offline(struct blkg_policy_data *pd)
{
        struct throtl_grp *tg = pd_to_tg(pd);

        tg->bps[READ][LIMIT_LOW] = 0;
        tg->bps[WRITE][LIMIT_LOW] = 0;
        tg->iops[READ][LIMIT_LOW] = 0;
        tg->iops[WRITE][LIMIT_LOW] = 0;

        blk_throtl_update_limit_valid(tg->td);

        if (tg->td->limit_index == LIMIT_LOW &&
            !tg->td->limit_valid[LIMIT_LOW])
                tg->td->limit_index = LIMIT_MAX;
}

static void throtl_pd_free(struct blkg_policy_data *pd)
{
        struct throtl_grp *tg = pd_to_tg(pd);

        del_timer_sync(&tg->service_queue.pending_timer);
        kfree(tg);
}
static struct throtl_grp *
throtl_rb_first(struct throtl_service_queue *parent_sq)
{
        /* Service tree is empty */
        if (!parent_sq->nr_pending)
                return NULL;

        if (!parent_sq->first_pending)
                parent_sq->first_pending = rb_first(&parent_sq->pending_tree);

        if (parent_sq->first_pending)
                return rb_entry_tg(parent_sq->first_pending);

        return NULL;
}
static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
        rb_erase(n, root);
        RB_CLEAR_NODE(n);
}

static void throtl_rb_erase(struct rb_node *n,
                            struct throtl_service_queue *parent_sq)
{
        if (parent_sq->first_pending == n)
                parent_sq->first_pending = NULL;
        rb_erase_init(n, &parent_sq->pending_tree);
        --parent_sq->nr_pending;
}

static void update_min_dispatch_time(struct throtl_service_queue *parent_sq)
{
        struct throtl_grp *tg;

        tg = throtl_rb_first(parent_sq);
        if (!tg)
                return;

        parent_sq->first_pending_disptime = tg->disptime;
}
static void tg_service_queue_add(struct throtl_grp *tg)
{
        struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq;
        struct rb_node **node = &parent_sq->pending_tree.rb_node;
        struct rb_node *parent = NULL;
        struct throtl_grp *__tg;
        unsigned long key = tg->disptime;
        int left = 1;

        while (*node != NULL) {
                parent = *node;
                __tg = rb_entry_tg(parent);

                if (time_before(key, __tg->disptime))
                        node = &parent->rb_left;
                else {
                        node = &parent->rb_right;
                        left = 0;
                }
        }

        if (left)
                parent_sq->first_pending = &tg->rb_node;

        rb_link_node(&tg->rb_node, parent, node);
        rb_insert_color(&tg->rb_node, &parent_sq->pending_tree);
}
static void __throtl_enqueue_tg(struct throtl_grp *tg)
{
        tg_service_queue_add(tg);
        tg->flags |= THROTL_TG_PENDING;
        tg->service_queue.parent_sq->nr_pending++;
}

static void throtl_enqueue_tg(struct throtl_grp *tg)
{
        if (!(tg->flags & THROTL_TG_PENDING))
                __throtl_enqueue_tg(tg);
}

static void __throtl_dequeue_tg(struct throtl_grp *tg)
{
        throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq);
        tg->flags &= ~THROTL_TG_PENDING;
}

static void throtl_dequeue_tg(struct throtl_grp *tg)
{
        if (tg->flags & THROTL_TG_PENDING)
                __throtl_dequeue_tg(tg);
}
/* Call with queue lock held */
static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
                                          unsigned long expires)
{
        mod_timer(&sq->pending_timer, expires);
        throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu",
                   expires - jiffies, jiffies);
}
/**
 * throtl_schedule_next_dispatch - schedule the next dispatch cycle
 * @sq: the service_queue to schedule dispatch for
 * @force: force scheduling
 *
 * Arm @sq->pending_timer so that the next dispatch cycle starts on the
 * dispatch time of the first pending child.  Returns %true if either timer
 * is armed or there's no pending child left.  %false if the current
 * dispatch window is still open and the caller should continue
 * dispatching.
 *
 * If @force is %true, the dispatch timer is always scheduled and this
 * function is guaranteed to return %true.  This is to be used when the
 * caller can't dispatch itself and needs to invoke pending_timer
 * unconditionally.  Note that forced scheduling is likely to induce short
 * delay before dispatch starts even if @sq->first_pending_disptime is not
 * in the future and thus shouldn't be used in hot paths.
 */
static bool throtl_schedule_next_dispatch(struct throtl_service_queue *sq,
                                           bool force)
{
        /* any pending children left? */
        if (!sq->nr_pending)
                return true;

        update_min_dispatch_time(sq);

        /* is the next dispatch time in the future? */
        if (force || time_after(sq->first_pending_disptime, jiffies)) {
                throtl_schedule_pending_timer(sq, sq->first_pending_disptime);
                return true;
        }

        /* tell the caller to continue dispatching */
        return false;
}
static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
                                                      bool rw, unsigned long start)
{
        tg->bytes_disp[rw] = 0;
        tg->io_disp[rw] = 0;

        /*
         * Previous slice has expired.  We must have trimmed it after last
         * bio dispatch.  That means since start of last slice, we never used
         * that bandwidth.  Do try to make use of that bandwidth while giving
         * credit.
         */
        if (time_after_eq(start, tg->slice_start[rw]))
                tg->slice_start[rw] = start;

        tg->slice_end[rw] = jiffies + throtl_slice;
        throtl_log(&tg->service_queue,
                   "[%c] new slice with credit start=%lu end=%lu jiffies=%lu",
                   rw == READ ? 'R' : 'W', tg->slice_start[rw],
                   tg->slice_end[rw], jiffies);
}

static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
{
        tg->bytes_disp[rw] = 0;
        tg->io_disp[rw] = 0;
        tg->slice_start[rw] = jiffies;
        tg->slice_end[rw] = jiffies + throtl_slice;
        throtl_log(&tg->service_queue,
                   "[%c] new slice start=%lu end=%lu jiffies=%lu",
                   rw == READ ? 'R' : 'W', tg->slice_start[rw],
                   tg->slice_end[rw], jiffies);
}
static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
                                        unsigned long jiffy_end)
{
        tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
}

static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
                                       unsigned long jiffy_end)
{
        tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
        throtl_log(&tg->service_queue,
                   "[%c] extend slice start=%lu end=%lu jiffies=%lu",
                   rw == READ ? 'R' : 'W', tg->slice_start[rw],
                   tg->slice_end[rw], jiffies);
}
/* Determine if previously allocated or extended slice is complete or not */
static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
{
        if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
                return false;

        return true;
}
/* Trim the used slices and adjust slice start accordingly */
static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
{
        unsigned long nr_slices, time_elapsed, io_trim;
        u64 bytes_trim, tmp;

        BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));

        /*
         * If bps are unlimited (-1), then the time slice doesn't get
         * renewed.  Don't try to trim the slice if the slice is used up.
         * A new slice will start when appropriate.
         */
        if (throtl_slice_used(tg, rw))
                return;

        /*
         * A bio has been dispatched.  Also adjust slice_end.  It might happen
         * that initially the cgroup limit was very low resulting in a high
         * slice_end, but later the limit was bumped up and the bio was dispatched
         * sooner, then we need to reduce slice_end.  A high bogus slice_end
         * is bad because it does not allow a new slice to start.
         */
        throtl_set_slice_end(tg, rw, jiffies + throtl_slice);

        time_elapsed = jiffies - tg->slice_start[rw];

        nr_slices = time_elapsed / throtl_slice;
        if (!nr_slices)
                return;

        tmp = tg_bps_limit(tg, rw) * throtl_slice * nr_slices;
        do_div(tmp, HZ);
        bytes_trim = tmp;

        io_trim = (tg_iops_limit(tg, rw) * throtl_slice * nr_slices) / HZ;

        if (!bytes_trim && !io_trim)
                return;

        if (tg->bytes_disp[rw] >= bytes_trim)
                tg->bytes_disp[rw] -= bytes_trim;
        else
                tg->bytes_disp[rw] = 0;

        if (tg->io_disp[rw] >= io_trim)
                tg->io_disp[rw] -= io_trim;
        else
                tg->io_disp[rw] = 0;

        tg->slice_start[rw] += nr_slices * throtl_slice;

        throtl_log(&tg->service_queue,
                   "[%c] trim slice nr=%lu bytes=%llu io=%lu start=%lu end=%lu jiffies=%lu",
                   rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
                   tg->slice_start[rw], tg->slice_end[rw], jiffies);
}
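/*
 * Worked example (illustrative, not from the original source): assume
 * HZ=1000, throtl_slice=100 jiffies (100ms), a READ bps limit of 1048576
 * bytes/sec and 300 jiffies elapsed since slice_start.  Then nr_slices =
 * 300/100 = 3 and bytes_trim = 1048576 * 100 * 3 / 1000 = 314572 bytes, so
 * roughly 307KiB of already-accounted traffic is forgiven and slice_start
 * advances by 300 jiffies, keeping the accounting window bounded.
 */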
static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
                                  unsigned long *wait)
{
        bool rw = bio_data_dir(bio);
        unsigned int io_allowed;
        unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
        u64 tmp;

        jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

        /* Slice has just started. Consider one slice interval */
        if (!jiffy_elapsed)
                jiffy_elapsed_rnd = throtl_slice;

        jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

        /*
         * jiffy_elapsed_rnd should not be a big value as minimum iops can be
         * 1 then at max jiffy elapsed should be equivalent of 1 second as we
         * will allow dispatch after 1 second and after that slice should
         * have been trimmed.
         */
        tmp = (u64)tg_iops_limit(tg, rw) * jiffy_elapsed_rnd;
        do_div(tmp, HZ);

        if (tmp > UINT_MAX)
                io_allowed = UINT_MAX;
        else
                io_allowed = tmp;

        if (tg->io_disp[rw] + 1 <= io_allowed) {
                if (wait)
                        *wait = 0;
                return true;
        }

        /* Calc approx time to dispatch */
        jiffy_wait = ((tg->io_disp[rw] + 1) * HZ) / tg_iops_limit(tg, rw) + 1;

        if (jiffy_wait > jiffy_elapsed)
                jiffy_wait = jiffy_wait - jiffy_elapsed;
        else
                jiffy_wait = 1;

        if (wait)
                *wait = jiffy_wait;
        return false;
}
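/*
 * Worked example (illustrative, not from the original source): with HZ=1000,
 * an iops limit of 100 and jiffy_elapsed_rnd rounded up to 100 jiffies,
 * io_allowed = 100 * 100 / 1000 = 10.  Once io_disp[rw] reaches 10, the
 * next bio must wait roughly ((10 + 1) * 1000) / 100 + 1 - jiffy_elapsed
 * jiffies, i.e. until enough of the one-second budget has elapsed to cover
 * one more IO.
 */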
static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
                                 unsigned long *wait)
{
        bool rw = bio_data_dir(bio);
        u64 bytes_allowed, extra_bytes, tmp;
        unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;

        jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

        /* Slice has just started. Consider one slice interval */
        if (!jiffy_elapsed)
                jiffy_elapsed_rnd = throtl_slice;

        jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

        tmp = tg_bps_limit(tg, rw) * jiffy_elapsed_rnd;
        do_div(tmp, HZ);
        bytes_allowed = tmp;

        if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) {
                if (wait)
                        *wait = 0;
                return true;
        }

        /* Calc approx time to dispatch */
        extra_bytes = tg->bytes_disp[rw] + bio->bi_iter.bi_size - bytes_allowed;
        jiffy_wait = div64_u64(extra_bytes * HZ, tg_bps_limit(tg, rw));

        if (!jiffy_wait)
                jiffy_wait = 1;

        /*
         * This wait time is without taking into consideration the rounding
         * up we did. Add that time also.
         */
        jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
        if (wait)
                *wait = jiffy_wait;
        return false;
}
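/*
 * Worked example (illustrative, not from the original source): with HZ=1000,
 * a bps limit of 1048576 bytes/sec and jiffy_elapsed_rnd = 100 jiffies,
 * bytes_allowed = 1048576 * 100 / 1000 = 104857 bytes (~102KiB).  A 256KiB
 * bio arriving with bytes_disp still at 0 overshoots by 157287 bytes,
 * giving jiffy_wait = 157287 * 1000 / 1048576 = 150 jiffies (150ms) before
 * the bio fits within the rate again.
 */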
/*
 * Returns whether one can dispatch a bio or not.  Also returns the approx number
 * of jiffies to wait before this bio is within the IO rate and can be dispatched.
 */
static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
                            unsigned long *wait)
{
        bool rw = bio_data_dir(bio);
        unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;

        /*
         * Currently whole state machine of group depends on first bio
         * queued in the group bio list. So one should not be calling
         * this function with a different bio if there are other bios
         * queued.
         */
        BUG_ON(tg->service_queue.nr_queued[rw] &&
               bio != throtl_peek_queued(&tg->service_queue.queued[rw]));

        /* If tg->bps = -1, then BW is unlimited */
        if (tg_bps_limit(tg, rw) == U64_MAX &&
            tg_iops_limit(tg, rw) == UINT_MAX) {
                if (wait)
                        *wait = 0;
                return true;
        }

        /*
         * If previous slice expired, start a new one otherwise renew/extend
         * existing slice to make sure it is at least throtl_slice interval
         * long since now. New slice is started only for empty throttle group.
         * If there is queued bio, that means there should be an active
         * slice and it should be extended instead.
         */
        if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw]))
                throtl_start_new_slice(tg, rw);
        else {
                if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
                        throtl_extend_slice(tg, rw, jiffies + throtl_slice);
        }

        if (tg_with_in_bps_limit(tg, bio, &bps_wait) &&
            tg_with_in_iops_limit(tg, bio, &iops_wait)) {
                if (wait)
                        *wait = 0;
                return true;
        }

        max_wait = max(bps_wait, iops_wait);

        if (wait)
                *wait = max_wait;

        if (time_before(tg->slice_end[rw], jiffies + max_wait))
                throtl_extend_slice(tg, rw, jiffies + max_wait);

        return false;
}
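/*
 * Illustrative note (not from the original source): only the violated limit
 * contributes to the wait.  If a bio is within the iops limit but over the
 * bps limit with bps_wait = 150 jiffies, then iops_wait = 0, max_wait = 150,
 * *wait is set to 150 and the slice is extended so that slice_end covers
 * jiffies + 150 before the caller queues the bio.
 */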
static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
        bool rw = bio_data_dir(bio);

        /* Charge the bio to the group */
        tg->bytes_disp[rw] += bio->bi_iter.bi_size;
        tg->io_disp[rw]++;

        /*
         * BIO_THROTTLED is used to prevent the same bio from being throttled
         * more than once as a throttled bio will go through blk-throtl the
         * second time when it eventually gets issued.  Set it when a bio
         * is being charged to a tg.
         */
        if (!bio_flagged(bio, BIO_THROTTLED))
                bio_set_flag(bio, BIO_THROTTLED);
}

/**
 * throtl_add_bio_tg - add a bio to the specified throtl_grp
 * @bio: bio to add
 * @qn: qnode to use
 * @tg: the target throtl_grp
 *
 * Add @bio to @tg's service_queue using @qn.  If @qn is not specified,
 * tg->qnode_on_self[] is used.
 */
static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn,
                              struct throtl_grp *tg)
{
        struct throtl_service_queue *sq = &tg->service_queue;
        bool rw = bio_data_dir(bio);

        if (!qn)
                qn = &tg->qnode_on_self[rw];

        /*
         * If @tg doesn't currently have any bios queued in the same
         * direction, queueing @bio can change when @tg should be
         * dispatched.  Mark that @tg was empty.  This is automatically
         * cleared on the next tg_update_disptime().
         */
        if (!sq->nr_queued[rw])
                tg->flags |= THROTL_TG_WAS_EMPTY;

        throtl_qnode_add_bio(bio, qn, &sq->queued[rw]);

        sq->nr_queued[rw]++;
        throtl_enqueue_tg(tg);
}
static void tg_update_disptime(struct throtl_grp *tg)
{
        struct throtl_service_queue *sq = &tg->service_queue;
        unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
        struct bio *bio;

        bio = throtl_peek_queued(&sq->queued[READ]);
        if (bio)
                tg_may_dispatch(tg, bio, &read_wait);

        bio = throtl_peek_queued(&sq->queued[WRITE]);
        if (bio)
                tg_may_dispatch(tg, bio, &write_wait);

        min_wait = min(read_wait, write_wait);
        disptime = jiffies + min_wait;

        /* Update dispatch time */
        throtl_dequeue_tg(tg);
        tg->disptime = disptime;
        throtl_enqueue_tg(tg);

        /* see throtl_add_bio_tg() */
        tg->flags &= ~THROTL_TG_WAS_EMPTY;
}

static void start_parent_slice_with_credit(struct throtl_grp *child_tg,
                                           struct throtl_grp *parent_tg, bool rw)
{
        if (throtl_slice_used(parent_tg, rw)) {
                throtl_start_new_slice_with_credit(parent_tg, rw,
                                child_tg->slice_start[rw]);
        }
}
static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
{
        struct throtl_service_queue *sq = &tg->service_queue;
        struct throtl_service_queue *parent_sq = sq->parent_sq;
        struct throtl_grp *parent_tg = sq_to_tg(parent_sq);
        struct throtl_grp *tg_to_put = NULL;
        struct bio *bio;

        /*
         * @bio is being transferred from @tg to @parent_sq.  Popping a bio
         * from @tg may put its reference and @parent_sq might end up
         * getting released prematurely.  Remember the tg to put and put it
         * after @bio is transferred to @parent_sq.
         */
        bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
        sq->nr_queued[rw]--;

        throtl_charge_bio(tg, bio);

        /*
         * If our parent is another tg, we just need to transfer @bio to
         * the parent using throtl_add_bio_tg().  If our parent is
         * @td->service_queue, @bio is ready to be issued.  Put it on its
         * bio_lists[] and decrease total number queued.  The caller is
         * responsible for issuing these bios.
         */
        if (parent_tg) {
                throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
                start_parent_slice_with_credit(tg, parent_tg, rw);
        } else {
                throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
                                     &parent_sq->queued[rw]);
                BUG_ON(tg->td->nr_queued[rw] <= 0);
                tg->td->nr_queued[rw]--;
        }

        throtl_trim_slice(tg, rw);

        if (tg_to_put)
                blkg_put(tg_to_blkg(tg_to_put));
}
static int throtl_dispatch_tg(struct throtl_grp *tg)
{
        struct throtl_service_queue *sq = &tg->service_queue;
        unsigned int nr_reads = 0, nr_writes = 0;
        unsigned int max_nr_reads = throtl_grp_quantum*3/4;
        unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
        struct bio *bio;

        /* Try to dispatch 75% READS and 25% WRITES */

        while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
               tg_may_dispatch(tg, bio, NULL)) {

                tg_dispatch_one_bio(tg, bio_data_dir(bio));
                nr_reads++;

                if (nr_reads >= max_nr_reads)
                        break;
        }

        while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
               tg_may_dispatch(tg, bio, NULL)) {

                tg_dispatch_one_bio(tg, bio_data_dir(bio));
                nr_writes++;

                if (nr_writes >= max_nr_writes)
                        break;
        }

        return nr_reads + nr_writes;
}
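/*
 * Worked example (illustrative, not from the original source): with the
 * default throtl_grp_quantum of 8, max_nr_reads = 8*3/4 = 6 and
 * max_nr_writes = 8 - 6 = 2, so a single group pushes at most 6 reads and
 * 2 writes up one level per invocation, which is where the 75%/25% split
 * mentioned above comes from.
 */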
static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
{
        unsigned int nr_disp = 0;

        while (1) {
                struct throtl_grp *tg = throtl_rb_first(parent_sq);
                struct throtl_service_queue *sq = &tg->service_queue;

                if (!tg)
                        break;

                if (time_before(jiffies, tg->disptime))
                        break;

                throtl_dequeue_tg(tg);

                nr_disp += throtl_dispatch_tg(tg);

                if (sq->nr_queued[0] || sq->nr_queued[1])
                        tg_update_disptime(tg);

                if (nr_disp >= throtl_quantum)
                        break;
        }

        return nr_disp;
}
/**
 * throtl_pending_timer_fn - timer function for service_queue->pending_timer
 * @arg: the throtl_service_queue being serviced
 *
 * This timer is armed when a child throtl_grp with active bio's becomes
 * pending and queued on the service_queue's pending_tree and expires when
 * the first child throtl_grp should be dispatched.  This function
 * dispatches bio's from the children throtl_grps to the parent
 * service_queue.
 *
 * If the parent's parent is another throtl_grp, dispatching is propagated
 * by either arming its pending_timer or repeating dispatch directly.  If
 * the top-level service_tree is reached, throtl_data->dispatch_work is
 * kicked so that the ready bio's are issued.
 */
static void throtl_pending_timer_fn(unsigned long arg)
{
        struct throtl_service_queue *sq = (void *)arg;
        struct throtl_grp *tg = sq_to_tg(sq);
        struct throtl_data *td = sq_to_td(sq);
        struct request_queue *q = td->queue;
        struct throtl_service_queue *parent_sq;
        bool dispatched;
        int ret;

        spin_lock_irq(q->queue_lock);
again:
        parent_sq = sq->parent_sq;
        dispatched = false;

        while (true) {
                throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u",
                           sq->nr_queued[READ] + sq->nr_queued[WRITE],
                           sq->nr_queued[READ], sq->nr_queued[WRITE]);

                ret = throtl_select_dispatch(sq);
                if (ret) {
                        throtl_log(sq, "bios disp=%u", ret);
                        dispatched = true;
                }

                if (throtl_schedule_next_dispatch(sq, false))
                        break;

                /* this dispatch window is still open, relax and repeat */
                spin_unlock_irq(q->queue_lock);
                cpu_relax();
                spin_lock_irq(q->queue_lock);
        }

        if (!dispatched)
                goto out_unlock;

        if (parent_sq) {
                /* @parent_sq is another throtl_grp, propagate dispatch */
                if (tg->flags & THROTL_TG_WAS_EMPTY) {
                        tg_update_disptime(tg);
                        if (!throtl_schedule_next_dispatch(parent_sq, false)) {
                                /* window is already open, repeat dispatching */
                                sq = parent_sq;
                                tg = sq_to_tg(sq);
                                goto again;
                        }
                }
        } else {
                /* reached the top-level, queue issuing */
                queue_work(kthrotld_workqueue, &td->dispatch_work);
        }
out_unlock:
        spin_unlock_irq(q->queue_lock);
}
/**
 * blk_throtl_dispatch_work_fn - work function for throtl_data->dispatch_work
 * @work: work item being executed
 *
 * This function is queued for execution when bio's reach the bio_lists[]
 * of throtl_data->service_queue.  Those bio's are ready and issued by this
 * function.
 */
static void blk_throtl_dispatch_work_fn(struct work_struct *work)
{
        struct throtl_data *td = container_of(work, struct throtl_data,
                                              dispatch_work);
        struct throtl_service_queue *td_sq = &td->service_queue;
        struct request_queue *q = td->queue;
        struct bio_list bio_list_on_stack;
        struct bio *bio;
        struct blk_plug plug;
        int rw;

        bio_list_init(&bio_list_on_stack);

        spin_lock_irq(q->queue_lock);
        for (rw = READ; rw <= WRITE; rw++)
                while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
                        bio_list_add(&bio_list_on_stack, bio);
        spin_unlock_irq(q->queue_lock);

        if (!bio_list_empty(&bio_list_on_stack)) {
                blk_start_plug(&plug);
                while ((bio = bio_list_pop(&bio_list_on_stack)))
                        generic_make_request(bio);
                blk_finish_plug(&plug);
        }
}
static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
                              int off)
{
        struct throtl_grp *tg = pd_to_tg(pd);
        u64 v = *(u64 *)((void *)tg + off);

        if (v == U64_MAX)
                return 0;
        return __blkg_prfill_u64(sf, pd, v);
}

static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
                               int off)
{
        struct throtl_grp *tg = pd_to_tg(pd);
        unsigned int v = *(unsigned int *)((void *)tg + off);

        if (v == UINT_MAX)
                return 0;
        return __blkg_prfill_u64(sf, pd, v);
}

static int tg_print_conf_u64(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_u64,
                          &blkcg_policy_throtl, seq_cft(sf)->private, false);
        return 0;
}

static int tg_print_conf_uint(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_uint,
                          &blkcg_policy_throtl, seq_cft(sf)->private, false);
        return 0;
}
static void tg_conf_updated(struct throtl_grp *tg)
{
        struct throtl_service_queue *sq = &tg->service_queue;
        struct cgroup_subsys_state *pos_css;
        struct blkcg_gq *blkg;

        throtl_log(&tg->service_queue,
                   "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
                   tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE),
                   tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE));

        /*
         * Update has_rules[] flags for the updated tg's subtree.  A tg is
         * considered to have rules if either the tg itself or any of its
         * ancestors has rules.  This identifies groups without any
         * restrictions in the whole hierarchy and allows them to bypass
         * blk-throttle.
         */
        blkg_for_each_descendant_pre(blkg, pos_css, tg_to_blkg(tg))
                tg_update_has_rules(blkg_to_tg(blkg));

        /*
         * We're already holding queue_lock and know @tg is valid.  Let's
         * apply the new config directly.
         *
         * Restart the slices for both READ and WRITES. It might happen
         * that a group's limits are dropped suddenly and we don't want to
         * account recently dispatched IO with the new low rate.
         */
        throtl_start_new_slice(tg, 0);
        throtl_start_new_slice(tg, 1);

        if (tg->flags & THROTL_TG_PENDING) {
                tg_update_disptime(tg);
                throtl_schedule_next_dispatch(sq->parent_sq, true);
        }
}
static ssize_t tg_set_conf(struct kernfs_open_file *of,
                           char *buf, size_t nbytes, loff_t off, bool is_u64)
{
        struct blkcg *blkcg = css_to_blkcg(of_css(of));
        struct blkg_conf_ctx ctx;
        struct throtl_grp *tg;
        int ret;
        u64 v;

        ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
        if (ret)
                return ret;

        ret = -EINVAL;
        if (sscanf(ctx.body, "%llu", &v) != 1)
                goto out_finish;
        if (!v)
                v = U64_MAX;

        tg = blkg_to_tg(ctx.blkg);

        if (is_u64)
                *(u64 *)((void *)tg + of_cft(of)->private) = v;
        else
                *(unsigned int *)((void *)tg + of_cft(of)->private) = v;

        tg_conf_updated(tg);
        ret = 0;
out_finish:
        blkg_conf_finish(&ctx);
        return ret ?: nbytes;
}
static ssize_t tg_set_conf_u64(struct kernfs_open_file *of,
                               char *buf, size_t nbytes, loff_t off)
{
        return tg_set_conf(of, buf, nbytes, off, true);
}

static ssize_t tg_set_conf_uint(struct kernfs_open_file *of,
                                char *buf, size_t nbytes, loff_t off)
{
        return tg_set_conf(of, buf, nbytes, off, false);
}
static struct cftype throtl_legacy_files[] = {
        {
                .name = "throttle.read_bps_device",
                .private = offsetof(struct throtl_grp, bps[READ][LIMIT_MAX]),
                .seq_show = tg_print_conf_u64,
                .write = tg_set_conf_u64,
        },
        {
                .name = "throttle.write_bps_device",
                .private = offsetof(struct throtl_grp, bps[WRITE][LIMIT_MAX]),
                .seq_show = tg_print_conf_u64,
                .write = tg_set_conf_u64,
        },
        {
                .name = "throttle.read_iops_device",
                .private = offsetof(struct throtl_grp, iops[READ][LIMIT_MAX]),
                .seq_show = tg_print_conf_uint,
                .write = tg_set_conf_uint,
        },
        {
                .name = "throttle.write_iops_device",
                .private = offsetof(struct throtl_grp, iops[WRITE][LIMIT_MAX]),
                .seq_show = tg_print_conf_uint,
                .write = tg_set_conf_uint,
        },
        {
                .name = "throttle.io_service_bytes",
                .private = (unsigned long)&blkcg_policy_throtl,
                .seq_show = blkg_print_stat_bytes,
        },
        {
                .name = "throttle.io_serviced",
                .private = (unsigned long)&blkcg_policy_throtl,
                .seq_show = blkg_print_stat_ios,
        },
        { }     /* terminate */
};
static u64 tg_prfill_limit(struct seq_file *sf, struct blkg_policy_data *pd,
                           int off)
{
        struct throtl_grp *tg = pd_to_tg(pd);
        const char *dname = blkg_dev_name(pd->blkg);
        char bufs[4][21] = { "max", "max", "max", "max" };
        u64 bps_dft;
        unsigned int iops_dft;

        if (!dname)
                return 0;

        if (off == LIMIT_LOW) {
                bps_dft = 0;
                iops_dft = 0;
        } else {
                bps_dft = U64_MAX;
                iops_dft = UINT_MAX;
        }

        if (tg->bps_conf[READ][off] == bps_dft &&
            tg->bps_conf[WRITE][off] == bps_dft &&
            tg->iops_conf[READ][off] == iops_dft &&
            tg->iops_conf[WRITE][off] == iops_dft)
                return 0;

        if (tg->bps_conf[READ][off] != bps_dft)
                snprintf(bufs[0], sizeof(bufs[0]), "%llu",
                        tg->bps_conf[READ][off]);
        if (tg->bps_conf[WRITE][off] != bps_dft)
                snprintf(bufs[1], sizeof(bufs[1]), "%llu",
                        tg->bps_conf[WRITE][off]);
        if (tg->iops_conf[READ][off] != iops_dft)
                snprintf(bufs[2], sizeof(bufs[2]), "%u",
                        tg->iops_conf[READ][off]);
        if (tg->iops_conf[WRITE][off] != iops_dft)
                snprintf(bufs[3], sizeof(bufs[3]), "%u",
                        tg->iops_conf[WRITE][off]);

        seq_printf(sf, "%s rbps=%s wbps=%s riops=%s wiops=%s\n",
                   dname, bufs[0], bufs[1], bufs[2], bufs[3]);
        return 0;
}
static int tg_print_limit(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_limit,
                          &blkcg_policy_throtl, seq_cft(sf)->private, false);
        return 0;
}
static ssize_t tg_set_limit(struct kernfs_open_file *of,
                            char *buf, size_t nbytes, loff_t off)
{
        struct blkcg *blkcg = css_to_blkcg(of_css(of));
        struct blkg_conf_ctx ctx;
        struct throtl_grp *tg;
        u64 v[4];
        int ret;
        int index = of_cft(of)->private;

        ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
        if (ret)
                return ret;

        tg = blkg_to_tg(ctx.blkg);

        v[0] = tg->bps_conf[READ][index];
        v[1] = tg->bps_conf[WRITE][index];
        v[2] = tg->iops_conf[READ][index];
        v[3] = tg->iops_conf[WRITE][index];

        while (true) {
                char tok[27];   /* wiops=18446744073709551616 */
                char *p;
                u64 val = U64_MAX;
                int len;

                if (sscanf(ctx.body, "%26s%n", tok, &len) != 1)
                        break;
                if (tok[0] == '\0')
                        break;
                ctx.body += len;

                ret = -EINVAL;
                p = tok;
                strsep(&p, "=");
                if (!p || (sscanf(p, "%llu", &val) != 1 && strcmp(p, "max")))
                        goto out_finish;

                ret = -EINVAL;
                if (!strcmp(tok, "rbps"))
                        v[0] = val;
                else if (!strcmp(tok, "wbps"))
                        v[1] = val;
                else if (!strcmp(tok, "riops"))
                        v[2] = min_t(u64, val, UINT_MAX);
                else if (!strcmp(tok, "wiops"))
                        v[3] = min_t(u64, val, UINT_MAX);
                else
                        goto out_finish;
        }

        tg->bps_conf[READ][index] = v[0];
        tg->bps_conf[WRITE][index] = v[1];
        tg->iops_conf[READ][index] = v[2];
        tg->iops_conf[WRITE][index] = v[3];

        if (index == LIMIT_MAX) {
                tg->bps[READ][index] = v[0];
                tg->bps[WRITE][index] = v[1];
                tg->iops[READ][index] = v[2];
                tg->iops[WRITE][index] = v[3];
        }
        tg->bps[READ][LIMIT_LOW] = min(tg->bps_conf[READ][LIMIT_LOW],
                tg->bps_conf[READ][LIMIT_MAX]);
        tg->bps[WRITE][LIMIT_LOW] = min(tg->bps_conf[WRITE][LIMIT_LOW],
                tg->bps_conf[WRITE][LIMIT_MAX]);
        tg->iops[READ][LIMIT_LOW] = min(tg->iops_conf[READ][LIMIT_LOW],
                tg->iops_conf[READ][LIMIT_MAX]);
        tg->iops[WRITE][LIMIT_LOW] = min(tg->iops_conf[WRITE][LIMIT_LOW],
                tg->iops_conf[WRITE][LIMIT_MAX]);

        if (index == LIMIT_LOW) {
                blk_throtl_update_limit_valid(tg->td);
                if (tg->td->limit_valid[LIMIT_LOW])
                        tg->td->limit_index = LIMIT_LOW;
        }
        tg_conf_updated(tg);
        ret = 0;
out_finish:
        blkg_conf_finish(&ctx);
        return ret ?: nbytes;
}
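/*
 * Usage sketch (illustrative, not from the original source): the written
 * value is parsed as whitespace-separated "key=value" tokens, e.g.
 *
 *      rbps=1048576 wbps=max riops=1000 wiops=max
 *
 * where "max" stands for no limit for that field and any field not
 * mentioned keeps its current setting (v[] starts from the existing
 * configuration above).
 */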
static struct cftype throtl_files[] = {
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
        {
                .name = "low",
                .flags = CFTYPE_NOT_ON_ROOT,
                .seq_show = tg_print_limit,
                .write = tg_set_limit,
                .private = LIMIT_LOW,
        },
#endif
        {
                .name = "max",
                .flags = CFTYPE_NOT_ON_ROOT,
                .seq_show = tg_print_limit,
                .write = tg_set_limit,
                .private = LIMIT_MAX,
        },
        { }     /* terminate */
};
static void throtl_shutdown_wq(struct request_queue *q)
{
        struct throtl_data *td = q->td;

        cancel_work_sync(&td->dispatch_work);
}

static struct blkcg_policy blkcg_policy_throtl = {
        .dfl_cftypes            = throtl_files,
        .legacy_cftypes         = throtl_legacy_files,

        .pd_alloc_fn            = throtl_pd_alloc,
        .pd_init_fn             = throtl_pd_init,
        .pd_online_fn           = throtl_pd_online,
        .pd_offline_fn          = throtl_pd_offline,
        .pd_free_fn             = throtl_pd_free,
};
bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
                    struct bio *bio)
{
        struct throtl_qnode *qn = NULL;
        struct throtl_grp *tg = blkg_to_tg(blkg ?: q->root_blkg);
        struct throtl_service_queue *sq;
        bool rw = bio_data_dir(bio);
        bool throttled = false;

        WARN_ON_ONCE(!rcu_read_lock_held());

        /* see throtl_charge_bio() */
        if (bio_flagged(bio, BIO_THROTTLED) || !tg->has_rules[rw])
                goto out;

        spin_lock_irq(q->queue_lock);

        if (unlikely(blk_queue_bypass(q)))
                goto out_unlock;

        sq = &tg->service_queue;

        while (true) {
                /* throtl is FIFO - if bios are already queued, should queue */
                if (sq->nr_queued[rw])
                        break;

                /* if above limits, break to queue */
                if (!tg_may_dispatch(tg, bio, NULL))
                        break;

                /* within limits, let's charge and dispatch directly */
                throtl_charge_bio(tg, bio);

                /*
                 * We need to trim the slice even when bios are not being queued
                 * otherwise it might happen that a bio is not queued for
                 * a long time and the slice keeps on extending and trim is not
                 * called for a long time.  Now if limits are reduced suddenly
                 * we take into account all the IO dispatched so far at the new
                 * low rate and newly queued IO gets a really long dispatch
                 * time.
                 *
                 * So keep on trimming slice even if bio is not queued.
                 */
                throtl_trim_slice(tg, rw);

                /*
                 * @bio passed through this layer without being throttled.
                 * Climb up the ladder.  If we're already at the top, it
                 * can be executed directly.
                 */
                qn = &tg->qnode_on_parent[rw];
                sq = sq->parent_sq;
                tg = sq_to_tg(sq);
                if (!tg)
                        goto out_unlock;
        }

        /* out-of-limit, queue to @tg */
        throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
                   rw == READ ? 'R' : 'W',
                   tg->bytes_disp[rw], bio->bi_iter.bi_size,
                   tg_bps_limit(tg, rw),
                   tg->io_disp[rw], tg_iops_limit(tg, rw),
                   sq->nr_queued[READ], sq->nr_queued[WRITE]);

        bio_associate_current(bio);
        tg->td->nr_queued[rw]++;
        throtl_add_bio_tg(bio, qn, tg);
        throttled = true;

        /*
         * Update @tg's dispatch time and force schedule dispatch if @tg
         * was empty before @bio.  The forced scheduling isn't likely to
         * cause undue delay as @bio is likely to be dispatched directly if
         * its @tg's disptime is not in the future.
         */
        if (tg->flags & THROTL_TG_WAS_EMPTY) {
                tg_update_disptime(tg);
                throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true);
        }

out_unlock:
        spin_unlock_irq(q->queue_lock);
out:
        /*
         * As multiple blk-throtls may stack in the same issue path, we
         * don't want bios to leave with the flag set.  Clear the flag if
         * being issued.
         */
        if (!throttled)
                bio_clear_flag(bio, BIO_THROTTLED);
        return throttled;
}
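/*
 * Illustrative summary (not from the original source): for a bio that stays
 * within every ancestor's limits, the loop above walks qn/sq/tg all the way
 * to the top and the function returns false, so the caller issues the bio
 * immediately; the first level that is over its limit queues the bio on
 * that tg and returns true, and the pending_timer/dispatch_work machinery
 * re-issues it later.
 */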
/*
 * Dispatch all bios from all children tg's queued on @parent_sq.  On
 * return, @parent_sq is guaranteed to not have any active children tg's
 * and all bios from previously active tg's are on @parent_sq->bio_lists[].
 */
static void tg_drain_bios(struct throtl_service_queue *parent_sq)
{
        struct throtl_grp *tg;

        while ((tg = throtl_rb_first(parent_sq))) {
                struct throtl_service_queue *sq = &tg->service_queue;
                struct bio *bio;

                throtl_dequeue_tg(tg);

                while ((bio = throtl_peek_queued(&sq->queued[READ])))
                        tg_dispatch_one_bio(tg, bio_data_dir(bio));
                while ((bio = throtl_peek_queued(&sq->queued[WRITE])))
                        tg_dispatch_one_bio(tg, bio_data_dir(bio));
        }
}
/**
 * blk_throtl_drain - drain throttled bios
 * @q: request_queue to drain throttled bios for
 *
 * Dispatch all currently throttled bios on @q through ->make_request_fn().
 */
void blk_throtl_drain(struct request_queue *q)
        __releases(q->queue_lock) __acquires(q->queue_lock)
{
        struct throtl_data *td = q->td;
        struct blkcg_gq *blkg;
        struct cgroup_subsys_state *pos_css;
        struct bio *bio;
        int rw;

        queue_lockdep_assert_held(q);
        rcu_read_lock();

        /*
         * Drain each tg while doing post-order walk on the blkg tree, so
         * that all bios are propagated to td->service_queue.  It'd be
         * better to walk service_queue tree directly but blkg walk is
         * easier.
         */
        blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg)
                tg_drain_bios(&blkg_to_tg(blkg)->service_queue);

        /* finally, transfer bios from top-level tg's into the td */
        tg_drain_bios(&td->service_queue);

        rcu_read_unlock();
        spin_unlock_irq(q->queue_lock);

        /* all bios now should be in td->service_queue, issue them */
        for (rw = READ; rw <= WRITE; rw++)
                while ((bio = throtl_pop_queued(&td->service_queue.queued[rw],
                                                NULL)))
                        generic_make_request(bio);

        spin_lock_irq(q->queue_lock);
}
int blk_throtl_init(struct request_queue *q)
{
        struct throtl_data *td;
        int ret;

        td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
        if (!td)
                return -ENOMEM;

        INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
        throtl_service_queue_init(&td->service_queue);

        q->td = td;
        td->queue = q;

        td->limit_valid[LIMIT_MAX] = true;
        td->limit_index = LIMIT_MAX;

        /* activate policy */
        ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
        if (ret)
                kfree(td);
        return ret;
}

void blk_throtl_exit(struct request_queue *q)
{
        BUG_ON(!q->td);
        throtl_shutdown_wq(q);
        blkcg_deactivate_policy(q, &blkcg_policy_throtl);
        kfree(q->td);
}

static int __init throtl_init(void)
{
        kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
        if (!kthrotld_workqueue)
                panic("Failed to create kthrotld\n");

        return blkcg_policy_register(&blkcg_policy_throtl);
}

module_init(throtl_init);