/*
 * Interface for controlling IO bandwidth on a request queue
 *
 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blktrace_api.h>
#include <linux/blk-cgroup.h>
#include "blk.h"
/* Max dispatch from a group in 1 round */
static int throtl_grp_quantum = 8;

/* Total max dispatch from all groups in one round */
static int throtl_quantum = 32;

/* Throttling is performed over a slice and after that slice is renewed */
#define DFL_THROTL_SLICE_HD (HZ / 10)
#define DFL_THROTL_SLICE_SSD (HZ / 50)
#define MAX_THROTL_SLICE (HZ)

static struct blkcg_policy blkcg_policy_throtl;

/* A workqueue to queue throttle related work */
static struct workqueue_struct *kthrotld_workqueue;
/*
 * To implement hierarchical throttling, throtl_grps form a tree and bios
 * are dispatched upwards level by level until they reach the top and get
 * issued.  When dispatching bios from the children and local group at each
 * level, if the bios are dispatched into a single bio_list, there's a risk
 * of a local or child group which can queue many bios at once filling up
 * the list starving others.
 *
 * To avoid such starvation, dispatched bios are queued separately
 * according to where they came from.  When they are again dispatched to
 * the parent, they're popped in round-robin order so that no single source
 * hogs the dispatch window.
 *
 * throtl_qnode is used to keep the queued bios separated by their sources.
 * Bios are queued to throtl_qnode which in turn is queued to
 * throtl_service_queue and then dispatched in round-robin order.
 *
 * It's also used to track the reference counts on blkg's.  A qnode always
 * belongs to a throtl_grp and gets queued on itself or the parent, so
 * incrementing the reference of the associated throtl_grp when a qnode is
 * queued and decrementing when dequeued is enough to keep the whole blkg
 * tree pinned while bios are in flight.
 */
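/*
 * Illustrative example (not part of the original source): suppose child
 * groups A and B both dispatch into the same parent, A with many queued
 * bios and B with one.  Each group's bios sit in its own qnode on the
 * parent's queued[] list, and throtl_pop_queued() below moves a still
 * non-empty qnode to the tail after every pop, so bios are issued
 * A, B, A, A, ... rather than draining all of A before B gets a turn.
 */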
struct throtl_qnode {
	struct list_head	node;		/* service_queue->queued[] */
	struct bio_list		bios;		/* queued bios */
	struct throtl_grp	*tg;		/* tg this qnode belongs to */
};

struct throtl_service_queue {
	struct throtl_service_queue *parent_sq;	/* the parent service_queue */

	/*
	 * Bios queued directly to this service_queue or dispatched from
	 * children throtl_grp's.
	 */
	struct list_head	queued[2];	/* throtl_qnode [READ/WRITE] */
	unsigned int		nr_queued[2];	/* number of queued bios */

	/*
	 * RB tree of active children throtl_grp's, which are sorted by
	 * their ->disptime.
	 */
	struct rb_root		pending_tree;	/* RB tree of active tgs */
	struct rb_node		*first_pending;	/* first node in the tree */
	unsigned int		nr_pending;	/* # queued in the tree */
	unsigned long		first_pending_disptime;	/* disptime of the first tg */
	struct timer_list	pending_timer;	/* fires on first_pending_disptime */
};

enum tg_state_flags {
	THROTL_TG_PENDING	= 1 << 0,	/* on parent's pending tree */
	THROTL_TG_WAS_EMPTY	= 1 << 1,	/* bio_lists[] became non-empty */
};

#define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)

enum {
	LIMIT_LOW,
	LIMIT_MAX,
	LIMIT_CNT,
};
struct throtl_grp {
	/* must be the first member */
	struct blkg_policy_data pd;

	/* active throtl group service_queue member */
	struct rb_node rb_node;

	/* throtl_data this group belongs to */
	struct throtl_data *td;

	/* this group's service queue */
	struct throtl_service_queue service_queue;

	/*
	 * qnode_on_self is used when bios are directly queued to this
	 * throtl_grp so that local bios compete fairly with bios
	 * dispatched from children.  qnode_on_parent is used when bios are
	 * dispatched from this throtl_grp into its parent and will compete
	 * with the sibling qnode_on_parents and the parent's
	 * qnode_on_self.
	 */
	struct throtl_qnode qnode_on_self[2];
	struct throtl_qnode qnode_on_parent[2];

	/*
	 * Dispatch time in jiffies.  This is the estimated time when group
	 * will unthrottle and is ready to dispatch more bio.  It is used as
	 * key to sort active groups in service tree.
	 */
	unsigned long disptime;

	unsigned int flags;

	/* are there any throtl rules between this group and td? */
	bool has_rules[2];

	/* internally used bytes per second rate limits */
	uint64_t bps[2][LIMIT_CNT];
	/* user configured bps limits */
	uint64_t bps_conf[2][LIMIT_CNT];

	/* internally used IOPS limits */
	unsigned int iops[2][LIMIT_CNT];
	/* user configured IOPS limits */
	unsigned int iops_conf[2][LIMIT_CNT];

	/* Number of bytes dispatched in current slice */
	uint64_t bytes_disp[2];
	/* Number of bio's dispatched in current slice */
	unsigned int io_disp[2];

	unsigned long last_low_overflow_time[2];

	uint64_t last_bytes_disp[2];
	unsigned int last_io_disp[2];

	unsigned long last_check_time;

	unsigned long last_dispatch_time[2];

	/* When did we start a new slice */
	unsigned long slice_start[2];
	unsigned long slice_end[2];
};

struct throtl_data
{
	/* service tree for active throtl groups */
	struct throtl_service_queue service_queue;

	struct request_queue *queue;

	/* Total Number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	unsigned int throtl_slice;

	/* Work for dispatching throttled bios */
	struct work_struct dispatch_work;
	unsigned int limit_index;
	bool limit_valid[LIMIT_CNT];

	unsigned long low_upgrade_time;
	unsigned long low_downgrade_time;

	unsigned int scale;
};
static void throtl_pending_timer_fn(unsigned long arg);

static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
}

static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
{
	return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
}

static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
{
	return pd_to_blkg(&tg->pd);
}
/**
 * sq_to_tg - return the throtl_grp the specified service queue belongs to
 * @sq: the throtl_service_queue of interest
 *
 * Return the throtl_grp @sq belongs to.  If @sq is the top-level one
 * embedded in throtl_data, %NULL is returned.
 */
static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq)
{
	if (sq && sq->parent_sq)
		return container_of(sq, struct throtl_grp, service_queue);
	else
		return NULL;
}

/**
 * sq_to_td - return throtl_data the specified service queue belongs to
 * @sq: the throtl_service_queue of interest
 *
 * A service_queue can be embedded in either a throtl_grp or throtl_data.
 * Determine the associated throtl_data accordingly and return it.
 */
static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
{
	struct throtl_grp *tg = sq_to_tg(sq);

	if (tg)
		return tg->td;
	else
		return container_of(sq, struct throtl_data, service_queue);
}
/*
 * cgroup's limit in LIMIT_MAX is scaled if low limit is set. This scale is to
 * make the IO dispatch more smooth.
 * Scale up: linearly scale up according to lapsed time since upgrade. For
 *           every throtl_slice, the limit scales up 1/2 .low limit till the
 *           limit hits .max limit
 * Scale down: exponentially scale down if a cgroup doesn't hit its .low limit
 */
static uint64_t throtl_adjusted_limit(uint64_t low, struct throtl_data *td)
{
	/* arbitrary value to avoid too big scale */
	if (td->scale < 4096 && time_after_eq(jiffies,
	    td->low_upgrade_time + td->scale * td->throtl_slice))
		td->scale = (jiffies - td->low_upgrade_time) / td->throtl_slice;

	return low + (low >> 1) * td->scale;
}
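/*
 * Worked example (illustrative, not from the original source): with a
 * .low limit of 10 MB/s, each elapsed throtl_slice since the last upgrade
 * bumps td->scale by roughly one, so the adjusted limit grows linearly:
 * 10 + (10 >> 1) * scale MB/s = 10, 15, 20, 25, ... MB/s.  The callers
 * (tg_bps_limit()/tg_iops_limit()) still clamp the result to the
 * configured .max limit, and the scale itself is capped at 4096 above.
 */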
static uint64_t tg_bps_limit(struct throtl_grp *tg, int rw)
{
	struct blkcg_gq *blkg = tg_to_blkg(tg);
	struct throtl_data *td;
	uint64_t ret;

	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
		return U64_MAX;

	td = tg->td;
	ret = tg->bps[rw][td->limit_index];
	if (ret == 0 && td->limit_index == LIMIT_LOW)
		return tg->bps[rw][LIMIT_MAX];

	if (td->limit_index == LIMIT_MAX && tg->bps[rw][LIMIT_LOW] &&
	    tg->bps[rw][LIMIT_LOW] != tg->bps[rw][LIMIT_MAX]) {
		uint64_t adjusted;

		adjusted = throtl_adjusted_limit(tg->bps[rw][LIMIT_LOW], td);
		ret = min(tg->bps[rw][LIMIT_MAX], adjusted);
	}
	return ret;
}
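/*
 * Illustrative numbers (not from the original source): with
 * bps[rw][LIMIT_LOW] = 10 MB/s, bps[rw][LIMIT_MAX] = 40 MB/s and
 * td->scale == 4, throtl_adjusted_limit() yields 10 + 5 * 4 = 30 MB/s,
 * so tg_bps_limit() returns min(40, 30) = 30 MB/s.  Once the scaled
 * value passes 40 MB/s, the group simply runs at its .max limit.
 */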
static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
{
	struct blkcg_gq *blkg = tg_to_blkg(tg);
	struct throtl_data *td;
	unsigned int ret;

	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
		return UINT_MAX;

	td = tg->td;
	ret = tg->iops[rw][td->limit_index];
	if (ret == 0 && tg->td->limit_index == LIMIT_LOW)
		return tg->iops[rw][LIMIT_MAX];

	if (td->limit_index == LIMIT_MAX && tg->iops[rw][LIMIT_LOW] &&
	    tg->iops[rw][LIMIT_LOW] != tg->iops[rw][LIMIT_MAX]) {
		uint64_t adjusted;

		adjusted = throtl_adjusted_limit(tg->iops[rw][LIMIT_LOW], td);
		if (adjusted > UINT_MAX)
			adjusted = UINT_MAX;
		ret = min_t(unsigned int, tg->iops[rw][LIMIT_MAX], adjusted);
	}
	return ret;
}
/**
 * throtl_log - log debug message via blktrace
 * @sq: the service_queue being reported
 * @fmt: printf format string
 * @args: printf args
 *
 * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
 * throtl_grp; otherwise, just "throtl".
 */
#define throtl_log(sq, fmt, args...)	do {				\
	struct throtl_grp *__tg = sq_to_tg((sq));			\
	struct throtl_data *__td = sq_to_td((sq));			\
									\
	(void)__td;							\
	if (likely(!blk_trace_note_message_enabled(__td->queue)))	\
		break;							\
	if ((__tg)) {							\
		char __pbuf[128];					\
									\
		blkg_path(tg_to_blkg(__tg), __pbuf, sizeof(__pbuf));	\
		blk_add_trace_msg(__td->queue, "throtl %s " fmt, __pbuf, ##args); \
	} else {							\
		blk_add_trace_msg(__td->queue, "throtl " fmt, ##args);	\
	}								\
} while (0)
static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
{
	INIT_LIST_HEAD(&qn->node);
	bio_list_init(&qn->bios);
	qn->tg = tg;
}

/**
 * throtl_qnode_add_bio - add a bio to a throtl_qnode and activate it
 * @bio: bio being added
 * @qn: qnode to add bio to
 * @queued: the service_queue->queued[] list @qn belongs to
 *
 * Add @bio to @qn and put @qn on @queued if it's not already on.
 * @qn->tg's reference count is bumped when @qn is activated.  See the
 * comment on top of throtl_qnode definition for details.
 */
static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,
				 struct list_head *queued)
{
	bio_list_add(&qn->bios, bio);
	if (list_empty(&qn->node)) {
		list_add_tail(&qn->node, queued);
		blkg_get(tg_to_blkg(qn->tg));
	}
}
/**
 * throtl_peek_queued - peek the first bio on a qnode list
 * @queued: the qnode list to peek
 */
static struct bio *throtl_peek_queued(struct list_head *queued)
{
	struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node);
	struct bio *bio;

	if (list_empty(queued))
		return NULL;

	bio = bio_list_peek(&qn->bios);
	WARN_ON_ONCE(!bio);
	return bio;
}

/**
 * throtl_pop_queued - pop the first bio from a qnode list
 * @queued: the qnode list to pop a bio from
 * @tg_to_put: optional out argument for throtl_grp to put
 *
 * Pop the first bio from the qnode list @queued.  After popping, the first
 * qnode is removed from @queued if empty or moved to the end of @queued so
 * that the popping order is round-robin.
 *
 * When the first qnode is removed, its associated throtl_grp should be put
 * too.  If @tg_to_put is NULL, this function automatically puts it;
 * otherwise, *@tg_to_put is set to the throtl_grp to put and the caller is
 * responsible for putting it.
 */
static struct bio *throtl_pop_queued(struct list_head *queued,
				     struct throtl_grp **tg_to_put)
{
	struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node);
	struct bio *bio;

	if (list_empty(queued))
		return NULL;

	bio = bio_list_pop(&qn->bios);
	WARN_ON_ONCE(!bio);

	if (bio_list_empty(&qn->bios)) {
		list_del_init(&qn->node);
		if (tg_to_put)
			*tg_to_put = qn->tg;
		else
			blkg_put(tg_to_blkg(qn->tg));
	} else {
		list_move_tail(&qn->node, queued);
	}

	return bio;
}
/* init a service_queue, assumes the caller zeroed it */
static void throtl_service_queue_init(struct throtl_service_queue *sq)
{
	INIT_LIST_HEAD(&sq->queued[0]);
	INIT_LIST_HEAD(&sq->queued[1]);
	sq->pending_tree = RB_ROOT;
	setup_timer(&sq->pending_timer, throtl_pending_timer_fn,
		    (unsigned long)sq);
}

static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp, int node)
{
	struct throtl_grp *tg;
	int rw;

	tg = kzalloc_node(sizeof(*tg), gfp, node);
	if (!tg)
		return NULL;

	throtl_service_queue_init(&tg->service_queue);

	for (rw = READ; rw <= WRITE; rw++) {
		throtl_qnode_init(&tg->qnode_on_self[rw], tg);
		throtl_qnode_init(&tg->qnode_on_parent[rw], tg);
	}

	RB_CLEAR_NODE(&tg->rb_node);
	tg->bps[READ][LIMIT_MAX] = U64_MAX;
	tg->bps[WRITE][LIMIT_MAX] = U64_MAX;
	tg->iops[READ][LIMIT_MAX] = UINT_MAX;
	tg->iops[WRITE][LIMIT_MAX] = UINT_MAX;
	tg->bps_conf[READ][LIMIT_MAX] = U64_MAX;
	tg->bps_conf[WRITE][LIMIT_MAX] = U64_MAX;
	tg->iops_conf[READ][LIMIT_MAX] = UINT_MAX;
	tg->iops_conf[WRITE][LIMIT_MAX] = UINT_MAX;
	/* LIMIT_LOW will have default value 0 */

	return &tg->pd;
}
static void throtl_pd_init(struct blkg_policy_data *pd)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	struct blkcg_gq *blkg = tg_to_blkg(tg);
	struct throtl_data *td = blkg->q->td;
	struct throtl_service_queue *sq = &tg->service_queue;

	/*
	 * If on the default hierarchy, we switch to properly hierarchical
	 * behavior where limits on a given throtl_grp are applied to the
	 * whole subtree rather than just the group itself.  e.g. If 16M
	 * read_bps limit is set on the root group, the whole system can't
	 * exceed 16M for the device.
	 *
	 * If not on the default hierarchy, the broken flat hierarchy
	 * behavior is retained where all throtl_grps are treated as if
	 * they're all separate root groups right below throtl_data.
	 * Limits of a group don't interact with limits of other groups
	 * regardless of the position of the group in the hierarchy.
	 */
	sq->parent_sq = &td->service_queue;
	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent)
		sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
	tg->td = td;
}
/*
 * Set has_rules[] if @tg or any of its parents have limits configured.
 * This doesn't require walking up to the top of the hierarchy as the
 * parent's has_rules[] is guaranteed to be correct.
 */
static void tg_update_has_rules(struct throtl_grp *tg)
{
	struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq);
	struct throtl_data *td = tg->td;
	int rw;

	for (rw = READ; rw <= WRITE; rw++)
		tg->has_rules[rw] = (parent_tg && parent_tg->has_rules[rw]) ||
			(td->limit_valid[td->limit_index] &&
			 (tg_bps_limit(tg, rw) != U64_MAX ||
			  tg_iops_limit(tg, rw) != UINT_MAX));
}
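/*
 * Example (illustrative): if a parent group has a read bps limit but a
 * child has none of its own, the child's has_rules[READ] is still true
 * via parent_tg->has_rules[READ], so its bios keep going through
 * blk-throttle instead of taking the early-out path in blk_throtl_bio().
 */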
static void throtl_pd_online(struct blkg_policy_data *pd)
{
	struct throtl_grp *tg = pd_to_tg(pd);

	/*
	 * We don't want new groups to escape the limits of their ancestors.
	 * Update has_rules[] after a new group is brought online.
	 */
	tg_update_has_rules(tg);
	tg->last_dispatch_time[READ] = jiffies;
	tg->last_dispatch_time[WRITE] = jiffies;
}

static void blk_throtl_update_limit_valid(struct throtl_data *td)
{
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *blkg;
	bool low_valid = false;

	rcu_read_lock();
	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
		struct throtl_grp *tg = blkg_to_tg(blkg);

		if (tg->bps[READ][LIMIT_LOW] || tg->bps[WRITE][LIMIT_LOW] ||
		    tg->iops[READ][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW])
			low_valid = true;
	}
	rcu_read_unlock();

	td->limit_valid[LIMIT_LOW] = low_valid;
}
static void throtl_upgrade_state(struct throtl_data *td);

static void throtl_pd_offline(struct blkg_policy_data *pd)
{
	struct throtl_grp *tg = pd_to_tg(pd);

	tg->bps[READ][LIMIT_LOW] = 0;
	tg->bps[WRITE][LIMIT_LOW] = 0;
	tg->iops[READ][LIMIT_LOW] = 0;
	tg->iops[WRITE][LIMIT_LOW] = 0;

	blk_throtl_update_limit_valid(tg->td);

	if (!tg->td->limit_valid[tg->td->limit_index])
		throtl_upgrade_state(tg->td);
}

static void throtl_pd_free(struct blkg_policy_data *pd)
{
	struct throtl_grp *tg = pd_to_tg(pd);

	del_timer_sync(&tg->service_queue.pending_timer);
	kfree(tg);
}
static struct throtl_grp *
throtl_rb_first(struct throtl_service_queue *parent_sq)
{
	/* Service tree is empty */
	if (!parent_sq->nr_pending)
		return NULL;

	if (!parent_sq->first_pending)
		parent_sq->first_pending = rb_first(&parent_sq->pending_tree);

	if (parent_sq->first_pending)
		return rb_entry_tg(parent_sq->first_pending);

	return NULL;
}

static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
	rb_erase(n, root);
	RB_CLEAR_NODE(n);
}

static void throtl_rb_erase(struct rb_node *n,
			    struct throtl_service_queue *parent_sq)
{
	if (parent_sq->first_pending == n)
		parent_sq->first_pending = NULL;
	rb_erase_init(n, &parent_sq->pending_tree);
	--parent_sq->nr_pending;
}

static void update_min_dispatch_time(struct throtl_service_queue *parent_sq)
{
	struct throtl_grp *tg;

	tg = throtl_rb_first(parent_sq);
	if (!tg)
		return;

	parent_sq->first_pending_disptime = tg->disptime;
}
static void tg_service_queue_add(struct throtl_grp *tg)
{
	struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq;
	struct rb_node **node = &parent_sq->pending_tree.rb_node;
	struct rb_node *parent = NULL;
	struct throtl_grp *__tg;
	unsigned long key = tg->disptime;
	int left = 1;

	while (*node != NULL) {
		parent = *node;
		__tg = rb_entry_tg(parent);

		if (time_before(key, __tg->disptime))
			node = &parent->rb_left;
		else {
			node = &parent->rb_right;
			left = 0;
		}
	}

	if (left)
		parent_sq->first_pending = &tg->rb_node;

	rb_link_node(&tg->rb_node, parent, node);
	rb_insert_color(&tg->rb_node, &parent_sq->pending_tree);
}

static void __throtl_enqueue_tg(struct throtl_grp *tg)
{
	tg_service_queue_add(tg);
	tg->flags |= THROTL_TG_PENDING;
	tg->service_queue.parent_sq->nr_pending++;
}

static void throtl_enqueue_tg(struct throtl_grp *tg)
{
	if (!(tg->flags & THROTL_TG_PENDING))
		__throtl_enqueue_tg(tg);
}

static void __throtl_dequeue_tg(struct throtl_grp *tg)
{
	throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq);
	tg->flags &= ~THROTL_TG_PENDING;
}

static void throtl_dequeue_tg(struct throtl_grp *tg)
{
	if (tg->flags & THROTL_TG_PENDING)
		__throtl_dequeue_tg(tg);
}
/* Call with queue lock held */
static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
					  unsigned long expires)
{
	unsigned long max_expire = jiffies + 8 * sq_to_tg(sq)->td->throtl_slice;

	/*
	 * Since we are adjusting the throttle limit dynamically, the sleep
	 * time calculated according to previous limit might be invalid. It's
	 * possible the cgroup sleep time is very long and no other cgroups
	 * have IO running so notify the limit changes. Make sure the cgroup
	 * doesn't sleep too long to avoid the missed notification.
	 */
	if (time_after(expires, max_expire))
		expires = max_expire;
	mod_timer(&sq->pending_timer, expires);
	throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu",
		   expires - jiffies, jiffies);
}
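/*
 * Worked example (illustrative): with HZ == 1000 and throtl_slice ==
 * HZ / 10, max_expire is jiffies + 800, i.e. the timer never sleeps more
 * than 0.8s ahead even if the wait computed under an old, lower limit
 * was much longer.
 */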
/**
 * throtl_schedule_next_dispatch - schedule the next dispatch cycle
 * @sq: the service_queue to schedule dispatch for
 * @force: force scheduling
 *
 * Arm @sq->pending_timer so that the next dispatch cycle starts on the
 * dispatch time of the first pending child.  Returns %true if either timer
 * is armed or there's no pending child left.  %false if the current
 * dispatch window is still open and the caller should continue
 * dispatching.
 *
 * If @force is %true, the dispatch timer is always scheduled and this
 * function is guaranteed to return %true.  This is to be used when the
 * caller can't dispatch itself and needs to invoke pending_timer
 * unconditionally.  Note that forced scheduling is likely to induce short
 * delay before dispatch starts even if @sq->first_pending_disptime is not
 * in the future and thus shouldn't be used in hot paths.
 */
static bool throtl_schedule_next_dispatch(struct throtl_service_queue *sq,
					  bool force)
{
	/* any pending children left? */
	if (!sq->nr_pending)
		return true;

	update_min_dispatch_time(sq);

	/* is the next dispatch time in the future? */
	if (force || time_after(sq->first_pending_disptime, jiffies)) {
		throtl_schedule_pending_timer(sq, sq->first_pending_disptime);
		return true;
	}

	/* tell the caller to continue dispatching */
	return false;
}
static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
						      bool rw, unsigned long start)
{
	tg->bytes_disp[rw] = 0;
	tg->io_disp[rw] = 0;

	/*
	 * Previous slice has expired. We must have trimmed it after last
	 * bio dispatch. That means since start of last slice, we never used
	 * that bandwidth. Do try to make use of that bandwidth while giving
	 * credit.
	 */
	if (time_after_eq(start, tg->slice_start[rw]))
		tg->slice_start[rw] = start;

	tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
	throtl_log(&tg->service_queue,
		   "[%c] new slice with credit start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
		   tg->slice_end[rw], jiffies);
}

static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
{
	tg->bytes_disp[rw] = 0;
	tg->io_disp[rw] = 0;
	tg->slice_start[rw] = jiffies;
	tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
	throtl_log(&tg->service_queue,
		   "[%c] new slice start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
		   tg->slice_end[rw], jiffies);
}
static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
					unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
}

static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
				       unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
	throtl_log(&tg->service_queue,
		   "[%c] extend slice start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
		   tg->slice_end[rw], jiffies);
}

/* Determine if previously allocated or extended slice is complete or not */
static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
{
	if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
		return false;

	return true;
}
/* Trim the used slices and adjust slice start accordingly */
static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
{
	unsigned long nr_slices, time_elapsed, io_trim;
	u64 bytes_trim, tmp;

	BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));

	/*
	 * If bps are unlimited (-1), then the time slice doesn't get
	 * renewed. Don't try to trim the slice if slice is used. A new
	 * slice will start when appropriate.
	 */
	if (throtl_slice_used(tg, rw))
		return;

	/*
	 * A bio has been dispatched. Also adjust slice_end. It might happen
	 * that initially cgroup limit was very low resulting in high
	 * slice_end, but later limit was bumped up and bio was dispatched
	 * sooner, then we need to reduce slice_end. A high bogus slice_end
	 * is bad because it does not allow new slice to start.
	 */
	throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice);

	time_elapsed = jiffies - tg->slice_start[rw];

	nr_slices = time_elapsed / tg->td->throtl_slice;

	if (!nr_slices)
		return;
	tmp = tg_bps_limit(tg, rw) * tg->td->throtl_slice * nr_slices;
	do_div(tmp, HZ);
	bytes_trim = tmp;

	io_trim = (tg_iops_limit(tg, rw) * tg->td->throtl_slice * nr_slices) /
		HZ;

	if (!bytes_trim && !io_trim)
		return;

	if (tg->bytes_disp[rw] >= bytes_trim)
		tg->bytes_disp[rw] -= bytes_trim;
	else
		tg->bytes_disp[rw] = 0;

	if (tg->io_disp[rw] >= io_trim)
		tg->io_disp[rw] -= io_trim;
	else
		tg->io_disp[rw] = 0;

	tg->slice_start[rw] += nr_slices * tg->td->throtl_slice;

	throtl_log(&tg->service_queue,
		   "[%c] trim slice nr=%lu bytes=%llu io=%lu start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
		   tg->slice_start[rw], tg->slice_end[rw], jiffies);
}
static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
				  unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned int io_allowed;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
	u64 tmp;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = tg->td->throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);

	/*
	 * jiffy_elapsed_rnd should not be a big value as minimum iops can be
	 * 1 then at max jiffy elapsed should be equivalent of 1 second as we
	 * will allow dispatch after 1 second and after that slice should
	 * have been trimmed.
	 */

	tmp = (u64)tg_iops_limit(tg, rw) * jiffy_elapsed_rnd;
	do_div(tmp, HZ);

	if (tmp > UINT_MAX)
		io_allowed = UINT_MAX;
	else
		io_allowed = tmp;

	if (tg->io_disp[rw] + 1 <= io_allowed) {
		if (wait)
			*wait = 0;
		return true;
	}

	/* Calc approx time to dispatch */
	jiffy_wait = ((tg->io_disp[rw] + 1) * HZ) / tg_iops_limit(tg, rw) + 1;

	if (jiffy_wait > jiffy_elapsed)
		jiffy_wait = jiffy_wait - jiffy_elapsed;
	else
		jiffy_wait = 1;

	if (wait)
		*wait = jiffy_wait;
	return false;
}
static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
				 unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	u64 bytes_allowed, extra_bytes, tmp;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = tg->td->throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);

	tmp = tg_bps_limit(tg, rw) * jiffy_elapsed_rnd;
	do_div(tmp, HZ);
	bytes_allowed = tmp;

	if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) {
		if (wait)
			*wait = 0;
		return true;
	}

	/* Calc approx time to dispatch */
	extra_bytes = tg->bytes_disp[rw] + bio->bi_iter.bi_size - bytes_allowed;
	jiffy_wait = div64_u64(extra_bytes * HZ, tg_bps_limit(tg, rw));

	if (!jiffy_wait)
		jiffy_wait = 1;

	/*
	 * This wait time is without taking into consideration the rounding
	 * up we did. Add that time also.
	 */
	jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
	if (wait)
		*wait = jiffy_wait;
	return false;
}
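/*
 * Worked example (illustrative): with a 1 MB/s limit and one throtl_slice
 * (HZ / 10) elapsed, bytes_allowed is 1048576 / 10 = 104857.  A 256 KiB
 * bio with bytes_disp == 0 overshoots by extra_bytes =
 * 262144 - 104857 = 157287, giving jiffy_wait =
 * 157287 * HZ / 1048576 = 150 jiffies (150ms at HZ == 1000) before the
 * bio fits within the rate, plus the rounding compensation added above.
 */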
/*
 * Returns whether one can dispatch a bio or not. Also returns approx number
 * of jiffies to wait before this bio is within IO rate and can be dispatched
 */
static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
			    unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;

	/*
	 * Currently whole state machine of group depends on first bio
	 * queued in the group bio list. So one should not be calling
	 * this function with a different bio if there are other bios
	 * queued.
	 */
	BUG_ON(tg->service_queue.nr_queued[rw] &&
	       bio != throtl_peek_queued(&tg->service_queue.queued[rw]));

	/* If tg->bps = -1, then BW is unlimited */
	if (tg_bps_limit(tg, rw) == U64_MAX &&
	    tg_iops_limit(tg, rw) == UINT_MAX) {
		if (wait)
			*wait = 0;
		return true;
	}

	/*
	 * If previous slice expired, start a new one otherwise renew/extend
	 * existing slice to make sure it is at least throtl_slice interval
	 * long since now. New slice is started only for empty throttle group.
	 * If there is queued bio, that means there should be an active
	 * slice and it should be extended instead.
	 */
	if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw]))
		throtl_start_new_slice(tg, rw);
	else {
		if (time_before(tg->slice_end[rw],
		    jiffies + tg->td->throtl_slice))
			throtl_extend_slice(tg, rw,
				jiffies + tg->td->throtl_slice);
	}

	if (tg_with_in_bps_limit(tg, bio, &bps_wait) &&
	    tg_with_in_iops_limit(tg, bio, &iops_wait)) {
		if (wait)
			*wait = 0;
		return true;
	}

	max_wait = max(bps_wait, iops_wait);

	if (wait)
		*wait = max_wait;

	if (time_before(tg->slice_end[rw], jiffies + max_wait))
		throtl_extend_slice(tg, rw, jiffies + max_wait);

	return false;
}
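/*
 * Example (illustrative): if the bps check asks for a 20-jiffy wait and
 * the iops check for a 5-jiffy wait, max_wait is 20 jiffies and the slice
 * is extended to cover jiffies + 20, so the whole wait happens inside an
 * active slice.
 */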
static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
	bool rw = bio_data_dir(bio);

	/* Charge the bio to the group */
	tg->bytes_disp[rw] += bio->bi_iter.bi_size;
	tg->io_disp[rw]++;
	tg->last_bytes_disp[rw] += bio->bi_iter.bi_size;
	tg->last_io_disp[rw]++;

	/*
	 * BIO_THROTTLED is used to prevent the same bio from being
	 * throttled more than once, as a throttled bio will go through
	 * blk-throtl the second time when it eventually gets issued.  Set
	 * it when a bio is being charged to a tg.
	 */
	if (!bio_flagged(bio, BIO_THROTTLED))
		bio_set_flag(bio, BIO_THROTTLED);
}

/**
 * throtl_add_bio_tg - add a bio to the specified throtl_grp
 * @bio: bio to add
 * @qn: qnode to use
 * @tg: the target throtl_grp
 *
 * Add @bio to @tg's service_queue using @qn.  If @qn is not specified,
 * tg->qnode_on_self[] is used.
 */
static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn,
			      struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	bool rw = bio_data_dir(bio);

	if (!qn)
		qn = &tg->qnode_on_self[rw];

	/*
	 * If @tg doesn't currently have any bios queued in the same
	 * direction, queueing @bio can change when @tg should be
	 * dispatched.  Mark that @tg was empty.  This is automatically
	 * cleared on the next tg_update_disptime().
	 */
	if (!sq->nr_queued[rw])
		tg->flags |= THROTL_TG_WAS_EMPTY;

	throtl_qnode_add_bio(bio, qn, &sq->queued[rw]);

	sq->nr_queued[rw]++;
	throtl_enqueue_tg(tg);
}
static void tg_update_disptime(struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
	struct bio *bio;

	bio = throtl_peek_queued(&sq->queued[READ]);
	if (bio)
		tg_may_dispatch(tg, bio, &read_wait);

	bio = throtl_peek_queued(&sq->queued[WRITE]);
	if (bio)
		tg_may_dispatch(tg, bio, &write_wait);

	min_wait = min(read_wait, write_wait);
	disptime = jiffies + min_wait;

	/* Update dispatch time */
	throtl_dequeue_tg(tg);
	tg->disptime = disptime;
	throtl_enqueue_tg(tg);

	/* see throtl_add_bio_tg() */
	tg->flags &= ~THROTL_TG_WAS_EMPTY;
}

static void start_parent_slice_with_credit(struct throtl_grp *child_tg,
					   struct throtl_grp *parent_tg, bool rw)
{
	if (throtl_slice_used(parent_tg, rw)) {
		throtl_start_new_slice_with_credit(parent_tg, rw,
				child_tg->slice_start[rw]);
	}
}

static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	struct throtl_service_queue *parent_sq = sq->parent_sq;
	struct throtl_grp *parent_tg = sq_to_tg(parent_sq);
	struct throtl_grp *tg_to_put = NULL;
	struct bio *bio;

	/*
	 * @bio is being transferred from @tg to @parent_sq.  Popping a bio
	 * from @tg may put its reference and @parent_sq might end up
	 * getting released prematurely.  Remember the tg to put and put it
	 * after @bio is transferred to @parent_sq.
	 */
	bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
	sq->nr_queued[rw]--;

	throtl_charge_bio(tg, bio);

	/*
	 * If our parent is another tg, we just need to transfer @bio to
	 * the parent using throtl_add_bio_tg().  If our parent is
	 * @td->service_queue, @bio is ready to be issued.  Put it on its
	 * bio_lists[] and decrease total number queued.  The caller is
	 * responsible for issuing these bios.
	 */
	if (parent_tg) {
		throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
		start_parent_slice_with_credit(tg, parent_tg, rw);
	} else {
		throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
				     &parent_sq->queued[rw]);
		BUG_ON(tg->td->nr_queued[rw] <= 0);
		tg->td->nr_queued[rw]--;
	}

	throtl_trim_slice(tg, rw);

	if (tg_to_put)
		blkg_put(tg_to_blkg(tg_to_put));
}
static int throtl_dispatch_tg(struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	unsigned int nr_reads = 0, nr_writes = 0;
	unsigned int max_nr_reads = throtl_grp_quantum * 3 / 4;
	unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
	struct bio *bio;

	/* Try to dispatch 75% READS and 25% WRITES */

	while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
	       tg_may_dispatch(tg, bio, NULL)) {

		tg_dispatch_one_bio(tg, bio_data_dir(bio));
		nr_reads++;

		if (nr_reads >= max_nr_reads)
			break;
	}

	while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
	       tg_may_dispatch(tg, bio, NULL)) {

		tg_dispatch_one_bio(tg, bio_data_dir(bio));
		nr_writes++;

		if (nr_writes >= max_nr_writes)
			break;
	}

	return nr_reads + nr_writes;
}
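/*
 * Worked numbers (illustrative): with throtl_grp_quantum == 8,
 * max_nr_reads is 8 * 3 / 4 = 6 and max_nr_writes is 8 - 6 = 2, giving
 * the 75%/25% read/write split per dispatch round noted above.
 */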
static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
{
	unsigned int nr_disp = 0;

	while (1) {
		struct throtl_grp *tg = throtl_rb_first(parent_sq);
		struct throtl_service_queue *sq;

		if (!tg)
			break;

		if (time_before(jiffies, tg->disptime))
			break;

		throtl_dequeue_tg(tg);

		nr_disp += throtl_dispatch_tg(tg);

		sq = &tg->service_queue;
		if (sq->nr_queued[0] || sq->nr_queued[1])
			tg_update_disptime(tg);

		if (nr_disp >= throtl_quantum)
			break;
	}

	return nr_disp;
}

static bool throtl_can_upgrade(struct throtl_data *td,
			       struct throtl_grp *this_tg);
/**
 * throtl_pending_timer_fn - timer function for service_queue->pending_timer
 * @arg: the throtl_service_queue being serviced
 *
 * This timer is armed when a child throtl_grp with active bio's becomes
 * pending and queued on the service_queue's pending_tree and expires when
 * the first child throtl_grp should be dispatched.  This function
 * dispatches bio's from the children throtl_grps to the parent
 * service_queue.
 *
 * If the parent's parent is another throtl_grp, dispatching is propagated
 * by either arming its pending_timer or repeating dispatch directly.  If
 * the top-level service_tree is reached, throtl_data->dispatch_work is
 * kicked so that the ready bio's are issued.
 */
static void throtl_pending_timer_fn(unsigned long arg)
{
	struct throtl_service_queue *sq = (void *)arg;
	struct throtl_grp *tg = sq_to_tg(sq);
	struct throtl_data *td = sq_to_td(sq);
	struct request_queue *q = td->queue;
	struct throtl_service_queue *parent_sq;
	bool dispatched;
	int ret;

	spin_lock_irq(q->queue_lock);
	if (throtl_can_upgrade(td, NULL))
		throtl_upgrade_state(td);

again:
	parent_sq = sq->parent_sq;
	dispatched = false;

	while (true) {
		throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u",
			   sq->nr_queued[READ] + sq->nr_queued[WRITE],
			   sq->nr_queued[READ], sq->nr_queued[WRITE]);

		ret = throtl_select_dispatch(sq);
		if (ret) {
			throtl_log(sq, "bios disp=%u", ret);
			dispatched = true;
		}

		if (throtl_schedule_next_dispatch(sq, false))
			break;

		/* this dispatch window is still open, relax and repeat */
		spin_unlock_irq(q->queue_lock);
		cpu_relax();
		spin_lock_irq(q->queue_lock);
	}

	if (!dispatched)
		goto out_unlock;

	if (parent_sq) {
		/* @parent_sq is another throtl_grp, propagate dispatch */
		if (tg->flags & THROTL_TG_WAS_EMPTY) {
			tg_update_disptime(tg);
			if (!throtl_schedule_next_dispatch(parent_sq, false)) {
				/* window is already open, repeat dispatching */
				sq = parent_sq;
				tg = sq_to_tg(sq);
				goto again;
			}
		}
	} else {
		/* reached the top-level, queue issuing */
		queue_work(kthrotld_workqueue, &td->dispatch_work);
	}
out_unlock:
	spin_unlock_irq(q->queue_lock);
}
/**
 * blk_throtl_dispatch_work_fn - work function for throtl_data->dispatch_work
 * @work: work item being executed
 *
 * This function is queued for execution when bio's reach the bio_lists[]
 * of throtl_data->service_queue.  Those bio's are ready and issued by this
 * function.
 */
static void blk_throtl_dispatch_work_fn(struct work_struct *work)
{
	struct throtl_data *td = container_of(work, struct throtl_data,
					      dispatch_work);
	struct throtl_service_queue *td_sq = &td->service_queue;
	struct request_queue *q = td->queue;
	struct bio_list bio_list_on_stack;
	struct bio *bio;
	struct blk_plug plug;
	int rw;

	bio_list_init(&bio_list_on_stack);

	spin_lock_irq(q->queue_lock);
	for (rw = READ; rw <= WRITE; rw++)
		while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
			bio_list_add(&bio_list_on_stack, bio);
	spin_unlock_irq(q->queue_lock);

	if (!bio_list_empty(&bio_list_on_stack)) {
		blk_start_plug(&plug);
		while ((bio = bio_list_pop(&bio_list_on_stack)))
			generic_make_request(bio);
		blk_finish_plug(&plug);
	}
}
static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
			      int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	u64 v = *(u64 *)((void *)tg + off);

	if (v == U64_MAX)
		return 0;
	return __blkg_prfill_u64(sf, pd, v);
}

static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
			       int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	unsigned int v = *(unsigned int *)((void *)tg + off);

	if (v == UINT_MAX)
		return 0;
	return __blkg_prfill_u64(sf, pd, v);
}

static int tg_print_conf_u64(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_u64,
			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
	return 0;
}

static int tg_print_conf_uint(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_uint,
			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
	return 0;
}
static void tg_conf_updated(struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *blkg;

	throtl_log(&tg->service_queue,
		   "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
		   tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE),
		   tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE));

	/*
	 * Update has_rules[] flags for the updated tg's subtree.  A tg is
	 * considered to have rules if either the tg itself or any of its
	 * ancestors has rules.  This identifies groups without any
	 * restrictions in the whole hierarchy and allows them to bypass
	 * blk-throttle.
	 */
	blkg_for_each_descendant_pre(blkg, pos_css, tg_to_blkg(tg))
		tg_update_has_rules(blkg_to_tg(blkg));

	/*
	 * We're already holding queue_lock and know @tg is valid.  Let's
	 * apply the new config directly.
	 *
	 * Restart the slices for both READ and WRITES. It might happen
	 * that a group's limits are dropped suddenly and we don't want to
	 * account recently dispatched IO with new low rate.
	 */
	throtl_start_new_slice(tg, 0);
	throtl_start_new_slice(tg, 1);

	if (tg->flags & THROTL_TG_PENDING) {
		tg_update_disptime(tg);
		throtl_schedule_next_dispatch(sq->parent_sq, true);
	}
}
static ssize_t tg_set_conf(struct kernfs_open_file *of,
			   char *buf, size_t nbytes, loff_t off, bool is_u64)
{
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct blkg_conf_ctx ctx;
	struct throtl_grp *tg;
	int ret;
	u64 v;

	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
	if (ret)
		return ret;

	ret = -EINVAL;
	if (sscanf(ctx.body, "%llu", &v) != 1)
		goto out_finish;
	if (!v)
		v = U64_MAX;

	tg = blkg_to_tg(ctx.blkg);

	if (is_u64)
		*(u64 *)((void *)tg + of_cft(of)->private) = v;
	else
		*(unsigned int *)((void *)tg + of_cft(of)->private) = v;

	tg_conf_updated(tg);
	ret = 0;
out_finish:
	blkg_conf_finish(&ctx);
	return ret ?: nbytes;
}

static ssize_t tg_set_conf_u64(struct kernfs_open_file *of,
			       char *buf, size_t nbytes, loff_t off)
{
	return tg_set_conf(of, buf, nbytes, off, true);
}

static ssize_t tg_set_conf_uint(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	return tg_set_conf(of, buf, nbytes, off, false);
}
static struct cftype throtl_legacy_files[] = {
	{
		.name = "throttle.read_bps_device",
		.private = offsetof(struct throtl_grp, bps[READ][LIMIT_MAX]),
		.seq_show = tg_print_conf_u64,
		.write = tg_set_conf_u64,
	},
	{
		.name = "throttle.write_bps_device",
		.private = offsetof(struct throtl_grp, bps[WRITE][LIMIT_MAX]),
		.seq_show = tg_print_conf_u64,
		.write = tg_set_conf_u64,
	},
	{
		.name = "throttle.read_iops_device",
		.private = offsetof(struct throtl_grp, iops[READ][LIMIT_MAX]),
		.seq_show = tg_print_conf_uint,
		.write = tg_set_conf_uint,
	},
	{
		.name = "throttle.write_iops_device",
		.private = offsetof(struct throtl_grp, iops[WRITE][LIMIT_MAX]),
		.seq_show = tg_print_conf_uint,
		.write = tg_set_conf_uint,
	},
	{
		.name = "throttle.io_service_bytes",
		.private = (unsigned long)&blkcg_policy_throtl,
		.seq_show = blkg_print_stat_bytes,
	},
	{
		.name = "throttle.io_serviced",
		.private = (unsigned long)&blkcg_policy_throtl,
		.seq_show = blkg_print_stat_ios,
	},
	{ }	/* terminate */
};
static u64 tg_prfill_limit(struct seq_file *sf, struct blkg_policy_data *pd,
			   int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	const char *dname = blkg_dev_name(pd->blkg);
	char bufs[4][21] = { "max", "max", "max", "max" };
	u64 bps_dft;
	unsigned int iops_dft;

	if (!dname)
		return 0;

	if (off == LIMIT_LOW) {
		bps_dft = 0;
		iops_dft = 0;
	} else {
		bps_dft = U64_MAX;
		iops_dft = UINT_MAX;
	}

	if (tg->bps_conf[READ][off] == bps_dft &&
	    tg->bps_conf[WRITE][off] == bps_dft &&
	    tg->iops_conf[READ][off] == iops_dft &&
	    tg->iops_conf[WRITE][off] == iops_dft)
		return 0;

	if (tg->bps_conf[READ][off] != bps_dft)
		snprintf(bufs[0], sizeof(bufs[0]), "%llu",
			tg->bps_conf[READ][off]);
	if (tg->bps_conf[WRITE][off] != bps_dft)
		snprintf(bufs[1], sizeof(bufs[1]), "%llu",
			tg->bps_conf[WRITE][off]);
	if (tg->iops_conf[READ][off] != iops_dft)
		snprintf(bufs[2], sizeof(bufs[2]), "%u",
			tg->iops_conf[READ][off]);
	if (tg->iops_conf[WRITE][off] != iops_dft)
		snprintf(bufs[3], sizeof(bufs[3]), "%u",
			tg->iops_conf[WRITE][off]);

	seq_printf(sf, "%s rbps=%s wbps=%s riops=%s wiops=%s\n",
		   dname, bufs[0], bufs[1], bufs[2], bufs[3]);
	return 0;
}
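/*
 * Example output (illustrative): a group on device 8:16 with only a
 * 2 MB/s read bps limit configured would show
 *
 *	8:16 rbps=2097152 wbps=max riops=max wiops=max
 */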
static int tg_print_limit(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_limit,
			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
	return 0;
}

static ssize_t tg_set_limit(struct kernfs_open_file *of,
			    char *buf, size_t nbytes, loff_t off)
{
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct blkg_conf_ctx ctx;
	struct throtl_grp *tg;
	u64 v[4];
	int ret;
	int index = of_cft(of)->private;

	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
	if (ret)
		return ret;

	tg = blkg_to_tg(ctx.blkg);

	v[0] = tg->bps_conf[READ][index];
	v[1] = tg->bps_conf[WRITE][index];
	v[2] = tg->iops_conf[READ][index];
	v[3] = tg->iops_conf[WRITE][index];

	while (true) {
		char tok[27];	/* wiops=18446744073709551616 */
		char *p;
		u64 val = U64_MAX;
		int len;

		if (sscanf(ctx.body, "%26s%n", tok, &len) != 1)
			break;
		if (tok[0] == '\0')
			break;
		ctx.body += len;

		ret = -EINVAL;
		p = tok;
		strsep(&p, "=");
		if (!p || (sscanf(p, "%llu", &val) != 1 && strcmp(p, "max")))
			goto out_finish;

		ret = -ERANGE;
		if (!val)
			goto out_finish;

		ret = -EINVAL;
		if (!strcmp(tok, "rbps"))
			v[0] = val;
		else if (!strcmp(tok, "wbps"))
			v[1] = val;
		else if (!strcmp(tok, "riops"))
			v[2] = min_t(u64, val, UINT_MAX);
		else if (!strcmp(tok, "wiops"))
			v[3] = min_t(u64, val, UINT_MAX);
		else
			goto out_finish;
	}

	tg->bps_conf[READ][index] = v[0];
	tg->bps_conf[WRITE][index] = v[1];
	tg->iops_conf[READ][index] = v[2];
	tg->iops_conf[WRITE][index] = v[3];

	if (index == LIMIT_MAX) {
		tg->bps[READ][index] = v[0];
		tg->bps[WRITE][index] = v[1];
		tg->iops[READ][index] = v[2];
		tg->iops[WRITE][index] = v[3];
	}
	tg->bps[READ][LIMIT_LOW] = min(tg->bps_conf[READ][LIMIT_LOW],
		tg->bps_conf[READ][LIMIT_MAX]);
	tg->bps[WRITE][LIMIT_LOW] = min(tg->bps_conf[WRITE][LIMIT_LOW],
		tg->bps_conf[WRITE][LIMIT_MAX]);
	tg->iops[READ][LIMIT_LOW] = min(tg->iops_conf[READ][LIMIT_LOW],
		tg->iops_conf[READ][LIMIT_MAX]);
	tg->iops[WRITE][LIMIT_LOW] = min(tg->iops_conf[WRITE][LIMIT_LOW],
		tg->iops_conf[WRITE][LIMIT_MAX]);

	if (index == LIMIT_LOW) {
		blk_throtl_update_limit_valid(tg->td);
		if (tg->td->limit_valid[LIMIT_LOW])
			tg->td->limit_index = LIMIT_LOW;
	}
	tg_conf_updated(tg);
	ret = 0;
out_finish:
	blkg_conf_finish(&ctx);
	return ret ?: nbytes;
}
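/*
 * Usage sketch (illustrative): limits are written as "MAJ:MIN key=val ..."
 * where a value of "max" clears the limit, e.g. from a cgroup directory:
 *
 *	echo "8:16 rbps=2097152 wiops=120" > io.max
 *
 * sets a 2 MB/s read bandwidth limit and a 120 IOPS write limit on
 * device 8:16, leaving the other two limits untouched.
 */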
static struct cftype throtl_files[] = {
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	{
		.name = "low",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = tg_print_limit,
		.write = tg_set_limit,
		.private = LIMIT_LOW,
	},
#endif
	{
		.name = "max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = tg_print_limit,
		.write = tg_set_limit,
		.private = LIMIT_MAX,
	},
	{ }	/* terminate */
};

static void throtl_shutdown_wq(struct request_queue *q)
{
	struct throtl_data *td = q->td;

	cancel_work_sync(&td->dispatch_work);
}

static struct blkcg_policy blkcg_policy_throtl = {
	.dfl_cftypes		= throtl_files,
	.legacy_cftypes		= throtl_legacy_files,

	.pd_alloc_fn		= throtl_pd_alloc,
	.pd_init_fn		= throtl_pd_init,
	.pd_online_fn		= throtl_pd_online,
	.pd_offline_fn		= throtl_pd_offline,
	.pd_free_fn		= throtl_pd_free,
};
static unsigned long __tg_last_low_overflow_time(struct throtl_grp *tg)
{
	unsigned long rtime = jiffies, wtime = jiffies;

	if (tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW])
		rtime = tg->last_low_overflow_time[READ];
	if (tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW])
		wtime = tg->last_low_overflow_time[WRITE];
	return min(rtime, wtime);
}

/* tg should not be an intermediate node */
static unsigned long tg_last_low_overflow_time(struct throtl_grp *tg)
{
	struct throtl_service_queue *parent_sq;
	struct throtl_grp *parent = tg;
	unsigned long ret = __tg_last_low_overflow_time(tg);

	while (true) {
		parent_sq = parent->service_queue.parent_sq;
		parent = sq_to_tg(parent_sq);
		if (!parent)
			break;

		/*
		 * The parent doesn't have low limit, it always reaches low
		 * limit.  Its overflow time is useless for children.
		 */
		if (!parent->bps[READ][LIMIT_LOW] &&
		    !parent->iops[READ][LIMIT_LOW] &&
		    !parent->bps[WRITE][LIMIT_LOW] &&
		    !parent->iops[WRITE][LIMIT_LOW])
			continue;
		if (time_after(__tg_last_low_overflow_time(parent), ret))
			ret = __tg_last_low_overflow_time(parent);
	}

	return ret;
}
static bool throtl_tg_can_upgrade(struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	bool read_limit, write_limit;

	/*
	 * if cgroup reaches low limit (if low limit is 0, the cgroup always
	 * reaches), it's ok to upgrade to next limit
	 */
	read_limit = tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW];
	write_limit = tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW];
	if (!read_limit && !write_limit)
		return true;
	if (read_limit && sq->nr_queued[READ] &&
	    (!write_limit || sq->nr_queued[WRITE]))
		return true;
	if (write_limit && sq->nr_queued[WRITE] &&
	    (!read_limit || sq->nr_queued[READ]))
		return true;

	if (time_after_eq(jiffies,
	    tg->last_dispatch_time[READ] + tg->td->throtl_slice) &&
	    time_after_eq(jiffies,
	    tg->last_dispatch_time[WRITE] + tg->td->throtl_slice))
		return true;
	return false;
}
static bool throtl_hierarchy_can_upgrade(struct throtl_grp *tg)
{
	while (true) {
		if (throtl_tg_can_upgrade(tg))
			return true;
		tg = sq_to_tg(tg->service_queue.parent_sq);
		if (!tg || !tg_to_blkg(tg)->parent)
			return false;
	}
	return false;
}

static bool throtl_can_upgrade(struct throtl_data *td,
			       struct throtl_grp *this_tg)
{
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *blkg;

	if (td->limit_index != LIMIT_LOW)
		return false;

	if (time_before(jiffies, td->low_downgrade_time + td->throtl_slice))
		return false;

	rcu_read_lock();
	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
		struct throtl_grp *tg = blkg_to_tg(blkg);

		if (tg == this_tg)
			continue;
		if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
			continue;
		if (!throtl_hierarchy_can_upgrade(tg)) {
			rcu_read_unlock();
			return false;
		}
	}
	rcu_read_unlock();
	return true;
}
static void throtl_upgrade_state(struct throtl_data *td)
{
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *blkg;

	td->limit_index = LIMIT_MAX;
	td->low_upgrade_time = jiffies;
	td->scale = 0;
	rcu_read_lock();
	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
		struct throtl_grp *tg = blkg_to_tg(blkg);
		struct throtl_service_queue *sq = &tg->service_queue;

		tg->disptime = jiffies - 1;
		throtl_select_dispatch(sq);
		throtl_schedule_next_dispatch(sq, false);
	}
	rcu_read_unlock();
	throtl_select_dispatch(&td->service_queue);
	throtl_schedule_next_dispatch(&td->service_queue, false);
	queue_work(kthrotld_workqueue, &td->dispatch_work);
}

static void throtl_downgrade_state(struct throtl_data *td, int new)
{
	td->scale /= 2;

	if (td->scale) {
		td->low_upgrade_time = jiffies - td->scale * td->throtl_slice;
		return;
	}

	td->limit_index = new;
	td->low_downgrade_time = jiffies;
}
static bool throtl_tg_can_downgrade(struct throtl_grp *tg)
{
	struct throtl_data *td = tg->td;
	unsigned long now = jiffies;

	if (time_after_eq(now, tg->last_dispatch_time[READ] +
					td->throtl_slice) &&
	    time_after_eq(now, tg->last_dispatch_time[WRITE] +
					td->throtl_slice))
		return false;
	/*
	 * If cgroup is below low limit, consider downgrade and throttle other
	 * cgroups
	 */
	if (time_after_eq(now, td->low_upgrade_time + td->throtl_slice) &&
	    time_after_eq(now, tg_last_low_overflow_time(tg) +
					td->throtl_slice))
		return true;
	return false;
}

static bool throtl_hierarchy_can_downgrade(struct throtl_grp *tg)
{
	while (true) {
		if (!throtl_tg_can_downgrade(tg))
			return false;
		tg = sq_to_tg(tg->service_queue.parent_sq);
		if (!tg || !tg_to_blkg(tg)->parent)
			break;
	}
	return true;
}
static void throtl_downgrade_check(struct throtl_grp *tg)
{
	uint64_t bps;
	unsigned int iops;
	unsigned long elapsed_time;
	unsigned long now = jiffies;

	if (tg->td->limit_index != LIMIT_MAX ||
	    !tg->td->limit_valid[LIMIT_LOW])
		return;
	if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
		return;
	if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
		return;

	elapsed_time = now - tg->last_check_time;
	tg->last_check_time = now;

	if (time_before(now, tg_last_low_overflow_time(tg) +
			tg->td->throtl_slice))
		return;

	if (tg->bps[READ][LIMIT_LOW]) {
		bps = tg->last_bytes_disp[READ] * HZ;
		do_div(bps, elapsed_time);
		if (bps >= tg->bps[READ][LIMIT_LOW])
			tg->last_low_overflow_time[READ] = now;
	}

	if (tg->bps[WRITE][LIMIT_LOW]) {
		bps = tg->last_bytes_disp[WRITE] * HZ;
		do_div(bps, elapsed_time);
		if (bps >= tg->bps[WRITE][LIMIT_LOW])
			tg->last_low_overflow_time[WRITE] = now;
	}

	if (tg->iops[READ][LIMIT_LOW]) {
		iops = tg->last_io_disp[READ] * HZ / elapsed_time;
		if (iops >= tg->iops[READ][LIMIT_LOW])
			tg->last_low_overflow_time[READ] = now;
	}

	if (tg->iops[WRITE][LIMIT_LOW]) {
		iops = tg->last_io_disp[WRITE] * HZ / elapsed_time;
		if (iops >= tg->iops[WRITE][LIMIT_LOW])
			tg->last_low_overflow_time[WRITE] = now;
	}

	/*
	 * If cgroup is below low limit, consider downgrade and throttle other
	 * cgroups
	 */
	if (throtl_hierarchy_can_downgrade(tg))
		throtl_downgrade_state(tg->td, LIMIT_LOW);

	tg->last_bytes_disp[READ] = 0;
	tg->last_bytes_disp[WRITE] = 0;
	tg->last_io_disp[READ] = 0;
	tg->last_io_disp[WRITE] = 0;
}
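/*
 * Worked example (illustrative): if a group with a 1 MB/s read .low limit
 * dispatched last_bytes_disp[READ] == 2 MB over an elapsed_time of HZ / 2
 * (0.5s), the estimated rate is 2097152 * HZ / (HZ / 2) = 4 MB/s, which is
 * above the low limit, so last_low_overflow_time[READ] is refreshed: the
 * group is meeting its .low limit and won't trigger a downgrade to
 * LIMIT_LOW.
 */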
bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
		    struct bio *bio)
{
	struct throtl_qnode *qn = NULL;
	struct throtl_grp *tg = blkg_to_tg(blkg ?: q->root_blkg);
	struct throtl_service_queue *sq;
	bool rw = bio_data_dir(bio);
	bool throttled = false;

	WARN_ON_ONCE(!rcu_read_lock_held());

	/* see throtl_charge_bio() */
	if (bio_flagged(bio, BIO_THROTTLED) || !tg->has_rules[rw])
		goto out;

	spin_lock_irq(q->queue_lock);

	if (unlikely(blk_queue_bypass(q)))
		goto out_unlock;

	sq = &tg->service_queue;

	while (true) {
		tg->last_dispatch_time[rw] = jiffies;
		if (tg->last_low_overflow_time[rw] == 0)
			tg->last_low_overflow_time[rw] = jiffies;
		throtl_downgrade_check(tg);
		/* throtl is FIFO - if bios are already queued, should queue */
		if (sq->nr_queued[rw])
			break;

		/* if above limits, break to queue */
		if (!tg_may_dispatch(tg, bio, NULL)) {
			tg->last_low_overflow_time[rw] = jiffies;
			if (throtl_can_upgrade(tg->td, tg)) {
				throtl_upgrade_state(tg->td);
				/* fall back to check conditions */
				continue;
			}
			break;
		}

		/* within limits, let's charge and dispatch directly */
		throtl_charge_bio(tg, bio);

		/*
		 * We need to trim slice even when bios are not being queued
		 * otherwise it might happen that a bio is not queued for
		 * a long time and slice keeps on extending and trim is not
		 * called for a long time. Now if limits are reduced suddenly
		 * we take into account all the IO dispatched so far at new
		 * low rate and newly queued IO gets a really long dispatch
		 * time.
		 *
		 * So keep on trimming slice even if bio is not queued.
		 */
		throtl_trim_slice(tg, rw);

		/*
		 * @bio passed through this layer without being throttled.
		 * Climb up the ladder.  If we're already at the top, it
		 * can be executed directly.
		 */
		qn = &tg->qnode_on_parent[rw];
		sq = sq->parent_sq;
		tg = sq_to_tg(sq);
		if (!tg)
			goto out_unlock;
	}

	/* out-of-limit, queue to @tg */
	throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
		   rw == READ ? 'R' : 'W',
		   tg->bytes_disp[rw], bio->bi_iter.bi_size,
		   tg_bps_limit(tg, rw),
		   tg->io_disp[rw], tg_iops_limit(tg, rw),
		   sq->nr_queued[READ], sq->nr_queued[WRITE]);

	tg->last_low_overflow_time[rw] = jiffies;

	bio_associate_current(bio);
	tg->td->nr_queued[rw]++;
	throtl_add_bio_tg(bio, qn, tg);
	throttled = true;

	/*
	 * Update @tg's dispatch time and force schedule dispatch if @tg
	 * was empty before @bio.  The forced scheduling isn't likely to
	 * cause undue delay as @bio is likely to be dispatched directly if
	 * its @tg's disptime is not in the future.
	 */
	if (tg->flags & THROTL_TG_WAS_EMPTY) {
		tg_update_disptime(tg);
		throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true);
	}

out_unlock:
	spin_unlock_irq(q->queue_lock);
out:
	/*
	 * As multiple blk-throtls may stack in the same issue path, we
	 * don't want bios to leave with the flag set.  Clear the flag if
	 * being issued.
	 */
	if (!throttled)
		bio_clear_flag(bio, BIO_THROTTLED);
	return throttled;
}
/*
 * Dispatch all bios from all children tg's queued on @parent_sq.  On
 * return, @parent_sq is guaranteed to not have any active children tg's
 * and all bios from previously active tg's are on @parent_sq->bio_lists[].
 */
static void tg_drain_bios(struct throtl_service_queue *parent_sq)
{
	struct throtl_grp *tg;

	while ((tg = throtl_rb_first(parent_sq))) {
		struct throtl_service_queue *sq = &tg->service_queue;
		struct bio *bio;

		throtl_dequeue_tg(tg);

		while ((bio = throtl_peek_queued(&sq->queued[READ])))
			tg_dispatch_one_bio(tg, bio_data_dir(bio));
		while ((bio = throtl_peek_queued(&sq->queued[WRITE])))
			tg_dispatch_one_bio(tg, bio_data_dir(bio));
	}
}

/**
 * blk_throtl_drain - drain throttled bios
 * @q: request_queue to drain throttled bios for
 *
 * Dispatch all currently throttled bios on @q through ->make_request_fn().
 */
void blk_throtl_drain(struct request_queue *q)
	__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct throtl_data *td = q->td;
	struct blkcg_gq *blkg;
	struct cgroup_subsys_state *pos_css;
	struct bio *bio;
	int rw;

	queue_lockdep_assert_held(q);
	rcu_read_lock();

	/*
	 * Drain each tg while doing post-order walk on the blkg tree, so
	 * that all bios are propagated to td->service_queue.  It'd be
	 * better to walk service_queue tree directly but blkg walk is
	 * easier.
	 */
	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg)
		tg_drain_bios(&blkg_to_tg(blkg)->service_queue);

	/* finally, transfer bios from top-level tg's into the td */
	tg_drain_bios(&td->service_queue);

	rcu_read_unlock();
	spin_unlock_irq(q->queue_lock);

	/* all bios now should be in td->service_queue, issue them */
	for (rw = READ; rw <= WRITE; rw++)
		while ((bio = throtl_pop_queued(&td->service_queue.queued[rw],
						NULL)))
			generic_make_request(bio);

	spin_lock_irq(q->queue_lock);
}
int blk_throtl_init(struct request_queue *q)
{
	struct throtl_data *td;
	int ret;

	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
	if (!td)
		return -ENOMEM;

	INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
	throtl_service_queue_init(&td->service_queue);

	q->td = td;
	td->queue = q;

	td->limit_valid[LIMIT_MAX] = true;
	td->limit_index = LIMIT_MAX;
	td->low_upgrade_time = jiffies;
	td->low_downgrade_time = jiffies;

	/* activate policy */
	ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
	if (ret)
		kfree(td);
	return ret;
}

void blk_throtl_exit(struct request_queue *q)
{
	BUG_ON(!q->td);
	throtl_shutdown_wq(q);
	blkcg_deactivate_policy(q, &blkcg_policy_throtl);
	kfree(q->td);
}

void blk_throtl_register_queue(struct request_queue *q)
{
	struct throtl_data *td;

	td = q->td;
	BUG_ON(!td);

	if (blk_queue_nonrot(q))
		td->throtl_slice = DFL_THROTL_SLICE_SSD;
	else
		td->throtl_slice = DFL_THROTL_SLICE_HD;
#ifndef CONFIG_BLK_DEV_THROTTLING_LOW
	/* if no low limit, use previous default */
	td->throtl_slice = DFL_THROTL_SLICE_HD;
#endif
}
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page)
{
	if (!q->td)
		return -EINVAL;
	return sprintf(page, "%u\n", jiffies_to_msecs(q->td->throtl_slice));
}

ssize_t blk_throtl_sample_time_store(struct request_queue *q,
	const char *page, size_t count)
{
	unsigned long v;
	unsigned long t;

	if (!q->td)
		return -EINVAL;
	if (kstrtoul(page, 10, &v))
		return -EINVAL;
	t = msecs_to_jiffies(v);
	if (t == 0 || t > MAX_THROTL_SLICE)
		return -EINVAL;
	q->td->throtl_slice = t;
	return count;
}
#endif
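/*
 * Usage sketch (illustrative): the slice length is tunable at runtime via
 * the block queue's throttle_sample_time sysfs attribute (in ms), e.g.
 *
 *	echo 50 > /sys/block/sda/queue/throttle_sample_time
 *
 * which stores msecs_to_jiffies(50) into q->td->throtl_slice above.
 */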
static int __init throtl_init(void)
{
	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
	if (!kthrotld_workqueue)
		panic("Failed to create kthrotld\n");

	return blkcg_policy_register(&blkcg_policy_throtl);
}

module_init(throtl_init);