// SPDX-License-Identifier: GPL-2.0
/*
 * Interface for controlling IO bandwidth on a request queue
 *
 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blktrace_api.h>
#include <linux/blk-cgroup.h>
#include "blk-cgroup-rwstat.h"

/* Max dispatch from a group in 1 round */
#define THROTL_GRP_QUANTUM 8

/* Total max dispatch from all groups in one round */
#define THROTL_QUANTUM 32

/* Throttling is performed over a slice and after that slice is renewed */
#define DFL_THROTL_SLICE_HD (HZ / 10)
#define DFL_THROTL_SLICE_SSD (HZ / 50)
#define MAX_THROTL_SLICE (HZ)
#define MAX_IDLE_TIME (5L * 1000 * 1000) /* 5 s */
#define MIN_THROTL_BPS (320 * 1024)
#define MIN_THROTL_IOPS (10)
#define DFL_LATENCY_TARGET (-1L)
#define DFL_IDLE_THRESHOLD (0)
#define DFL_HD_BASELINE_LATENCY (4000L) /* 4ms */
#define LATENCY_FILTERED_SSD (0)
/*
 * For HD, very small latency comes from sequential IO. Such IO is helpless to
 * help determine if its IO is impacted by others, hence we ignore the IO
 */
#define LATENCY_FILTERED_HD (1000L) /* 1ms */

static struct blkcg_policy blkcg_policy_throtl;

/* A workqueue to queue throttle related work */
static struct workqueue_struct *kthrotld_workqueue;

#define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)
/*
 * To implement hierarchical throttling, throtl_grps form a tree and bios
 * are dispatched upwards level by level until they reach the top and get
 * issued.  When dispatching bios from the children and local group at each
 * level, if the bios are dispatched into a single bio_list, there's a risk
 * of a local or child group which can queue many bios at once filling up
 * the list starving others.
 *
 * To avoid such starvation, dispatched bios are queued separately
 * according to where they came from.  When they are again dispatched to
 * the parent, they're popped in round-robin order so that no single source
 * hogs the dispatch window.
 *
 * throtl_qnode is used to keep the queued bios separated by their sources.
 * Bios are queued to throtl_qnode which in turn is queued to
 * throtl_service_queue and then dispatched in round-robin order.
 *
 * It's also used to track the reference counts on blkg's.  A qnode always
 * belongs to a throtl_grp and gets queued on itself or the parent, so
 * incrementing the reference of the associated throtl_grp when a qnode is
 * queued and decrementing when dequeued is enough to keep the whole blkg
 * tree pinned while bios are in flight.
 */
struct throtl_qnode {
	struct list_head	node;		/* service_queue->queued[] */
	struct bio_list		bios;		/* queued bios */
	struct throtl_grp	*tg;		/* tg this qnode belongs to */
};

struct throtl_service_queue {
	struct throtl_service_queue *parent_sq;	/* the parent service_queue */

	/*
	 * Bios queued directly to this service_queue or dispatched from
	 * children throtl_grp's.
	 */
	struct list_head	queued[2];	/* throtl_qnode [READ/WRITE] */
	unsigned int		nr_queued[2];	/* number of queued bios */

	/*
	 * RB tree of active children throtl_grp's, which are sorted by
	 * their ->disptime.
	 */
	struct rb_root_cached	pending_tree;	/* RB tree of active tgs */
	unsigned int		nr_pending;	/* # queued in the tree */
	unsigned long		first_pending_disptime;	/* disptime of the first tg */
	struct timer_list	pending_timer;	/* fires on first_pending_disptime */
};

enum tg_state_flags {
	THROTL_TG_PENDING	= 1 << 0,	/* on parent's pending tree */
	THROTL_TG_WAS_EMPTY	= 1 << 1,	/* bio_lists[] became non-empty */
};

struct throtl_grp {
	/* must be the first member */
	struct blkg_policy_data pd;

	/* active throtl group service_queue member */
	struct rb_node rb_node;

	/* throtl_data this group belongs to */
	struct throtl_data *td;

	/* this group's service queue */
	struct throtl_service_queue service_queue;

	/*
	 * qnode_on_self is used when bios are directly queued to this
	 * throtl_grp so that local bios compete fairly with bios
	 * dispatched from children.  qnode_on_parent is used when bios are
	 * dispatched from this throtl_grp into its parent and will compete
	 * with the sibling qnode_on_parents and the parent's
	 * qnode_on_self.
	 */
	struct throtl_qnode qnode_on_self[2];
	struct throtl_qnode qnode_on_parent[2];

	/*
	 * Dispatch time in jiffies. This is the estimated time when group
	 * will unthrottle and is ready to dispatch more bio. It is used as
	 * key to sort active groups in service tree.
	 */
	unsigned long disptime;

	unsigned int flags;

	/* are there any throtl rules between this group and td? */
	bool has_rules[2];

	/* internally used bytes per second rate limits */
	uint64_t bps[2][LIMIT_CNT];
	/* user configured bps limits */
	uint64_t bps_conf[2][LIMIT_CNT];

	/* internally used IOPS limits */
	unsigned int iops[2][LIMIT_CNT];
	/* user configured IOPS limits */
	unsigned int iops_conf[2][LIMIT_CNT];

	/* Number of bytes dispatched in current slice */
	uint64_t bytes_disp[2];
	/* Number of bio's dispatched in current slice */
	unsigned int io_disp[2];

	unsigned long last_low_overflow_time[2];

	uint64_t last_bytes_disp[2];
	unsigned int last_io_disp[2];

	unsigned long last_check_time;

	unsigned long latency_target; /* us */
	unsigned long latency_target_conf; /* us */
	/* When did we start a new slice */
	unsigned long slice_start[2];
	unsigned long slice_end[2];

	unsigned long last_finish_time; /* ns / 1024 */
	unsigned long checked_last_finish_time; /* ns / 1024 */
	unsigned long avg_idletime; /* ns / 1024 */
	unsigned long idletime_threshold; /* us */
	unsigned long idletime_threshold_conf; /* us */

	unsigned int bio_cnt; /* total bios */
	unsigned int bad_bio_cnt; /* bios exceeding latency threshold */
	unsigned long bio_cnt_reset_time;

	atomic_t io_split_cnt[2];
	atomic_t last_io_split_cnt[2];

	struct blkg_rwstat stat_bytes;
	struct blkg_rwstat stat_ios;
};

/* We measure latency for request size from <= 4k to >= 1M */
#define LATENCY_BUCKET_SIZE 9

struct latency_bucket {
	unsigned long total_latency; /* ns / 1024 */
	int samples;
};

struct avg_latency_bucket {
	unsigned long latency; /* ns / 1024 */
	bool valid;
};

struct throtl_data
{
	/* service tree for active throtl groups */
	struct throtl_service_queue service_queue;

	struct request_queue *queue;

	/* Total Number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	unsigned int throtl_slice;

	/* Work for dispatching throttled bios */
	struct work_struct dispatch_work;
	unsigned int limit_index;
	bool limit_valid[LIMIT_CNT];

	unsigned long low_upgrade_time;
	unsigned long low_downgrade_time;

	unsigned int scale;

	struct latency_bucket tmp_buckets[2][LATENCY_BUCKET_SIZE];
	struct avg_latency_bucket avg_buckets[2][LATENCY_BUCKET_SIZE];
	struct latency_bucket __percpu *latency_buckets[2];
	unsigned long last_calculate_time;
	unsigned long filtered_latency;

	bool track_bio_latency;
};

static void throtl_pending_timer_fn(struct timer_list *t);

static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
}

static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
{
	return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
}

static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
{
	return pd_to_blkg(&tg->pd);
}
/**
 * sq_to_tg - return the throtl_grp the specified service queue belongs to
 * @sq: the throtl_service_queue of interest
 *
 * Return the throtl_grp @sq belongs to.  If @sq is the top-level one
 * embedded in throtl_data, %NULL is returned.
 */
static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq)
{
	if (sq && sq->parent_sq)
		return container_of(sq, struct throtl_grp, service_queue);
	else
		return NULL;
}

/**
 * sq_to_td - return throtl_data the specified service queue belongs to
 * @sq: the throtl_service_queue of interest
 *
 * A service_queue can be embedded in either a throtl_grp or throtl_data.
 * Determine the associated throtl_data accordingly and return it.
 */
static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
{
	struct throtl_grp *tg = sq_to_tg(sq);

	if (tg)
		return tg->td;
	else
		return container_of(sq, struct throtl_data, service_queue);
}
/*
 * cgroup's limit in LIMIT_MAX is scaled if low limit is set. This scale is to
 * make the IO dispatch more smooth.
 * Scale up: linearly scale up according to lapsed time since upgrade. For
 *           every throtl_slice, the limit scales up 1/2 .low limit till the
 *           limit hits .max limit
 * Scale down: exponentially scale down if a cgroup doesn't hit its .low limit
 */
static uint64_t throtl_adjusted_limit(uint64_t low, struct throtl_data *td)
{
	/* arbitrary value to avoid too big scale */
	if (td->scale < 4096 && time_after_eq(jiffies,
	    td->low_upgrade_time + td->scale * td->throtl_slice))
		td->scale = (jiffies - td->low_upgrade_time) / td->throtl_slice;

	return low + (low >> 1) * td->scale;
}
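
/*
 * Editor's note, a worked example of the scale-up above (illustrative
 * numbers, not from the source): with a .low limit of 10MB/s and a
 * throtl_slice of 100ms, four slices (400ms) after an upgrade td->scale
 * becomes 4, so the adjusted limit is 10MB/s + (10MB/s >> 1) * 4 = 30MB/s.
 * The callers (tg_bps_limit()/tg_iops_limit()) then clamp this against the
 * .max limit, so the effective limit ramps linearly from .low toward .max.
 */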
static uint64_t tg_bps_limit(struct throtl_grp *tg, int rw)
{
	struct blkcg_gq *blkg = tg_to_blkg(tg);
	struct throtl_data *td;
	uint64_t ret;

	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
		return U64_MAX;

	td = tg->td;
	ret = tg->bps[rw][td->limit_index];
	if (ret == 0 && td->limit_index == LIMIT_LOW) {
		/* intermediate node or iops isn't 0 */
		if (!list_empty(&blkg->blkcg->css.children) ||
		    tg->iops[rw][td->limit_index])
			return U64_MAX;
		else
			return MIN_THROTL_BPS;
	}

	if (td->limit_index == LIMIT_MAX && tg->bps[rw][LIMIT_LOW] &&
	    tg->bps[rw][LIMIT_LOW] != tg->bps[rw][LIMIT_MAX]) {
		uint64_t adjusted;

		adjusted = throtl_adjusted_limit(tg->bps[rw][LIMIT_LOW], td);
		ret = min(tg->bps[rw][LIMIT_MAX], adjusted);
	}
	return ret;
}

static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
{
	struct blkcg_gq *blkg = tg_to_blkg(tg);
	struct throtl_data *td;
	unsigned int ret;

	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
		return UINT_MAX;

	td = tg->td;
	ret = tg->iops[rw][td->limit_index];
	if (ret == 0 && tg->td->limit_index == LIMIT_LOW) {
		/* intermediate node or bps isn't 0 */
		if (!list_empty(&blkg->blkcg->css.children) ||
		    tg->bps[rw][td->limit_index])
			return UINT_MAX;
		else
			return MIN_THROTL_IOPS;
	}

	if (td->limit_index == LIMIT_MAX && tg->iops[rw][LIMIT_LOW] &&
	    tg->iops[rw][LIMIT_LOW] != tg->iops[rw][LIMIT_MAX]) {
		uint64_t adjusted;

		adjusted = throtl_adjusted_limit(tg->iops[rw][LIMIT_LOW], td);
		if (adjusted > UINT_MAX)
			adjusted = UINT_MAX;
		ret = min_t(unsigned int, tg->iops[rw][LIMIT_MAX], adjusted);
	}
	return ret;
}

#define request_bucket_index(sectors) \
	clamp_t(int, order_base_2(sectors) - 3, 0, LATENCY_BUCKET_SIZE - 1)
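
/*
 * Editor's note, an illustrative mapping of the bucket index above
 * (example sizes, not from the source): a 4KB request is 8 sectors, so
 * order_base_2(8) - 3 = 0 -> bucket 0; a 64KB request (128 sectors) gives
 * 7 - 3 = 4 -> bucket 4; anything >= 1MB (2048 sectors, order 11) clamps
 * to bucket 8.  That yields the nine power-of-two size classes referenced
 * by the LATENCY_BUCKET_SIZE comment.
 */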
/**
 * throtl_log - log debug message via blktrace
 * @sq: the service_queue being reported
 * @fmt: printf format string
 * @args: printf args
 *
 * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
 * throtl_grp; otherwise, just "throtl".
 */
#define throtl_log(sq, fmt, args...)	do {				\
	struct throtl_grp *__tg = sq_to_tg((sq));			\
	struct throtl_data *__td = sq_to_td((sq));			\
									\
	(void)__td;							\
	if (likely(!blk_trace_note_message_enabled(__td->queue)))	\
		break;							\
	if ((__tg)) {							\
		blk_add_cgroup_trace_msg(__td->queue,			\
			tg_to_blkg(__tg)->blkcg, "throtl " fmt, ##args);\
	} else {							\
		blk_add_trace_msg(__td->queue, "throtl " fmt, ##args);	\
	}								\
} while (0)

static inline unsigned int throtl_bio_data_size(struct bio *bio)
{
	/* assume it's one sector */
	if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
		return 512;
	return bio->bi_iter.bi_size;
}

static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
{
	INIT_LIST_HEAD(&qn->node);
	bio_list_init(&qn->bios);
	qn->tg = tg;
}

/**
 * throtl_qnode_add_bio - add a bio to a throtl_qnode and activate it
 * @bio: bio being added
 * @qn: qnode to add bio to
 * @queued: the service_queue->queued[] list @qn belongs to
 *
 * Add @bio to @qn and put @qn on @queued if it's not already on.
 * @qn->tg's reference count is bumped when @qn is activated.  See the
 * comment on top of throtl_qnode definition for details.
 */
static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,
				 struct list_head *queued)
{
	bio_list_add(&qn->bios, bio);
	if (list_empty(&qn->node)) {
		list_add_tail(&qn->node, queued);
		blkg_get(tg_to_blkg(qn->tg));
	}
}

/**
 * throtl_peek_queued - peek the first bio on a qnode list
 * @queued: the qnode list to peek
 */
static struct bio *throtl_peek_queued(struct list_head *queued)
{
	struct throtl_qnode *qn;
	struct bio *bio;

	if (list_empty(queued))
		return NULL;

	qn = list_first_entry(queued, struct throtl_qnode, node);
	bio = bio_list_peek(&qn->bios);
	WARN_ON_ONCE(!bio);
	return bio;
}
/**
 * throtl_pop_queued - pop the first bio from a qnode list
 * @queued: the qnode list to pop a bio from
 * @tg_to_put: optional out argument for throtl_grp to put
 *
 * Pop the first bio from the qnode list @queued.  After popping, the first
 * qnode is removed from @queued if empty or moved to the end of @queued so
 * that the popping order is round-robin.
 *
 * When the first qnode is removed, its associated throtl_grp should be put
 * too.  If @tg_to_put is NULL, this function automatically puts it;
 * otherwise, *@tg_to_put is set to the throtl_grp to put and the caller is
 * responsible for putting it.
 */
static struct bio *throtl_pop_queued(struct list_head *queued,
				     struct throtl_grp **tg_to_put)
{
	struct throtl_qnode *qn;
	struct bio *bio;

	if (list_empty(queued))
		return NULL;

	qn = list_first_entry(queued, struct throtl_qnode, node);
	bio = bio_list_pop(&qn->bios);
	WARN_ON_ONCE(!bio);

	if (bio_list_empty(&qn->bios)) {
		list_del_init(&qn->node);
		if (tg_to_put)
			*tg_to_put = qn->tg;
		else
			blkg_put(tg_to_blkg(qn->tg));
	} else {
		list_move_tail(&qn->node, queued);
	}

	return bio;
}
/* init a service_queue, assumes the caller zeroed it */
static void throtl_service_queue_init(struct throtl_service_queue *sq)
{
	INIT_LIST_HEAD(&sq->queued[0]);
	INIT_LIST_HEAD(&sq->queued[1]);
	sq->pending_tree = RB_ROOT_CACHED;
	timer_setup(&sq->pending_timer, throtl_pending_timer_fn, 0);
}

static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp,
						struct request_queue *q,
						struct blkcg *blkcg)
{
	struct throtl_grp *tg;
	int rw;

	tg = kzalloc_node(sizeof(*tg), gfp, q->node);
	if (!tg)
		return NULL;

	if (blkg_rwstat_init(&tg->stat_bytes, gfp))
		goto err_free_tg;

	if (blkg_rwstat_init(&tg->stat_ios, gfp))
		goto err_exit_stat_bytes;

	throtl_service_queue_init(&tg->service_queue);

	for (rw = READ; rw <= WRITE; rw++) {
		throtl_qnode_init(&tg->qnode_on_self[rw], tg);
		throtl_qnode_init(&tg->qnode_on_parent[rw], tg);
	}

	RB_CLEAR_NODE(&tg->rb_node);
	tg->bps[READ][LIMIT_MAX] = U64_MAX;
	tg->bps[WRITE][LIMIT_MAX] = U64_MAX;
	tg->iops[READ][LIMIT_MAX] = UINT_MAX;
	tg->iops[WRITE][LIMIT_MAX] = UINT_MAX;
	tg->bps_conf[READ][LIMIT_MAX] = U64_MAX;
	tg->bps_conf[WRITE][LIMIT_MAX] = U64_MAX;
	tg->iops_conf[READ][LIMIT_MAX] = UINT_MAX;
	tg->iops_conf[WRITE][LIMIT_MAX] = UINT_MAX;
	/* LIMIT_LOW will have default value 0 */

	tg->latency_target = DFL_LATENCY_TARGET;
	tg->latency_target_conf = DFL_LATENCY_TARGET;
	tg->idletime_threshold = DFL_IDLE_THRESHOLD;
	tg->idletime_threshold_conf = DFL_IDLE_THRESHOLD;

	return &tg->pd;

err_exit_stat_bytes:
	blkg_rwstat_exit(&tg->stat_bytes);
err_free_tg:
	kfree(tg);
	return NULL;
}

static void throtl_pd_init(struct blkg_policy_data *pd)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	struct blkcg_gq *blkg = tg_to_blkg(tg);
	struct throtl_data *td = blkg->q->td;
	struct throtl_service_queue *sq = &tg->service_queue;

	/*
	 * If on the default hierarchy, we switch to properly hierarchical
	 * behavior where limits on a given throtl_grp are applied to the
	 * whole subtree rather than just the group itself.  e.g. If 16M
	 * read_bps limit is set on the root group, the whole system can't
	 * exceed 16M for the device.
	 *
	 * If not on the default hierarchy, the broken flat hierarchy
	 * behavior is retained where all throtl_grps are treated as if
	 * they're all separate root groups right below throtl_data.
	 * Limits of a group don't interact with limits of other groups
	 * regardless of the position of the group in the hierarchy.
	 */
	sq->parent_sq = &td->service_queue;
	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent)
		sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
	tg->td = td;
}

/*
 * Set has_rules[] if @tg or any of its parents have limits configured.
 * This doesn't require walking up to the top of the hierarchy as the
 * parent's has_rules[] is guaranteed to be correct.
 */
static void tg_update_has_rules(struct throtl_grp *tg)
{
	struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq);
	struct throtl_data *td = tg->td;
	int rw;

	for (rw = READ; rw <= WRITE; rw++)
		tg->has_rules[rw] = (parent_tg && parent_tg->has_rules[rw]) ||
			(td->limit_valid[td->limit_index] &&
			 (tg_bps_limit(tg, rw) != U64_MAX ||
			  tg_iops_limit(tg, rw) != UINT_MAX));
}

static void throtl_pd_online(struct blkg_policy_data *pd)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	/*
	 * We don't want new groups to escape the limits of its ancestors.
	 * Update has_rules[] after a new group is brought online.
	 */
	tg_update_has_rules(tg);
}

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
static void blk_throtl_update_limit_valid(struct throtl_data *td)
{
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *blkg;
	bool low_valid = false;

	rcu_read_lock();
	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
		struct throtl_grp *tg = blkg_to_tg(blkg);

		if (tg->bps[READ][LIMIT_LOW] || tg->bps[WRITE][LIMIT_LOW] ||
		    tg->iops[READ][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) {
			low_valid = true;
			break;
		}
	}
	rcu_read_unlock();

	td->limit_valid[LIMIT_LOW] = low_valid;
}
#else
static inline void blk_throtl_update_limit_valid(struct throtl_data *td)
{
}
#endif

static void throtl_upgrade_state(struct throtl_data *td);
static void throtl_pd_offline(struct blkg_policy_data *pd)
{
	struct throtl_grp *tg = pd_to_tg(pd);

	tg->bps[READ][LIMIT_LOW] = 0;
	tg->bps[WRITE][LIMIT_LOW] = 0;
	tg->iops[READ][LIMIT_LOW] = 0;
	tg->iops[WRITE][LIMIT_LOW] = 0;

	blk_throtl_update_limit_valid(tg->td);

	if (!tg->td->limit_valid[tg->td->limit_index])
		throtl_upgrade_state(tg->td);
}

static void throtl_pd_free(struct blkg_policy_data *pd)
{
	struct throtl_grp *tg = pd_to_tg(pd);

	del_timer_sync(&tg->service_queue.pending_timer);
	blkg_rwstat_exit(&tg->stat_bytes);
	blkg_rwstat_exit(&tg->stat_ios);
	kfree(tg);
}

static struct throtl_grp *
throtl_rb_first(struct throtl_service_queue *parent_sq)
{
	struct rb_node *n;

	n = rb_first_cached(&parent_sq->pending_tree);
	WARN_ON_ONCE(!n);
	if (!n)
		return NULL;
	return rb_entry_tg(n);
}

static void throtl_rb_erase(struct rb_node *n,
			    struct throtl_service_queue *parent_sq)
{
	rb_erase_cached(n, &parent_sq->pending_tree);
	RB_CLEAR_NODE(n);
	--parent_sq->nr_pending;
}

static void update_min_dispatch_time(struct throtl_service_queue *parent_sq)
{
	struct throtl_grp *tg;

	tg = throtl_rb_first(parent_sq);
	if (!tg)
		return;

	parent_sq->first_pending_disptime = tg->disptime;
}

static void tg_service_queue_add(struct throtl_grp *tg)
{
	struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq;
	struct rb_node **node = &parent_sq->pending_tree.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct throtl_grp *__tg;
	unsigned long key = tg->disptime;
	bool leftmost = true;

	while (*node != NULL) {
		parent = *node;
		__tg = rb_entry_tg(parent);

		if (time_before(key, __tg->disptime))
			node = &parent->rb_left;
		else {
			node = &parent->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&tg->rb_node, parent, node);
	rb_insert_color_cached(&tg->rb_node, &parent_sq->pending_tree,
			       leftmost);
}

static void throtl_enqueue_tg(struct throtl_grp *tg)
{
	if (!(tg->flags & THROTL_TG_PENDING)) {
		tg_service_queue_add(tg);
		tg->flags |= THROTL_TG_PENDING;
		tg->service_queue.parent_sq->nr_pending++;
	}
}

static void throtl_dequeue_tg(struct throtl_grp *tg)
{
	if (tg->flags & THROTL_TG_PENDING) {
		throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq);
		tg->flags &= ~THROTL_TG_PENDING;
	}
}

/* Call with queue lock held */
static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
					  unsigned long expires)
{
	unsigned long max_expire = jiffies + 8 * sq_to_td(sq)->throtl_slice;

	/*
	 * Since we are adjusting the throttle limit dynamically, the sleep
	 * time calculated according to previous limit might be invalid. It's
	 * possible the cgroup sleep time is very long and no other cgroups
	 * have IO running so notify the limit changes. Make sure the cgroup
	 * doesn't sleep too long to avoid the missed notification.
	 */
	if (time_after(expires, max_expire))
		expires = max_expire;
	mod_timer(&sq->pending_timer, expires);
	throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu",
		   expires - jiffies, jiffies);
}

/**
 * throtl_schedule_next_dispatch - schedule the next dispatch cycle
 * @sq: the service_queue to schedule dispatch for
 * @force: force scheduling
 *
 * Arm @sq->pending_timer so that the next dispatch cycle starts on the
 * dispatch time of the first pending child.  Returns %true if either timer
 * is armed or there's no pending child left.  %false if the current
 * dispatch window is still open and the caller should continue
 * dispatching.
 *
 * If @force is %true, the dispatch timer is always scheduled and this
 * function is guaranteed to return %true.  This is to be used when the
 * caller can't dispatch itself and needs to invoke pending_timer
 * unconditionally.  Note that forced scheduling is likely to induce short
 * delay before dispatch starts even if @sq->first_pending_disptime is not
 * in the future and thus shouldn't be used in hot paths.
 */
static bool throtl_schedule_next_dispatch(struct throtl_service_queue *sq,
					  bool force)
{
	/* any pending children left? */
	if (!sq->nr_pending)
		return true;

	update_min_dispatch_time(sq);

	/* is the next dispatch time in the future? */
	if (force || time_after(sq->first_pending_disptime, jiffies)) {
		throtl_schedule_pending_timer(sq, sq->first_pending_disptime);
		return true;
	}

	/* tell the caller to continue dispatching */
	return false;
}

static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
		bool rw, unsigned long start)
{
	tg->bytes_disp[rw] = 0;
	tg->io_disp[rw] = 0;

	atomic_set(&tg->io_split_cnt[rw], 0);

	/*
	 * Previous slice has expired. We must have trimmed it after last
	 * bio dispatch. That means since start of last slice, we never used
	 * that bandwidth. Do try to make use of that bandwidth while giving
	 * credit.
	 */
	if (time_after_eq(start, tg->slice_start[rw]))
		tg->slice_start[rw] = start;

	tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
	throtl_log(&tg->service_queue,
		   "[%c] new slice with credit start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
		   tg->slice_end[rw], jiffies);
}

static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
{
	tg->bytes_disp[rw] = 0;
	tg->io_disp[rw] = 0;
	tg->slice_start[rw] = jiffies;
	tg->slice_end[rw] = jiffies + tg->td->throtl_slice;

	atomic_set(&tg->io_split_cnt[rw], 0);

	throtl_log(&tg->service_queue,
		   "[%c] new slice start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
		   tg->slice_end[rw], jiffies);
}

static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
					unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
}

static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
				       unsigned long jiffy_end)
{
	throtl_set_slice_end(tg, rw, jiffy_end);
	throtl_log(&tg->service_queue,
		   "[%c] extend slice start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
		   tg->slice_end[rw], jiffies);
}

/* Determine if previously allocated or extended slice is complete or not */
static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
{
	if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
		return false;

	return true;
}
/* Trim the used slices and adjust slice start accordingly */
static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
{
	unsigned long nr_slices, time_elapsed, io_trim;
	u64 bytes_trim, tmp;

	BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));

	/*
	 * If bps are unlimited (-1), then time slices don't get
	 * renewed. Don't try to trim the slice if slice is used. A new
	 * slice will start when appropriate.
	 */
	if (throtl_slice_used(tg, rw))
		return;

	/*
	 * A bio has been dispatched. Also adjust slice_end. It might happen
	 * that initially cgroup limit was very low resulting in high
	 * slice_end, but later limit was bumped up and bio was dispatched
	 * sooner, then we need to reduce slice_end. A high bogus slice_end
	 * is bad because it does not allow new slice to start.
	 */

	throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice);

	time_elapsed = jiffies - tg->slice_start[rw];

	nr_slices = time_elapsed / tg->td->throtl_slice;

	if (!nr_slices)
		return;
	tmp = tg_bps_limit(tg, rw) * tg->td->throtl_slice * nr_slices;
	do_div(tmp, HZ);
	bytes_trim = tmp;

	io_trim = (tg_iops_limit(tg, rw) * tg->td->throtl_slice * nr_slices) /
		HZ;

	if (!bytes_trim && !io_trim)
		return;

	if (tg->bytes_disp[rw] >= bytes_trim)
		tg->bytes_disp[rw] -= bytes_trim;
	else
		tg->bytes_disp[rw] = 0;

	if (tg->io_disp[rw] >= io_trim)
		tg->io_disp[rw] -= io_trim;
	else
		tg->io_disp[rw] = 0;

	tg->slice_start[rw] += nr_slices * tg->td->throtl_slice;

	throtl_log(&tg->service_queue,
		   "[%c] trim slice nr=%lu bytes=%llu io=%lu start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
		   tg->slice_start[rw], tg->slice_end[rw], jiffies);
}
static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
				  u32 iops_limit, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned int io_allowed;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
	u64 tmp;

	if (iops_limit == UINT_MAX) {
		if (wait)
			*wait = 0;
		return true;
	}

	jiffy_elapsed = jiffies - tg->slice_start[rw];

	/* Round up to the next throttle slice, wait time must be nonzero */
	jiffy_elapsed_rnd = roundup(jiffy_elapsed + 1, tg->td->throtl_slice);

	/*
	 * jiffy_elapsed_rnd should not be a big value as minimum iops can be
	 * 1 then at max jiffy elapsed should be equivalent of 1 second as we
	 * will allow dispatch after 1 second and after that slice should
	 * have been trimmed.
	 */

	tmp = (u64)iops_limit * jiffy_elapsed_rnd;
	do_div(tmp, HZ);

	if (tmp > UINT_MAX)
		io_allowed = UINT_MAX;
	else
		io_allowed = tmp;

	if (tg->io_disp[rw] + 1 <= io_allowed) {
		if (wait)
			*wait = 0;
		return true;
	}

	/* Calc approx time to dispatch */
	jiffy_wait = jiffy_elapsed_rnd - jiffy_elapsed;

	if (wait)
		*wait = jiffy_wait;
	return false;
}

static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
				 u64 bps_limit, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	u64 bytes_allowed, extra_bytes, tmp;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
	unsigned int bio_size = throtl_bio_data_size(bio);

	if (bps_limit == U64_MAX) {
		if (wait)
			*wait = 0;
		return true;
	}

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = tg->td->throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);

	tmp = bps_limit * jiffy_elapsed_rnd;
	do_div(tmp, HZ);
	bytes_allowed = tmp;

	if (tg->bytes_disp[rw] + bio_size <= bytes_allowed) {
		if (wait)
			*wait = 0;
		return true;
	}

	/* Calc approx time to dispatch */
	extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed;
	jiffy_wait = div64_u64(extra_bytes * HZ, bps_limit);

	if (!jiffy_wait)
		jiffy_wait = 1;

	/*
	 * This wait time is without taking into consideration the rounding
	 * up we did. Add that time also.
	 */
	jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
	if (wait)
		*wait = jiffy_wait;
	return false;
}
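
/*
 * Editor's note, a worked example of the wait computation above
 * (illustrative numbers, not from the source): with bps_limit = 1MB/s,
 * HZ = 1000 and a slice rounded up to 100ms, bytes_allowed comes to
 * 1MB * 100 / 1000 = 100KB.  If the group has already dispatched 90KB in
 * this slice and the bio is 64KB, extra_bytes = 54KB and jiffy_wait =
 * 54KB * 1000 / 1MB, roughly 53 jiffies (~53ms), plus whatever the slice
 * rounding added on top.
 */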
/*
 * Returns whether one can dispatch a bio or not. Also returns approx number
 * of jiffies to wait before this bio is within the IO rate and can be dispatched
 */
static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
			    unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;
	u64 bps_limit = tg_bps_limit(tg, rw);
	u32 iops_limit = tg_iops_limit(tg, rw);

	/*
	 * Currently whole state machine of group depends on first bio
	 * queued in the group bio list. So one should not be calling
	 * this function with a different bio if there are other bios
	 * queued.
	 */
	BUG_ON(tg->service_queue.nr_queued[rw] &&
	       bio != throtl_peek_queued(&tg->service_queue.queued[rw]));

	/* If tg->bps = -1, then BW is unlimited */
	if (bps_limit == U64_MAX && iops_limit == UINT_MAX) {
		if (wait)
			*wait = 0;
		return true;
	}

	/*
	 * If previous slice expired, start a new one otherwise renew/extend
	 * existing slice to make sure it is at least throtl_slice interval
	 * long since now. New slice is started only for empty throttle group.
	 * If there is queued bio, that means there should be an active
	 * slice and it should be extended instead.
	 */
	if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw]))
		throtl_start_new_slice(tg, rw);
	else {
		if (time_before(tg->slice_end[rw],
		    jiffies + tg->td->throtl_slice))
			throtl_extend_slice(tg, rw,
				jiffies + tg->td->throtl_slice);
	}

	if (iops_limit != UINT_MAX)
		tg->io_disp[rw] += atomic_xchg(&tg->io_split_cnt[rw], 0);

	if (tg_with_in_bps_limit(tg, bio, bps_limit, &bps_wait) &&
	    tg_with_in_iops_limit(tg, bio, iops_limit, &iops_wait)) {
		if (wait)
			*wait = 0;
		return true;
	}

	max_wait = max(bps_wait, iops_wait);

	if (wait)
		*wait = max_wait;

	if (time_before(tg->slice_end[rw], jiffies + max_wait))
		throtl_extend_slice(tg, rw, jiffies + max_wait);

	return false;
}

static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
	bool rw = bio_data_dir(bio);
	unsigned int bio_size = throtl_bio_data_size(bio);

	/* Charge the bio to the group */
	tg->bytes_disp[rw] += bio_size;
	tg->io_disp[rw]++;
	tg->last_bytes_disp[rw] += bio_size;
	tg->last_io_disp[rw]++;

	/*
	 * BIO_THROTTLED is used to prevent the same bio from being throttled
	 * more than once as a throttled bio will go through blk-throtl the
	 * second time when it eventually gets issued.  Set it when a bio
	 * is being charged to a tg.
	 */
	if (!bio_flagged(bio, BIO_THROTTLED))
		bio_set_flag(bio, BIO_THROTTLED);
}
/**
 * throtl_add_bio_tg - add a bio to the specified throtl_grp
 * @bio: bio to add
 * @qn: qnode to use
 * @tg: the target throtl_grp
 *
 * Add @bio to @tg's service_queue using @qn.  If @qn is not specified,
 * tg->qnode_on_self[] is used.
 */
static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn,
			      struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	bool rw = bio_data_dir(bio);

	if (!qn)
		qn = &tg->qnode_on_self[rw];

	/*
	 * If @tg doesn't currently have any bios queued in the same
	 * direction, queueing @bio can change when @tg should be
	 * dispatched.  Mark that @tg was empty.  This is automatically
	 * cleared on the next tg_update_disptime().
	 */
	if (!sq->nr_queued[rw])
		tg->flags |= THROTL_TG_WAS_EMPTY;

	throtl_qnode_add_bio(bio, qn, &sq->queued[rw]);

	sq->nr_queued[rw]++;
	throtl_enqueue_tg(tg);
}

static void tg_update_disptime(struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
	struct bio *bio;

	bio = throtl_peek_queued(&sq->queued[READ]);
	if (bio)
		tg_may_dispatch(tg, bio, &read_wait);

	bio = throtl_peek_queued(&sq->queued[WRITE]);
	if (bio)
		tg_may_dispatch(tg, bio, &write_wait);

	min_wait = min(read_wait, write_wait);
	disptime = jiffies + min_wait;

	/* Update dispatch time */
	throtl_dequeue_tg(tg);
	tg->disptime = disptime;
	throtl_enqueue_tg(tg);

	/* see throtl_add_bio_tg() */
	tg->flags &= ~THROTL_TG_WAS_EMPTY;
}

static void start_parent_slice_with_credit(struct throtl_grp *child_tg,
					struct throtl_grp *parent_tg, bool rw)
{
	if (throtl_slice_used(parent_tg, rw)) {
		throtl_start_new_slice_with_credit(parent_tg, rw,
				child_tg->slice_start[rw]);
	}
}

static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	struct throtl_service_queue *parent_sq = sq->parent_sq;
	struct throtl_grp *parent_tg = sq_to_tg(parent_sq);
	struct throtl_grp *tg_to_put = NULL;
	struct bio *bio;

	/*
	 * @bio is being transferred from @tg to @parent_sq.  Popping a bio
	 * from @tg may put its reference and @parent_sq might end up
	 * getting released prematurely.  Remember the tg to put and put it
	 * after @bio is transferred to @parent_sq.
	 */
	bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
	sq->nr_queued[rw]--;

	throtl_charge_bio(tg, bio);

	/*
	 * If our parent is another tg, we just need to transfer @bio to
	 * the parent using throtl_add_bio_tg().  If our parent is
	 * @td->service_queue, @bio is ready to be issued.  Put it on its
	 * bio_lists[] and decrease total number queued.  The caller is
	 * responsible for issuing these bios.
	 */
	if (parent_tg) {
		throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
		start_parent_slice_with_credit(tg, parent_tg, rw);
	} else {
		throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
				     &parent_sq->queued[rw]);
		BUG_ON(tg->td->nr_queued[rw] <= 0);
		tg->td->nr_queued[rw]--;
	}

	throtl_trim_slice(tg, rw);

	if (tg_to_put)
		blkg_put(tg_to_blkg(tg_to_put));
}

static int throtl_dispatch_tg(struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	unsigned int nr_reads = 0, nr_writes = 0;
	unsigned int max_nr_reads = THROTL_GRP_QUANTUM * 3 / 4;
	unsigned int max_nr_writes = THROTL_GRP_QUANTUM - max_nr_reads;
	struct bio *bio;

	/* Try to dispatch 75% READS and 25% WRITES */

	while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
	       tg_may_dispatch(tg, bio, NULL)) {

		tg_dispatch_one_bio(tg, bio_data_dir(bio));
		nr_reads++;

		if (nr_reads >= max_nr_reads)
			break;
	}

	while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
	       tg_may_dispatch(tg, bio, NULL)) {

		tg_dispatch_one_bio(tg, bio_data_dir(bio));
		nr_writes++;

		if (nr_writes >= max_nr_writes)
			break;
	}

	return nr_reads + nr_writes;
}

static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
{
	unsigned int nr_disp = 0;

	while (1) {
		struct throtl_grp *tg;
		struct throtl_service_queue *sq;

		if (!parent_sq->nr_pending)
			break;

		tg = throtl_rb_first(parent_sq);
		if (!tg)
			break;

		if (time_before(jiffies, tg->disptime))
			break;

		throtl_dequeue_tg(tg);

		nr_disp += throtl_dispatch_tg(tg);

		sq = &tg->service_queue;
		if (sq->nr_queued[0] || sq->nr_queued[1])
			tg_update_disptime(tg);

		if (nr_disp >= THROTL_QUANTUM)
			break;
	}

	return nr_disp;
}

static bool throtl_can_upgrade(struct throtl_data *td,
	struct throtl_grp *this_tg);
/**
 * throtl_pending_timer_fn - timer function for service_queue->pending_timer
 * @t: the pending_timer member of the throtl_service_queue being serviced
 *
 * This timer is armed when a child throtl_grp with active bio's become
 * pending and queued on the service_queue's pending_tree and expires when
 * the first child throtl_grp should be dispatched.  This function
 * dispatches bio's from the children throtl_grps to the parent
 * service_queue.
 *
 * If the parent's parent is another throtl_grp, dispatching is propagated
 * by either arming its pending_timer or repeating dispatch directly.  If
 * the top-level service_tree is reached, throtl_data->dispatch_work is
 * kicked so that the ready bio's are issued.
 */
static void throtl_pending_timer_fn(struct timer_list *t)
{
	struct throtl_service_queue *sq = from_timer(sq, t, pending_timer);
	struct throtl_grp *tg = sq_to_tg(sq);
	struct throtl_data *td = sq_to_td(sq);
	struct request_queue *q = td->queue;
	struct throtl_service_queue *parent_sq;
	bool dispatched;
	int ret;

	spin_lock_irq(&q->queue_lock);
	if (throtl_can_upgrade(td, NULL))
		throtl_upgrade_state(td);

again:
	parent_sq = sq->parent_sq;
	dispatched = false;

	while (true) {
		throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u",
			   sq->nr_queued[READ] + sq->nr_queued[WRITE],
			   sq->nr_queued[READ], sq->nr_queued[WRITE]);

		ret = throtl_select_dispatch(sq);
		if (ret) {
			throtl_log(sq, "bios disp=%u", ret);
			dispatched = true;
		}

		if (throtl_schedule_next_dispatch(sq, false))
			break;

		/* this dispatch window is still open, relax and repeat */
		spin_unlock_irq(&q->queue_lock);
		cpu_relax();
		spin_lock_irq(&q->queue_lock);
	}

	if (!dispatched)
		goto out_unlock;

	if (parent_sq) {
		/* @parent_sq is another throtl_grp, propagate dispatch */
		if (tg->flags & THROTL_TG_WAS_EMPTY) {
			tg_update_disptime(tg);
			if (!throtl_schedule_next_dispatch(parent_sq, false)) {
				/* window is already open, repeat dispatching */
				sq = parent_sq;
				tg = sq_to_tg(sq);
				goto again;
			}
		}
	} else {
		/* reached the top-level, queue issuing */
		queue_work(kthrotld_workqueue, &td->dispatch_work);
	}
out_unlock:
	spin_unlock_irq(&q->queue_lock);
}
/**
 * blk_throtl_dispatch_work_fn - work function for throtl_data->dispatch_work
 * @work: work item being executed
 *
 * This function is queued for execution when bios reach the bio_lists[]
 * of throtl_data->service_queue.  Those bios are ready and issued by this
 * function.
 */
static void blk_throtl_dispatch_work_fn(struct work_struct *work)
{
	struct throtl_data *td = container_of(work, struct throtl_data,
					      dispatch_work);
	struct throtl_service_queue *td_sq = &td->service_queue;
	struct request_queue *q = td->queue;
	struct bio_list bio_list_on_stack;
	struct bio *bio;
	struct blk_plug plug;
	int rw;

	bio_list_init(&bio_list_on_stack);

	spin_lock_irq(&q->queue_lock);
	for (rw = READ; rw <= WRITE; rw++)
		while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
			bio_list_add(&bio_list_on_stack, bio);
	spin_unlock_irq(&q->queue_lock);

	if (!bio_list_empty(&bio_list_on_stack)) {
		blk_start_plug(&plug);
		while ((bio = bio_list_pop(&bio_list_on_stack)))
			submit_bio_noacct(bio);
		blk_finish_plug(&plug);
	}
}

static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
			      int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	u64 v = *(u64 *)((void *)tg + off);

	if (v == U64_MAX)
		return 0;
	return __blkg_prfill_u64(sf, pd, v);
}

static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
			       int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	unsigned int v = *(unsigned int *)((void *)tg + off);

	if (v == UINT_MAX)
		return 0;
	return __blkg_prfill_u64(sf, pd, v);
}

static int tg_print_conf_u64(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_u64,
			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
	return 0;
}

static int tg_print_conf_uint(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_uint,
			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
	return 0;
}
static void tg_conf_updated(struct throtl_grp *tg, bool global)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *blkg;

	throtl_log(&tg->service_queue,
		   "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
		   tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE),
		   tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE));

	/*
	 * Update has_rules[] flags for the updated tg's subtree.  A tg is
	 * considered to have rules if either the tg itself or any of its
	 * ancestors has rules.  This identifies groups without any
	 * restrictions in the whole hierarchy and allows them to bypass
	 * blk-throttle.
	 */
	blkg_for_each_descendant_pre(blkg, pos_css,
			global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) {
		struct throtl_grp *this_tg = blkg_to_tg(blkg);
		struct throtl_grp *parent_tg;

		tg_update_has_rules(this_tg);
		/* ignore root/second level */
		if (!cgroup_subsys_on_dfl(io_cgrp_subsys) || !blkg->parent ||
		    !blkg->parent->parent)
			continue;
		parent_tg = blkg_to_tg(blkg->parent);
		/*
		 * make sure all children have a lower idle time threshold and
		 * a higher latency target
		 */
		this_tg->idletime_threshold = min(this_tg->idletime_threshold,
				parent_tg->idletime_threshold);
		this_tg->latency_target = max(this_tg->latency_target,
				parent_tg->latency_target);
	}

	/*
	 * We're already holding queue_lock and know @tg is valid.  Let's
	 * apply the new config directly.
	 *
	 * Restart the slices for both READ and WRITES. It might happen
	 * that a group's limits are dropped suddenly and we don't want to
	 * account recently dispatched IO with new low rate.
	 */
	throtl_start_new_slice(tg, READ);
	throtl_start_new_slice(tg, WRITE);

	if (tg->flags & THROTL_TG_PENDING) {
		tg_update_disptime(tg);
		throtl_schedule_next_dispatch(sq->parent_sq, true);
	}
}
static ssize_t tg_set_conf(struct kernfs_open_file *of,
			   char *buf, size_t nbytes, loff_t off, bool is_u64)
{
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct blkg_conf_ctx ctx;
	struct throtl_grp *tg;
	int ret;
	u64 v;

	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
	if (ret)
		return ret;

	ret = -EINVAL;
	if (sscanf(ctx.body, "%llu", &v) != 1)
		goto out_finish;
	if (!v)
		v = U64_MAX;

	tg = blkg_to_tg(ctx.blkg);

	if (is_u64)
		*(u64 *)((void *)tg + of_cft(of)->private) = v;
	else
		*(unsigned int *)((void *)tg + of_cft(of)->private) = v;

	tg_conf_updated(tg, false);
	ret = 0;
out_finish:
	blkg_conf_finish(&ctx);
	return ret ?: nbytes;
}

static ssize_t tg_set_conf_u64(struct kernfs_open_file *of,
			       char *buf, size_t nbytes, loff_t off)
{
	return tg_set_conf(of, buf, nbytes, off, true);
}

static ssize_t tg_set_conf_uint(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	return tg_set_conf(of, buf, nbytes, off, false);
}

static int tg_print_rwstat(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  blkg_prfill_rwstat, &blkcg_policy_throtl,
			  seq_cft(sf)->private, true);
	return 0;
}

static u64 tg_prfill_rwstat_recursive(struct seq_file *sf,
				      struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat_sample sum;

	blkg_rwstat_recursive_sum(pd_to_blkg(pd), &blkcg_policy_throtl, off,
				  &sum);
	return __blkg_prfill_rwstat(sf, pd, &sum);
}

static int tg_print_rwstat_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  tg_prfill_rwstat_recursive, &blkcg_policy_throtl,
			  seq_cft(sf)->private, true);
	return 0;
}

static struct cftype throtl_legacy_files[] = {
	{
		.name = "throttle.read_bps_device",
		.private = offsetof(struct throtl_grp, bps[READ][LIMIT_MAX]),
		.seq_show = tg_print_conf_u64,
		.write = tg_set_conf_u64,
	},
	{
		.name = "throttle.write_bps_device",
		.private = offsetof(struct throtl_grp, bps[WRITE][LIMIT_MAX]),
		.seq_show = tg_print_conf_u64,
		.write = tg_set_conf_u64,
	},
	{
		.name = "throttle.read_iops_device",
		.private = offsetof(struct throtl_grp, iops[READ][LIMIT_MAX]),
		.seq_show = tg_print_conf_uint,
		.write = tg_set_conf_uint,
	},
	{
		.name = "throttle.write_iops_device",
		.private = offsetof(struct throtl_grp, iops[WRITE][LIMIT_MAX]),
		.seq_show = tg_print_conf_uint,
		.write = tg_set_conf_uint,
	},
	{
		.name = "throttle.io_service_bytes",
		.private = offsetof(struct throtl_grp, stat_bytes),
		.seq_show = tg_print_rwstat,
	},
	{
		.name = "throttle.io_service_bytes_recursive",
		.private = offsetof(struct throtl_grp, stat_bytes),
		.seq_show = tg_print_rwstat_recursive,
	},
	{
		.name = "throttle.io_serviced",
		.private = offsetof(struct throtl_grp, stat_ios),
		.seq_show = tg_print_rwstat,
	},
	{
		.name = "throttle.io_serviced_recursive",
		.private = offsetof(struct throtl_grp, stat_ios),
		.seq_show = tg_print_rwstat_recursive,
	},
	{ }	/* terminate */
};

static u64 tg_prfill_limit(struct seq_file *sf, struct blkg_policy_data *pd,
			   int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	const char *dname = blkg_dev_name(pd->blkg);
	char bufs[4][21] = { "max", "max", "max", "max" };
	u64 bps_dft;
	unsigned int iops_dft;
	char idle_time[26] = "";
	char latency_time[26] = "";

	if (!dname)
		return 0;

	if (off == LIMIT_LOW) {
		bps_dft = 0;
		iops_dft = 0;
	} else {
		bps_dft = U64_MAX;
		iops_dft = UINT_MAX;
	}

	if (tg->bps_conf[READ][off] == bps_dft &&
	    tg->bps_conf[WRITE][off] == bps_dft &&
	    tg->iops_conf[READ][off] == iops_dft &&
	    tg->iops_conf[WRITE][off] == iops_dft &&
	    (off != LIMIT_LOW ||
	     (tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD &&
	      tg->latency_target_conf == DFL_LATENCY_TARGET)))
		return 0;

	if (tg->bps_conf[READ][off] != U64_MAX)
		snprintf(bufs[0], sizeof(bufs[0]), "%llu",
			tg->bps_conf[READ][off]);
	if (tg->bps_conf[WRITE][off] != U64_MAX)
		snprintf(bufs[1], sizeof(bufs[1]), "%llu",
			tg->bps_conf[WRITE][off]);
	if (tg->iops_conf[READ][off] != UINT_MAX)
		snprintf(bufs[2], sizeof(bufs[2]), "%u",
			tg->iops_conf[READ][off]);
	if (tg->iops_conf[WRITE][off] != UINT_MAX)
		snprintf(bufs[3], sizeof(bufs[3]), "%u",
			tg->iops_conf[WRITE][off]);
	if (off == LIMIT_LOW) {
		if (tg->idletime_threshold_conf == ULONG_MAX)
			strcpy(idle_time, " idle=max");
		else
			snprintf(idle_time, sizeof(idle_time), " idle=%lu",
				tg->idletime_threshold_conf);

		if (tg->latency_target_conf == ULONG_MAX)
			strcpy(latency_time, " latency=max");
		else
			snprintf(latency_time, sizeof(latency_time),
				" latency=%lu", tg->latency_target_conf);
	}

	seq_printf(sf, "%s rbps=%s wbps=%s riops=%s wiops=%s%s%s\n",
		   dname, bufs[0], bufs[1], bufs[2], bufs[3], idle_time,
		   latency_time);
	return 0;
}

static int tg_print_limit(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_limit,
			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
	return 0;
}

static ssize_t tg_set_limit(struct kernfs_open_file *of,
			    char *buf, size_t nbytes, loff_t off)
{
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct blkg_conf_ctx ctx;
	struct throtl_grp *tg;
	u64 v[4];
	unsigned long idle_time;
	unsigned long latency_time;
	int ret;
	int index = of_cft(of)->private;

	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
	if (ret)
		return ret;

	tg = blkg_to_tg(ctx.blkg);

	v[0] = tg->bps_conf[READ][index];
	v[1] = tg->bps_conf[WRITE][index];
	v[2] = tg->iops_conf[READ][index];
	v[3] = tg->iops_conf[WRITE][index];

	idle_time = tg->idletime_threshold_conf;
	latency_time = tg->latency_target_conf;
	while (true) {
		char tok[27];	/* wiops=18446744073709551616 */
		char *p;
		u64 val = U64_MAX;
		int len;

		if (sscanf(ctx.body, "%26s%n", tok, &len) != 1)
			break;
		if (tok[0] == '\0')
			break;
		ctx.body += len;

		ret = -EINVAL;
		p = tok;
		strsep(&p, "=");
		if (!p || (sscanf(p, "%llu", &val) != 1 && strcmp(p, "max")))
			goto out_finish;

		ret = -ERANGE;
		if (!val)
			goto out_finish;

		ret = -EINVAL;
		if (!strcmp(tok, "rbps") && val > 1)
			v[0] = val;
		else if (!strcmp(tok, "wbps") && val > 1)
			v[1] = val;
		else if (!strcmp(tok, "riops") && val > 1)
			v[2] = min_t(u64, val, UINT_MAX);
		else if (!strcmp(tok, "wiops") && val > 1)
			v[3] = min_t(u64, val, UINT_MAX);
		else if (off == LIMIT_LOW && !strcmp(tok, "idle"))
			idle_time = val;
		else if (off == LIMIT_LOW && !strcmp(tok, "latency"))
			latency_time = val;
		else
			goto out_finish;
	}

	tg->bps_conf[READ][index] = v[0];
	tg->bps_conf[WRITE][index] = v[1];
	tg->iops_conf[READ][index] = v[2];
	tg->iops_conf[WRITE][index] = v[3];

	if (index == LIMIT_MAX) {
		tg->bps[READ][index] = v[0];
		tg->bps[WRITE][index] = v[1];
		tg->iops[READ][index] = v[2];
		tg->iops[WRITE][index] = v[3];
	}
	tg->bps[READ][LIMIT_LOW] = min(tg->bps_conf[READ][LIMIT_LOW],
		tg->bps_conf[READ][LIMIT_MAX]);
	tg->bps[WRITE][LIMIT_LOW] = min(tg->bps_conf[WRITE][LIMIT_LOW],
		tg->bps_conf[WRITE][LIMIT_MAX]);
	tg->iops[READ][LIMIT_LOW] = min(tg->iops_conf[READ][LIMIT_LOW],
		tg->iops_conf[READ][LIMIT_MAX]);
	tg->iops[WRITE][LIMIT_LOW] = min(tg->iops_conf[WRITE][LIMIT_LOW],
		tg->iops_conf[WRITE][LIMIT_MAX]);
	tg->idletime_threshold_conf = idle_time;
	tg->latency_target_conf = latency_time;

	/* force user to configure all settings for low limit */
	if (!(tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW] ||
	      tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) ||
	    tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD ||
	    tg->latency_target_conf == DFL_LATENCY_TARGET) {
		tg->bps[READ][LIMIT_LOW] = 0;
		tg->bps[WRITE][LIMIT_LOW] = 0;
		tg->iops[READ][LIMIT_LOW] = 0;
		tg->iops[WRITE][LIMIT_LOW] = 0;
		tg->idletime_threshold = DFL_IDLE_THRESHOLD;
		tg->latency_target = DFL_LATENCY_TARGET;
	} else if (index == LIMIT_LOW) {
		tg->idletime_threshold = tg->idletime_threshold_conf;
		tg->latency_target = tg->latency_target_conf;
	}

	blk_throtl_update_limit_valid(tg->td);
	if (tg->td->limit_valid[LIMIT_LOW]) {
		if (index == LIMIT_LOW)
			tg->td->limit_index = LIMIT_LOW;
	} else
		tg->td->limit_index = LIMIT_MAX;
	tg_conf_updated(tg, index == LIMIT_LOW &&
		tg->td->limit_valid[LIMIT_LOW]);
	ret = 0;
out_finish:
	blkg_conf_finish(&ctx);
	return ret ?: nbytes;
}
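
/*
 * Editor's note, illustrative usage of the interface parsed above (device
 * numbers are examples; see Documentation/admin-guide/cgroup-v2.rst for
 * the authoritative format):
 *
 *   # limit device 8:16 to 2MB/s reads and 120 write IOPS
 *   echo "8:16 rbps=2097152 wiops=120" > io.max
 *
 *   # a low limit takes effect only with idle and latency configured too,
 *   # as enforced by the "force user to configure all settings" check
 *   echo "8:16 rbps=1048576 idle=1000 latency=100" > io.low
 */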
static struct cftype throtl_files[] = {
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	{
		.name = "low",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = tg_print_limit,
		.write = tg_set_limit,
		.private = LIMIT_LOW,
	},
#endif
	{
		.name = "max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = tg_print_limit,
		.write = tg_set_limit,
		.private = LIMIT_MAX,
	},
	{ }	/* terminate */
};

static void throtl_shutdown_wq(struct request_queue *q)
{
	struct throtl_data *td = q->td;

	cancel_work_sync(&td->dispatch_work);
}

static struct blkcg_policy blkcg_policy_throtl = {
	.dfl_cftypes		= throtl_files,
	.legacy_cftypes		= throtl_legacy_files,

	.pd_alloc_fn		= throtl_pd_alloc,
	.pd_init_fn		= throtl_pd_init,
	.pd_online_fn		= throtl_pd_online,
	.pd_offline_fn		= throtl_pd_offline,
	.pd_free_fn		= throtl_pd_free,
};

static unsigned long __tg_last_low_overflow_time(struct throtl_grp *tg)
{
	unsigned long rtime = jiffies, wtime = jiffies;

	if (tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW])
		rtime = tg->last_low_overflow_time[READ];
	if (tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW])
		wtime = tg->last_low_overflow_time[WRITE];
	return min(rtime, wtime);
}

/* tg should not be an intermediate node */
static unsigned long tg_last_low_overflow_time(struct throtl_grp *tg)
{
	struct throtl_service_queue *parent_sq;
	struct throtl_grp *parent = tg;
	unsigned long ret = __tg_last_low_overflow_time(tg);

	while (true) {
		parent_sq = parent->service_queue.parent_sq;
		parent = sq_to_tg(parent_sq);
		if (!parent)
			break;

		/*
		 * The parent doesn't have low limit, it always reaches low
		 * limit. Its overflow time is useless for children
		 */
		if (!parent->bps[READ][LIMIT_LOW] &&
		    !parent->iops[READ][LIMIT_LOW] &&
		    !parent->bps[WRITE][LIMIT_LOW] &&
		    !parent->iops[WRITE][LIMIT_LOW])
			continue;
		if (time_after(__tg_last_low_overflow_time(parent), ret))
			ret = __tg_last_low_overflow_time(parent);
	}

	return ret;
}
static bool throtl_tg_is_idle(struct throtl_grp *tg)
{
	/*
	 * cgroup is idle if:
	 * - single idle is too long, longer than a fixed value (in case the
	 *   user configured too big a threshold) or 4 times of idletime
	 *   threshold
	 * - average think time is more than threshold
	 * - IO latency is largely below threshold
	 */
	unsigned long time;
	bool ret;

	time = min_t(unsigned long, MAX_IDLE_TIME, 4 * tg->idletime_threshold);
	ret = tg->latency_target == DFL_LATENCY_TARGET ||
	      tg->idletime_threshold == DFL_IDLE_THRESHOLD ||
	      (ktime_get_ns() >> 10) - tg->last_finish_time > time ||
	      tg->avg_idletime > tg->idletime_threshold ||
	      (tg->latency_target && tg->bio_cnt &&
		tg->bad_bio_cnt * 5 < tg->bio_cnt);
	throtl_log(&tg->service_queue,
		"avg_idle=%ld, idle_threshold=%ld, bad_bio=%d, total_bio=%d, is_idle=%d, scale=%d",
		tg->avg_idletime, tg->idletime_threshold, tg->bad_bio_cnt,
		tg->bio_cnt, ret, tg->td->scale);
	return ret;
}
1873 static bool throtl_tg_can_upgrade(struct throtl_grp
*tg
)
1875 struct throtl_service_queue
*sq
= &tg
->service_queue
;
1876 bool read_limit
, write_limit
;
1879 * if cgroup reaches low limit (if low limit is 0, the cgroup always
1880 * reaches), it's ok to upgrade to next limit
1882 read_limit
= tg
->bps
[READ
][LIMIT_LOW
] || tg
->iops
[READ
][LIMIT_LOW
];
1883 write_limit
= tg
->bps
[WRITE
][LIMIT_LOW
] || tg
->iops
[WRITE
][LIMIT_LOW
];
1884 if (!read_limit
&& !write_limit
)
1886 if (read_limit
&& sq
->nr_queued
[READ
] &&
1887 (!write_limit
|| sq
->nr_queued
[WRITE
]))
1889 if (write_limit
&& sq
->nr_queued
[WRITE
] &&
1890 (!read_limit
|| sq
->nr_queued
[READ
]))
1893 if (time_after_eq(jiffies
,
1894 tg_last_low_overflow_time(tg
) + tg
->td
->throtl_slice
) &&
1895 throtl_tg_is_idle(tg
))
1900 static bool throtl_hierarchy_can_upgrade(struct throtl_grp
*tg
)
1903 if (throtl_tg_can_upgrade(tg
))
1905 tg
= sq_to_tg(tg
->service_queue
.parent_sq
);
1906 if (!tg
|| !tg_to_blkg(tg
)->parent
)
1912 static bool throtl_can_upgrade(struct throtl_data
*td
,
1913 struct throtl_grp
*this_tg
)
1915 struct cgroup_subsys_state
*pos_css
;
1916 struct blkcg_gq
*blkg
;
1918 if (td
->limit_index
!= LIMIT_LOW
)
1921 if (time_before(jiffies
, td
->low_downgrade_time
+ td
->throtl_slice
))
1925 blkg_for_each_descendant_post(blkg
, pos_css
, td
->queue
->root_blkg
) {
1926 struct throtl_grp
*tg
= blkg_to_tg(blkg
);
1930 if (!list_empty(&tg_to_blkg(tg
)->blkcg
->css
.children
))
1932 if (!throtl_hierarchy_can_upgrade(tg
)) {
1941 static void throtl_upgrade_check(struct throtl_grp
*tg
)
1943 unsigned long now
= jiffies
;
1945 if (tg
->td
->limit_index
!= LIMIT_LOW
)
1948 if (time_after(tg
->last_check_time
+ tg
->td
->throtl_slice
, now
))
1951 tg
->last_check_time
= now
;
1953 if (!time_after_eq(now
,
1954 __tg_last_low_overflow_time(tg
) + tg
->td
->throtl_slice
))
1957 if (throtl_can_upgrade(tg
->td
, NULL
))
1958 throtl_upgrade_state(tg
->td
);
1961 static void throtl_upgrade_state(struct throtl_data
*td
)
1963 struct cgroup_subsys_state
*pos_css
;
1964 struct blkcg_gq
*blkg
;
1966 throtl_log(&td
->service_queue
, "upgrade to max");
1967 td
->limit_index
= LIMIT_MAX
;
1968 td
->low_upgrade_time
= jiffies
;
1971 blkg_for_each_descendant_post(blkg
, pos_css
, td
->queue
->root_blkg
) {
1972 struct throtl_grp
*tg
= blkg_to_tg(blkg
);
1973 struct throtl_service_queue
*sq
= &tg
->service_queue
;
1975 tg
->disptime
= jiffies
- 1;
1976 throtl_select_dispatch(sq
);
1977 throtl_schedule_next_dispatch(sq
, true);
1980 throtl_select_dispatch(&td
->service_queue
);
1981 throtl_schedule_next_dispatch(&td
->service_queue
, true);
1982 queue_work(kthrotld_workqueue
, &td
->dispatch_work
);
1985 static void throtl_downgrade_state(struct throtl_data
*td
)
1989 throtl_log(&td
->service_queue
, "downgrade, scale %d", td
->scale
);
1991 td
->low_upgrade_time
= jiffies
- td
->scale
* td
->throtl_slice
;
1995 td
->limit_index
= LIMIT_LOW
;
1996 td
->low_downgrade_time
= jiffies
;
1999 static bool throtl_tg_can_downgrade(struct throtl_grp
*tg
)
2001 struct throtl_data
*td
= tg
->td
;
2002 unsigned long now
= jiffies
;
2005 * If cgroup is below low limit, consider downgrade and throttle other
2008 if (time_after_eq(now
, td
->low_upgrade_time
+ td
->throtl_slice
) &&
2009 time_after_eq(now
, tg_last_low_overflow_time(tg
) +
2010 td
->throtl_slice
) &&
2011 (!throtl_tg_is_idle(tg
) ||
2012 !list_empty(&tg_to_blkg(tg
)->blkcg
->css
.children
)))
2017 static bool throtl_hierarchy_can_downgrade(struct throtl_grp
*tg
)
2020 if (!throtl_tg_can_downgrade(tg
))
2022 tg
= sq_to_tg(tg
->service_queue
.parent_sq
);
2023 if (!tg
|| !tg_to_blkg(tg
)->parent
)
static void throtl_downgrade_check(struct throtl_grp *tg)
{
	uint64_t bps;
	unsigned int iops;
	unsigned long elapsed_time;
	unsigned long now = jiffies;

	if (tg->td->limit_index != LIMIT_MAX ||
	    !tg->td->limit_valid[LIMIT_LOW])
		return;
	if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
		return;
	if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
		return;

	elapsed_time = now - tg->last_check_time;
	tg->last_check_time = now;

	if (time_before(now, tg_last_low_overflow_time(tg) +
			tg->td->throtl_slice))
		return;

	if (tg->bps[READ][LIMIT_LOW]) {
		bps = tg->last_bytes_disp[READ] * HZ;
		do_div(bps, elapsed_time);
		if (bps >= tg->bps[READ][LIMIT_LOW])
			tg->last_low_overflow_time[READ] = now;
	}

	if (tg->bps[WRITE][LIMIT_LOW]) {
		bps = tg->last_bytes_disp[WRITE] * HZ;
		do_div(bps, elapsed_time);
		if (bps >= tg->bps[WRITE][LIMIT_LOW])
			tg->last_low_overflow_time[WRITE] = now;
	}

	if (tg->iops[READ][LIMIT_LOW]) {
		tg->last_io_disp[READ] += atomic_xchg(&tg->last_io_split_cnt[READ], 0);
		iops = tg->last_io_disp[READ] * HZ / elapsed_time;
		if (iops >= tg->iops[READ][LIMIT_LOW])
			tg->last_low_overflow_time[READ] = now;
	}

	if (tg->iops[WRITE][LIMIT_LOW]) {
		tg->last_io_disp[WRITE] += atomic_xchg(&tg->last_io_split_cnt[WRITE], 0);
		iops = tg->last_io_disp[WRITE] * HZ / elapsed_time;
		if (iops >= tg->iops[WRITE][LIMIT_LOW])
			tg->last_low_overflow_time[WRITE] = now;
	}

	/*
	 * If cgroup is below low limit, consider downgrade and throttle
	 * other cgroups
	 */
	if (throtl_hierarchy_can_downgrade(tg))
		throtl_downgrade_state(tg->td);

	tg->last_bytes_disp[READ] = 0;
	tg->last_bytes_disp[WRITE] = 0;
	tg->last_io_disp[READ] = 0;
	tg->last_io_disp[WRITE] = 0;
}
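
/*
 * Rate arithmetic sketch for the checks above (hypothetical numbers):
 * with HZ == 1000, a group that dispatched last_bytes_disp == 4 MiB
 * over elapsed_time == 500 jiffies (0.5s) yields
 *
 *	bps = 4194304 * 1000 / 500 = 8388608	(8 MiB/s)
 *
 * A LIMIT_LOW bps at or below that refreshes last_low_overflow_time,
 * which keeps throtl_tg_can_downgrade() returning false: groups still
 * consuming their low limit block the downgrade. The iops branch works
 * the same way, folding in split bios counted by
 * blk_throtl_charge_bio_split() via last_io_split_cnt.
 */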
static void blk_throtl_update_idletime(struct throtl_grp *tg)
{
	unsigned long now;
	unsigned long last_finish_time = tg->last_finish_time;

	if (last_finish_time == 0)
		return;

	now = ktime_get_ns() >> 10;
	if (now <= last_finish_time ||
	    last_finish_time == tg->checked_last_finish_time)
		return;

	tg->avg_idletime = (tg->avg_idletime * 7 + now - last_finish_time) >> 3;
	tg->checked_last_finish_time = last_finish_time;
}
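
/*
 * Illustrative sketch (not built; a hypothetical helper, not part of
 * this file) of the 7/8 exponentially weighted moving average used
 * above. avg_idletime is kept in microseconds: ktime_get_ns() >> 10 is
 * a cheap approximate ns-to-us conversion.
 */
#if 0
static unsigned long ewma_7_8(unsigned long avg, unsigned long sample)
{
	/* new_avg = (7 * old_avg + sample) / 8 */
	return (avg * 7 + sample) >> 3;
}

/*
 * Starting from avg == 0 and feeding an 800us idle gap twice:
 *	ewma_7_8(0, 800)   == 100
 *	ewma_7_8(100, 800) == 187
 * so roughly 1/8 of each new sample is folded into the average.
 */
#endif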
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
static void throtl_update_latency_buckets(struct throtl_data *td)
{
	struct avg_latency_bucket avg_latency[2][LATENCY_BUCKET_SIZE];
	int i, cpu, rw;
	unsigned long last_latency[2] = { 0 };
	unsigned long latency[2];

	if (!blk_queue_nonrot(td->queue) || !td->limit_valid[LIMIT_LOW])
		return;
	if (time_before(jiffies, td->last_calculate_time + HZ))
		return;
	td->last_calculate_time = jiffies;

	memset(avg_latency, 0, sizeof(avg_latency));
	for (rw = READ; rw <= WRITE; rw++) {
		for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
			struct latency_bucket *tmp = &td->tmp_buckets[rw][i];

			for_each_possible_cpu(cpu) {
				struct latency_bucket *bucket;

				/* this isn't race free, but ok in practice */
				bucket = per_cpu_ptr(td->latency_buckets[rw],
					cpu);
				tmp->total_latency += bucket[i].total_latency;
				tmp->samples += bucket[i].samples;
				bucket[i].total_latency = 0;
				bucket[i].samples = 0;
			}

			if (tmp->samples >= 32) {
				int samples = tmp->samples;

				latency[rw] = tmp->total_latency;

				tmp->total_latency = 0;
				tmp->samples = 0;
				latency[rw] /= samples;
				if (latency[rw] == 0)
					continue;
				avg_latency[rw][i].latency = latency[rw];
			}
		}
	}

	for (rw = READ; rw <= WRITE; rw++) {
		for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
			if (!avg_latency[rw][i].latency) {
				if (td->avg_buckets[rw][i].latency < last_latency[rw])
					td->avg_buckets[rw][i].latency =
						last_latency[rw];
				continue;
			}

			if (!td->avg_buckets[rw][i].valid)
				latency[rw] = avg_latency[rw][i].latency;
			else
				latency[rw] = (td->avg_buckets[rw][i].latency * 7 +
					avg_latency[rw][i].latency) >> 3;

			td->avg_buckets[rw][i].latency = max(latency[rw],
				last_latency[rw]);
			td->avg_buckets[rw][i].valid = true;
			last_latency[rw] = td->avg_buckets[rw][i].latency;
		}
	}

	for (i = 0; i < LATENCY_BUCKET_SIZE; i++)
		throtl_log(&td->service_queue,
			"Latency bucket %d: read latency=%ld, read valid=%d, "
			"write latency=%ld, write valid=%d", i,
			td->avg_buckets[READ][i].latency,
			td->avg_buckets[READ][i].valid,
			td->avg_buckets[WRITE][i].latency,
			td->avg_buckets[WRITE][i].valid);
}
#else
static inline void throtl_update_latency_buckets(struct throtl_data *td)
{
}
#endif
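
/*
 * Note on the merge loop above: the EWMA (old * 7 + new) >> 3 matches
 * the idle-time average, and max(latency[rw], last_latency[rw]) forces
 * the per-bucket latency targets to be non-decreasing with request
 * size. For example (hypothetical numbers), if the bucket for smaller
 * requests settled at 900us, a 700us sample in the next-larger bucket
 * is clamped up to 900us, since larger IO is not expected to complete
 * faster than smaller IO.
 */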
void blk_throtl_charge_bio_split(struct bio *bio)
{
	struct blkcg_gq *blkg = bio->bi_blkg;
	struct throtl_grp *parent = blkg_to_tg(blkg);
	struct throtl_service_queue *parent_sq;
	bool rw = bio_data_dir(bio);

	do {
		if (!parent->has_rules[rw])
			break;

		atomic_inc(&parent->io_split_cnt[rw]);
		atomic_inc(&parent->last_io_split_cnt[rw]);

		parent_sq = parent->service_queue.parent_sq;
		parent = sq_to_tg(parent_sq);
	} while (parent);
}
bool blk_throtl_bio(struct bio *bio)
{
	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
	struct blkcg_gq *blkg = bio->bi_blkg;
	struct throtl_qnode *qn = NULL;
	struct throtl_grp *tg = blkg_to_tg(blkg);
	struct throtl_service_queue *sq;
	bool rw = bio_data_dir(bio);
	bool throttled = false;
	struct throtl_data *td = tg->td;

	rcu_read_lock();

	/* see throtl_charge_bio() */
	if (bio_flagged(bio, BIO_THROTTLED))
		goto out;

	if (!cgroup_subsys_on_dfl(io_cgrp_subsys)) {
		blkg_rwstat_add(&tg->stat_bytes, bio->bi_opf,
				bio->bi_iter.bi_size);
		blkg_rwstat_add(&tg->stat_ios, bio->bi_opf, 1);
	}

	if (!tg->has_rules[rw])
		goto out;

	spin_lock_irq(&q->queue_lock);

	throtl_update_latency_buckets(td);

	blk_throtl_update_idletime(tg);

	sq = &tg->service_queue;

again:
	while (true) {
		if (tg->last_low_overflow_time[rw] == 0)
			tg->last_low_overflow_time[rw] = jiffies;
		throtl_downgrade_check(tg);
		throtl_upgrade_check(tg);
		/* throtl is FIFO - if bios are already queued, should queue */
		if (sq->nr_queued[rw])
			break;

		/* if above limits, break to queue */
		if (!tg_may_dispatch(tg, bio, NULL)) {
			tg->last_low_overflow_time[rw] = jiffies;
			if (throtl_can_upgrade(td, tg)) {
				throtl_upgrade_state(td);
				goto again;
			}
			break;
		}

		/* within limits, let's charge and dispatch directly */
		throtl_charge_bio(tg, bio);

		/*
		 * We need to trim the slice even when bios are not being
		 * queued, otherwise it might happen that a bio is not queued
		 * for a long time and the slice keeps on extending while
		 * trim is never called. If limits are then reduced suddenly,
		 * all the IO dispatched so far is accounted at the new low
		 * rate and newly queued IO gets a really long dispatch time.
		 *
		 * So keep on trimming the slice even if no bio is queued.
		 */
		throtl_trim_slice(tg, rw);

		/*
		 * @bio passed through this layer without being throttled.
		 * Climb up the ladder. If we're already at the top, it
		 * can be executed directly.
		 */
		qn = &tg->qnode_on_parent[rw];
		sq = sq->parent_sq;
		tg = sq_to_tg(sq);
		if (!tg)
			goto out_unlock;
	}

	/* out-of-limit, queue to @tg */
	throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
		   rw == READ ? 'R' : 'W',
		   tg->bytes_disp[rw], bio->bi_iter.bi_size,
		   tg_bps_limit(tg, rw),
		   tg->io_disp[rw], tg_iops_limit(tg, rw),
		   sq->nr_queued[READ], sq->nr_queued[WRITE]);

	tg->last_low_overflow_time[rw] = jiffies;

	td->nr_queued[rw]++;
	throtl_add_bio_tg(bio, qn, tg);
	throttled = true;

	/*
	 * Update @tg's dispatch time and force schedule dispatch if @tg
	 * was empty before @bio. The forced scheduling isn't likely to
	 * cause undue delay as @bio is likely to be dispatched directly if
	 * its @tg's disptime is not in the future.
	 */
	if (tg->flags & THROTL_TG_WAS_EMPTY) {
		tg_update_disptime(tg);
		throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true);
	}

out_unlock:
	spin_unlock_irq(&q->queue_lock);
out:
	bio_set_flag(bio, BIO_THROTTLED);

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	if (throttled || !td->track_bio_latency)
		bio->bi_issue.value |= BIO_ISSUE_THROTL_SKIP_LATENCY;
#endif
	rcu_read_unlock();
	return throttled;
}
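
/*
 * Ladder-climb example for blk_throtl_bio() (hypothetical hierarchy):
 * with groups root <- A <- B and a bio issued in B, the loop starts at
 * B. If B may dispatch, the bio is charged to B and the walk moves to
 * A and then to the root. qn remembers the qnode_on_parent[rw] of the
 * last level that passed, so an over-limit ancestor queues the bio on
 * that child's qnode (or the issuing group queues it on itself) and
 * the function returns true. Only a bio that passes every level
 * escapes with throttled == false.
 */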
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
static void throtl_track_latency(struct throtl_data *td, sector_t size,
				 int op, unsigned long time)
{
	struct latency_bucket *latency;
	int index;

	if (!td || td->limit_index != LIMIT_LOW ||
	    !(op == REQ_OP_READ || op == REQ_OP_WRITE) ||
	    !blk_queue_nonrot(td->queue))
		return;

	index = request_bucket_index(size);

	latency = get_cpu_ptr(td->latency_buckets[op]);
	latency[index].total_latency += time;
	latency[index].samples++;
	put_cpu_ptr(td->latency_buckets[op]);
}
void blk_throtl_stat_add(struct request *rq, u64 time_ns)
{
	struct request_queue *q = rq->q;
	struct throtl_data *td = q->td;

	throtl_track_latency(td, blk_rq_stats_sectors(rq), req_op(rq),
			     time_ns >> 10);
}
void blk_throtl_bio_endio(struct bio *bio)
{
	struct blkcg_gq *blkg;
	struct throtl_grp *tg;
	u64 finish_time_ns;
	unsigned long finish_time;
	unsigned long start_time;
	unsigned long lat;
	int rw = bio_data_dir(bio);

	blkg = bio->bi_blkg;
	if (!blkg)
		return;
	tg = blkg_to_tg(blkg);
	if (!tg->td->limit_valid[LIMIT_LOW])
		return;

	finish_time_ns = ktime_get_ns();
	tg->last_finish_time = finish_time_ns >> 10;

	start_time = bio_issue_time(&bio->bi_issue) >> 10;
	finish_time = __bio_issue_time(finish_time_ns) >> 10;
	if (!start_time || finish_time <= start_time)
		return;

	lat = finish_time - start_time;
	/* this is only for bio based driver */
	if (!(bio->bi_issue.value & BIO_ISSUE_THROTL_SKIP_LATENCY))
		throtl_track_latency(tg->td, bio_issue_size(&bio->bi_issue),
				     bio_op(bio), lat);

	if (tg->latency_target && lat >= tg->td->filtered_latency) {
		int bucket;
		unsigned int threshold;

		bucket = request_bucket_index(bio_issue_size(&bio->bi_issue));
		threshold = tg->td->avg_buckets[rw][bucket].latency +
			tg->latency_target;
		if (lat > threshold)
			tg->bad_bio_cnt++;
		/*
		 * Not race free, could get wrong count, which means cgroups
		 * will be throttled slightly.
		 */
		tg->bio_cnt++;
	}

	if (time_after(jiffies, tg->bio_cnt_reset_time) || tg->bio_cnt > 1024) {
		tg->bio_cnt_reset_time = tg->td->throtl_slice + jiffies;
		tg->bio_cnt /= 2;
		tg->bad_bio_cnt /= 2;
	}
}
#endif
int blk_throtl_init(struct request_queue *q)
{
	struct throtl_data *td;
	int ret;

	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
	if (!td)
		return -ENOMEM;
	td->latency_buckets[READ] = __alloc_percpu(sizeof(struct latency_bucket) *
		LATENCY_BUCKET_SIZE, __alignof__(u64));
	if (!td->latency_buckets[READ]) {
		kfree(td);
		return -ENOMEM;
	}
	td->latency_buckets[WRITE] = __alloc_percpu(sizeof(struct latency_bucket) *
		LATENCY_BUCKET_SIZE, __alignof__(u64));
	if (!td->latency_buckets[WRITE]) {
		free_percpu(td->latency_buckets[READ]);
		kfree(td);
		return -ENOMEM;
	}

	INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
	throtl_service_queue_init(&td->service_queue);

	q->td = td;
	td->queue = q;

	td->limit_valid[LIMIT_MAX] = true;
	td->limit_index = LIMIT_MAX;
	td->low_upgrade_time = jiffies;
	td->low_downgrade_time = jiffies;

	/* activate policy */
	ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
	if (ret) {
		free_percpu(td->latency_buckets[READ]);
		free_percpu(td->latency_buckets[WRITE]);
		kfree(td);
	}
	return ret;
}
void blk_throtl_exit(struct request_queue *q)
{
	BUG_ON(!q->td);
	del_timer_sync(&q->td->service_queue.pending_timer);
	throtl_shutdown_wq(q);
	blkcg_deactivate_policy(q, &blkcg_policy_throtl);
	free_percpu(q->td->latency_buckets[READ]);
	free_percpu(q->td->latency_buckets[WRITE]);
	kfree(q->td);
}
void blk_throtl_register_queue(struct request_queue *q)
{
	struct throtl_data *td;
	int i;

	td = q->td;
	BUG_ON(!td);

	if (blk_queue_nonrot(q)) {
		td->throtl_slice = DFL_THROTL_SLICE_SSD;
		td->filtered_latency = LATENCY_FILTERED_SSD;
	} else {
		td->throtl_slice = DFL_THROTL_SLICE_HD;
		td->filtered_latency = LATENCY_FILTERED_HD;
		for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
			td->avg_buckets[READ][i].latency = DFL_HD_BASELINE_LATENCY;
			td->avg_buckets[WRITE][i].latency = DFL_HD_BASELINE_LATENCY;
		}
	}
#ifndef CONFIG_BLK_DEV_THROTTLING_LOW
	/* if no low limit, use previous default */
	td->throtl_slice = DFL_THROTL_SLICE_HD;
#endif

	td->track_bio_latency = !queue_is_mq(q);
	if (!td->track_bio_latency)
		blk_stat_enable_accounting(q);
}
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page)
{
	if (!q->td)
		return -EINVAL;
	return sprintf(page, "%u\n", jiffies_to_msecs(q->td->throtl_slice));
}

ssize_t blk_throtl_sample_time_store(struct request_queue *q,
	const char *page, size_t count)
{
	unsigned long v;
	unsigned long t;

	if (!q->td)
		return -EINVAL;
	if (kstrtoul(page, 10, &v))
		return -EINVAL;
	t = msecs_to_jiffies(v);
	if (t == 0 || t > MAX_THROTL_SLICE)
		return -EINVAL;
	q->td->throtl_slice = t;
	return count;
}
#endif
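
/*
 * Usage sketch for the pair above (illustrative; the device name and
 * exact path are an assumption based on the queue sysfs attribute this
 * backs):
 *
 *	# cat /sys/block/sda/queue/throttle_sample_time
 *	100
 *	# echo 50 > /sys/block/sda/queue/throttle_sample_time
 *
 * The value is in milliseconds; a write is rejected with -EINVAL unless
 * 0 < msecs_to_jiffies(val) <= MAX_THROTL_SLICE (one second).
 */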
static int __init throtl_init(void)
{
	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
	if (!kthrotld_workqueue)
		panic("Failed to create kthrotld\n");

	return blkcg_policy_register(&blkcg_policy_throtl);
}

module_init(throtl_init);