/*
 * Interface for controlling IO bandwidth on a request queue
 *
 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blktrace_api.h>
#include <linux/blk-cgroup.h>
#include "blk.h"
/* Max dispatch from a group in 1 round */
static int throtl_grp_quantum = 8;

/* Total max dispatch from all groups in one round */
static int throtl_quantum = 32;
/* Throttling is performed over a slice and after that slice is renewed */
#define DFL_THROTL_SLICE_HD (HZ / 10)
#define DFL_THROTL_SLICE_SSD (HZ / 50)
#define MAX_THROTL_SLICE (HZ)
#define DFL_IDLE_THRESHOLD_SSD (1000L) /* 1 ms */
#define DFL_IDLE_THRESHOLD_HD (100L * 1000) /* 100 ms */
#define MAX_IDLE_TIME (5L * 1000 * 1000) /* 5 s */
/* default latency target is 0, i.e., guarantee IO latency by default */
#define DFL_LATENCY_TARGET (0)

#define MIN_THROTL_BPS (320 * 1024)
#define MIN_THROTL_IOPS (10)

#define SKIP_LATENCY (((u64)1) << BLK_STAT_RES_SHIFT)
static struct blkcg_policy blkcg_policy_throtl;

/* A workqueue to queue throttle related work */
static struct workqueue_struct *kthrotld_workqueue;
/*
 * To implement hierarchical throttling, throtl_grps form a tree and bios
 * are dispatched upwards level by level until they reach the top and get
 * issued.  When dispatching bios from the children and local group at each
 * level, if the bios are dispatched into a single bio_list, there's a risk
 * of a local or child group which can queue many bios at once filling up
 * the list starving others.
 *
 * To avoid such starvation, dispatched bios are queued separately
 * according to where they came from.  When they are again dispatched to
 * the parent, they're popped in round-robin order so that no single source
 * hogs the dispatch window.
 *
 * throtl_qnode is used to keep the queued bios separated by their sources.
 * Bios are queued to throtl_qnode which in turn is queued to
 * throtl_service_queue and then dispatched in round-robin order.
 *
 * It's also used to track the reference counts on blkg's.  A qnode always
 * belongs to a throtl_grp and gets queued on itself or the parent, so
 * incrementing the reference of the associated throtl_grp when a qnode is
 * queued and decrementing it when dequeued is enough to keep the whole
 * blkg tree pinned while bios are in flight.
 */
struct throtl_qnode {
	struct list_head	node;		/* service_queue->queued[] */
	struct bio_list		bios;		/* queued bios */
	struct throtl_grp	*tg;		/* tg this qnode belongs to */
};
struct throtl_service_queue {
	struct throtl_service_queue *parent_sq;	/* the parent service_queue */

	/*
	 * Bios queued directly to this service_queue or dispatched from
	 * children throtl_grp's.
	 */
	struct list_head	queued[2];	/* throtl_qnode [READ/WRITE] */
	unsigned int		nr_queued[2];	/* number of queued bios */

	/*
	 * RB tree of active children throtl_grp's, which are sorted by
	 * their ->disptime.
	 */
	struct rb_root		pending_tree;	/* RB tree of active tgs */
	struct rb_node		*first_pending;	/* first node in the tree */
	unsigned int		nr_pending;	/* # queued in the tree */
	unsigned long		first_pending_disptime;	/* disptime of the first tg */
	struct timer_list	pending_timer;	/* fires on first_pending_disptime */
};
enum tg_state_flags {
	THROTL_TG_PENDING	= 1 << 0,	/* on parent's pending tree */
	THROTL_TG_WAS_EMPTY	= 1 << 1,	/* bio_lists[] became non-empty */
};

#define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)

enum {
	LIMIT_LOW,
	LIMIT_MAX,
	LIMIT_CNT,
};
struct throtl_grp {
	/* must be the first member */
	struct blkg_policy_data pd;

	/* active throtl group service_queue member */
	struct rb_node rb_node;

	/* throtl_data this group belongs to */
	struct throtl_data *td;

	/* this group's service queue */
	struct throtl_service_queue service_queue;

	/*
	 * qnode_on_self is used when bios are directly queued to this
	 * throtl_grp so that local bios compete fairly with bios
	 * dispatched from children.  qnode_on_parent is used when bios are
	 * dispatched from this throtl_grp into its parent and will compete
	 * with the sibling qnode_on_parents and the parent's
	 * qnode_on_self.
	 */
	struct throtl_qnode qnode_on_self[2];
	struct throtl_qnode qnode_on_parent[2];

	/*
	 * Dispatch time in jiffies.  This is the estimated time when the
	 * group will unthrottle and is ready to dispatch more bios.  It is
	 * used as the key to sort active groups in the service tree.
	 */
	unsigned long disptime;

	unsigned int flags;

	/* are there any throtl rules between this group and td? */
	bool has_rules[2];

	/* internally used bytes per second rate limits */
	uint64_t bps[2][LIMIT_CNT];
	/* user configured bps limits */
	uint64_t bps_conf[2][LIMIT_CNT];

	/* internally used IOPS limits */
	unsigned int iops[2][LIMIT_CNT];
	/* user configured IOPS limits */
	unsigned int iops_conf[2][LIMIT_CNT];

	/* Number of bytes dispatched in current slice */
	uint64_t bytes_disp[2];
	/* Number of bio's dispatched in current slice */
	unsigned int io_disp[2];

	unsigned long last_low_overflow_time[2];

	uint64_t last_bytes_disp[2];
	unsigned int last_io_disp[2];

	unsigned long last_check_time;

	unsigned long latency_target; /* us */
	unsigned long latency_target_conf; /* us */
	/* When did we start a new slice */
	unsigned long slice_start[2];
	unsigned long slice_end[2];

	unsigned long last_finish_time; /* ns / 1024 */
	unsigned long checked_last_finish_time; /* ns / 1024 */
	unsigned long avg_idletime; /* ns / 1024 */
	unsigned long idletime_threshold; /* us */
	unsigned long idletime_threshold_conf; /* us */

	unsigned int bio_cnt; /* total bios */
	unsigned int bad_bio_cnt; /* bios exceeding latency threshold */
	unsigned long bio_cnt_reset_time;
};
/* We measure latency for request size from <= 4k to >= 1M */
#define LATENCY_BUCKET_SIZE 9

struct latency_bucket {
	unsigned long total_latency; /* ns / 1024 */
	int samples;
};

struct avg_latency_bucket {
	unsigned long latency; /* ns / 1024 */
	bool valid;
};
struct throtl_data
{
	/* service tree for active throtl groups */
	struct throtl_service_queue service_queue;

	struct request_queue *queue;

	/* Total number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	unsigned int throtl_slice;

	/* Work for dispatching throttled bios */
	struct work_struct dispatch_work;
	unsigned int limit_index;
	bool limit_valid[LIMIT_CNT];

	unsigned long dft_idletime_threshold; /* us */

	unsigned long low_upgrade_time;
	unsigned long low_downgrade_time;

	unsigned int scale;

	struct latency_bucket tmp_buckets[LATENCY_BUCKET_SIZE];
	struct avg_latency_bucket avg_buckets[LATENCY_BUCKET_SIZE];
	struct latency_bucket __percpu *latency_buckets;
	unsigned long last_calculate_time;

	bool track_bio_latency;
};
static void throtl_pending_timer_fn(unsigned long arg);
static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
}
static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
{
	return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
}
static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
{
	return pd_to_blkg(&tg->pd);
}
/**
 * sq_to_tg - return the throtl_grp the specified service queue belongs to
 * @sq: the throtl_service_queue of interest
 *
 * Return the throtl_grp @sq belongs to.  If @sq is the top-level one
 * embedded in throtl_data, %NULL is returned.
 */
static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq)
{
	if (sq && sq->parent_sq)
		return container_of(sq, struct throtl_grp, service_queue);
	else
		return NULL;
}
/**
 * sq_to_td - return throtl_data the specified service queue belongs to
 * @sq: the throtl_service_queue of interest
 *
 * A service_queue can be embedded in either a throtl_grp or throtl_data.
 * Determine the associated throtl_data accordingly and return it.
 */
static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
{
	struct throtl_grp *tg = sq_to_tg(sq);

	if (tg)
		return tg->td;
	else
		return container_of(sq, struct throtl_data, service_queue);
}
/*
 * A cgroup's limit in LIMIT_MAX is scaled if the low limit is set.  This
 * scale is to make the IO dispatch smoother.
 * Scale up: linearly scale up according to elapsed time since upgrade.  For
 *           every throtl_slice, the limit scales up by 1/2 of the .low
 *           limit till the limit hits the .max limit.
 * Scale down: exponentially scale down if a cgroup doesn't hit its .low
 *             limit.
 */
static uint64_t throtl_adjusted_limit(uint64_t low, struct throtl_data *td)
{
	/* arbitrary value to avoid too big scale */
	if (td->scale < 4096 && time_after_eq(jiffies,
	    td->low_upgrade_time + td->scale * td->throtl_slice))
		td->scale = (jiffies - td->low_upgrade_time) / td->throtl_slice;

	return low + (low >> 1) * td->scale;
}
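
/*
 * Worked example of the scale-up above (the numbers are illustrative, not
 * from the original source): with low = 100 MB/s and throtl_slice = 100ms,
 * three slices after an upgrade td->scale is 3 and this returns
 * 100 + 50 * 3 = 250 MB/s, i.e. the limit grows by half the .low limit per
 * slice.  The callers below clamp the result to the configured .max limit.
 */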
static uint64_t tg_bps_limit(struct throtl_grp *tg, int rw)
{
	struct blkcg_gq *blkg = tg_to_blkg(tg);
	struct throtl_data *td;
	uint64_t ret;

	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
		return U64_MAX;

	td = tg->td;
	ret = tg->bps[rw][td->limit_index];
	if (ret == 0 && td->limit_index == LIMIT_LOW) {
		/* intermediate node or iops isn't 0 */
		if (!list_empty(&blkg->blkcg->css.children) ||
		    tg->iops[rw][td->limit_index])
			return U64_MAX;
		else
			return MIN_THROTL_BPS;
	}

	if (td->limit_index == LIMIT_MAX && tg->bps[rw][LIMIT_LOW] &&
	    tg->bps[rw][LIMIT_LOW] != tg->bps[rw][LIMIT_MAX]) {
		uint64_t adjusted;

		adjusted = throtl_adjusted_limit(tg->bps[rw][LIMIT_LOW], td);
		ret = min(tg->bps[rw][LIMIT_MAX], adjusted);
	}
	return ret;
}
static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
{
	struct blkcg_gq *blkg = tg_to_blkg(tg);
	struct throtl_data *td;
	unsigned int ret;

	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
		return UINT_MAX;

	td = tg->td;
	ret = tg->iops[rw][td->limit_index];
	if (ret == 0 && tg->td->limit_index == LIMIT_LOW) {
		/* intermediate node or bps isn't 0 */
		if (!list_empty(&blkg->blkcg->css.children) ||
		    tg->bps[rw][td->limit_index])
			return UINT_MAX;
		else
			return MIN_THROTL_IOPS;
	}

	if (td->limit_index == LIMIT_MAX && tg->iops[rw][LIMIT_LOW] &&
	    tg->iops[rw][LIMIT_LOW] != tg->iops[rw][LIMIT_MAX]) {
		uint64_t adjusted;

		adjusted = throtl_adjusted_limit(tg->iops[rw][LIMIT_LOW], td);
		if (adjusted > UINT_MAX)
			adjusted = UINT_MAX;
		ret = min_t(unsigned int, tg->iops[rw][LIMIT_MAX], adjusted);
	}
	return ret;
}
#define request_bucket_index(sectors) \
	clamp_t(int, order_base_2(sectors) - 3, 0, LATENCY_BUCKET_SIZE - 1)
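
/*
 * Illustrative mapping (explanatory, not from the original source): a 4k
 * request is 8 sectors, so order_base_2(8) - 3 = 0 lands in bucket 0; a 1M
 * request is 2048 sectors, so order_base_2(2048) - 3 = 8 lands in the last
 * bucket.  Anything larger clamps to LATENCY_BUCKET_SIZE - 1.
 */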
/**
 * throtl_log - log debug message via blktrace
 * @sq: the service_queue being reported
 * @fmt: printf format string
 * @args: printf args
 *
 * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
 * throtl_grp; otherwise, just "throtl".
 */
#define throtl_log(sq, fmt, args...)	do {				\
	struct throtl_grp *__tg = sq_to_tg((sq));			\
	struct throtl_data *__td = sq_to_td((sq));			\
									\
	(void)__td;							\
	if (likely(!blk_trace_note_message_enabled(__td->queue)))	\
		break;							\
	if ((__tg)) {							\
		char __pbuf[128];					\
									\
		blkg_path(tg_to_blkg(__tg), __pbuf, sizeof(__pbuf));	\
		blk_add_trace_msg(__td->queue, "throtl %s " fmt, __pbuf, ##args); \
	} else {							\
		blk_add_trace_msg(__td->queue, "throtl " fmt, ##args);	\
	}								\
} while (0)
static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
{
	INIT_LIST_HEAD(&qn->node);
	bio_list_init(&qn->bios);
	qn->tg = tg;
}
/**
 * throtl_qnode_add_bio - add a bio to a throtl_qnode and activate it
 * @bio: bio being added
 * @qn: qnode to add bio to
 * @queued: the service_queue->queued[] list @qn belongs to
 *
 * Add @bio to @qn and put @qn on @queued if it's not already on.
 * @qn->tg's reference count is bumped when @qn is activated.  See the
 * comment on top of the throtl_qnode definition for details.
 */
static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,
				 struct list_head *queued)
{
	bio_list_add(&qn->bios, bio);
	if (list_empty(&qn->node)) {
		list_add_tail(&qn->node, queued);
		blkg_get(tg_to_blkg(qn->tg));
	}
}
/**
 * throtl_peek_queued - peek the first bio on a qnode list
 * @queued: the qnode list to peek
 */
static struct bio *throtl_peek_queued(struct list_head *queued)
{
	struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node);
	struct bio *bio;

	if (list_empty(queued))
		return NULL;

	bio = bio_list_peek(&qn->bios);
	WARN_ON_ONCE(!bio);
	return bio;
}
/**
 * throtl_pop_queued - pop the first bio from a qnode list
 * @queued: the qnode list to pop a bio from
 * @tg_to_put: optional out argument for throtl_grp to put
 *
 * Pop the first bio from the qnode list @queued.  After popping, the first
 * qnode is removed from @queued if empty or moved to the end of @queued so
 * that the popping order is round-robin.
 *
 * When the first qnode is removed, its associated throtl_grp should be put
 * too.  If @tg_to_put is NULL, this function automatically puts it;
 * otherwise, *@tg_to_put is set to the throtl_grp to put and the caller is
 * responsible for putting it.
 */
static struct bio *throtl_pop_queued(struct list_head *queued,
				     struct throtl_grp **tg_to_put)
{
	struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node);
	struct bio *bio;

	if (list_empty(queued))
		return NULL;

	bio = bio_list_pop(&qn->bios);
	WARN_ON_ONCE(!bio);

	if (bio_list_empty(&qn->bios)) {
		list_del_init(&qn->node);
		if (tg_to_put)
			*tg_to_put = qn->tg;
		else
			blkg_put(tg_to_blkg(qn->tg));
	} else {
		list_move_tail(&qn->node, queued);
	}

	return bio;
}
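
/*
 * Illustrative walk-through of the round-robin popping (not from the
 * original source): with qnodes A and B on @queued, A holding bios a1,a2
 * and B holding b1, successive calls pop a1 (A rotates behind B), then b1
 * (B becomes empty, is removed and its tg reference dropped), then a2 -
 * so no single source can monopolize the dispatch window.
 */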
/* init a service_queue, assumes the caller zeroed it */
static void throtl_service_queue_init(struct throtl_service_queue *sq)
{
	INIT_LIST_HEAD(&sq->queued[0]);
	INIT_LIST_HEAD(&sq->queued[1]);
	sq->pending_tree = RB_ROOT;
	setup_timer(&sq->pending_timer, throtl_pending_timer_fn,
		    (unsigned long)sq);
}
static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp, int node)
{
	struct throtl_grp *tg;
	int rw;

	tg = kzalloc_node(sizeof(*tg), gfp, node);
	if (!tg)
		return NULL;

	throtl_service_queue_init(&tg->service_queue);

	for (rw = READ; rw <= WRITE; rw++) {
		throtl_qnode_init(&tg->qnode_on_self[rw], tg);
		throtl_qnode_init(&tg->qnode_on_parent[rw], tg);
	}

	RB_CLEAR_NODE(&tg->rb_node);
	tg->bps[READ][LIMIT_MAX] = U64_MAX;
	tg->bps[WRITE][LIMIT_MAX] = U64_MAX;
	tg->iops[READ][LIMIT_MAX] = UINT_MAX;
	tg->iops[WRITE][LIMIT_MAX] = UINT_MAX;
	tg->bps_conf[READ][LIMIT_MAX] = U64_MAX;
	tg->bps_conf[WRITE][LIMIT_MAX] = U64_MAX;
	tg->iops_conf[READ][LIMIT_MAX] = UINT_MAX;
	tg->iops_conf[WRITE][LIMIT_MAX] = UINT_MAX;
	/* LIMIT_LOW will have default value 0 */

	tg->latency_target = DFL_LATENCY_TARGET;
	tg->latency_target_conf = DFL_LATENCY_TARGET;

	return &tg->pd;
}
static void throtl_pd_init(struct blkg_policy_data *pd)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	struct blkcg_gq *blkg = tg_to_blkg(tg);
	struct throtl_data *td = blkg->q->td;
	struct throtl_service_queue *sq = &tg->service_queue;

	/*
	 * If on the default hierarchy, we switch to properly hierarchical
	 * behavior where limits on a given throtl_grp are applied to the
	 * whole subtree rather than just the group itself.  e.g. If 16M
	 * read_bps limit is set on the root group, the whole system can't
	 * exceed 16M for the device.
	 *
	 * If not on the default hierarchy, the broken flat hierarchy
	 * behavior is retained where all throtl_grps are treated as if
	 * they're all separate root groups right below throtl_data.
	 * Limits of a group don't interact with limits of other groups
	 * regardless of the position of the group in the hierarchy.
	 */
	sq->parent_sq = &td->service_queue;
	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent)
		sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
	tg->td = td;

	tg->idletime_threshold = td->dft_idletime_threshold;
	tg->idletime_threshold_conf = td->dft_idletime_threshold;
}
/*
 * Set has_rules[] if @tg or any of its parents have limits configured.
 * This doesn't require walking up to the top of the hierarchy as the
 * parent's has_rules[] is guaranteed to be correct.
 */
static void tg_update_has_rules(struct throtl_grp *tg)
{
	struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq);
	struct throtl_data *td = tg->td;
	int rw;

	for (rw = READ; rw <= WRITE; rw++)
		tg->has_rules[rw] = (parent_tg && parent_tg->has_rules[rw]) ||
			(td->limit_valid[td->limit_index] &&
			 (tg_bps_limit(tg, rw) != U64_MAX ||
			  tg_iops_limit(tg, rw) != UINT_MAX));
}
static void throtl_pd_online(struct blkg_policy_data *pd)
{
	struct throtl_grp *tg = pd_to_tg(pd);

	/*
	 * We don't want new groups to escape the limits of their ancestors.
	 * Update has_rules[] after a new group is brought online.
	 */
	tg_update_has_rules(tg);
}
static void blk_throtl_update_limit_valid(struct throtl_data *td)
{
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *blkg;
	bool low_valid = false;

	rcu_read_lock();
	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
		struct throtl_grp *tg = blkg_to_tg(blkg);

		if (tg->bps[READ][LIMIT_LOW] || tg->bps[WRITE][LIMIT_LOW] ||
		    tg->iops[READ][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW])
			low_valid = true;
	}
	rcu_read_unlock();

	td->limit_valid[LIMIT_LOW] = low_valid;
}
static void throtl_upgrade_state(struct throtl_data *td);

static void throtl_pd_offline(struct blkg_policy_data *pd)
{
	struct throtl_grp *tg = pd_to_tg(pd);

	tg->bps[READ][LIMIT_LOW] = 0;
	tg->bps[WRITE][LIMIT_LOW] = 0;
	tg->iops[READ][LIMIT_LOW] = 0;
	tg->iops[WRITE][LIMIT_LOW] = 0;

	blk_throtl_update_limit_valid(tg->td);

	if (!tg->td->limit_valid[tg->td->limit_index])
		throtl_upgrade_state(tg->td);
}
static void throtl_pd_free(struct blkg_policy_data *pd)
{
	struct throtl_grp *tg = pd_to_tg(pd);

	del_timer_sync(&tg->service_queue.pending_timer);
	kfree(tg);
}
static struct throtl_grp *
throtl_rb_first(struct throtl_service_queue *parent_sq)
{
	/* Service tree is empty */
	if (!parent_sq->nr_pending)
		return NULL;

	if (!parent_sq->first_pending)
		parent_sq->first_pending = rb_first(&parent_sq->pending_tree);

	if (parent_sq->first_pending)
		return rb_entry_tg(parent_sq->first_pending);

	return NULL;
}
static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
	rb_erase(n, root);
	RB_CLEAR_NODE(n);
}
static void throtl_rb_erase(struct rb_node *n,
			    struct throtl_service_queue *parent_sq)
{
	if (parent_sq->first_pending == n)
		parent_sq->first_pending = NULL;
	rb_erase_init(n, &parent_sq->pending_tree);
	--parent_sq->nr_pending;
}
static void update_min_dispatch_time(struct throtl_service_queue *parent_sq)
{
	struct throtl_grp *tg;

	tg = throtl_rb_first(parent_sq);
	if (!tg)
		return;

	parent_sq->first_pending_disptime = tg->disptime;
}
static void tg_service_queue_add(struct throtl_grp *tg)
{
	struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq;
	struct rb_node **node = &parent_sq->pending_tree.rb_node;
	struct rb_node *parent = NULL;
	struct throtl_grp *__tg;
	unsigned long key = tg->disptime;
	int left = 1;

	while (*node != NULL) {
		parent = *node;
		__tg = rb_entry_tg(parent);

		if (time_before(key, __tg->disptime))
			node = &parent->rb_left;
		else {
			node = &parent->rb_right;
			left = 0;
		}
	}

	if (left)
		parent_sq->first_pending = &tg->rb_node;

	rb_link_node(&tg->rb_node, parent, node);
	rb_insert_color(&tg->rb_node, &parent_sq->pending_tree);
}
static void __throtl_enqueue_tg(struct throtl_grp *tg)
{
	tg_service_queue_add(tg);
	tg->flags |= THROTL_TG_PENDING;
	tg->service_queue.parent_sq->nr_pending++;
}

static void throtl_enqueue_tg(struct throtl_grp *tg)
{
	if (!(tg->flags & THROTL_TG_PENDING))
		__throtl_enqueue_tg(tg);
}

static void __throtl_dequeue_tg(struct throtl_grp *tg)
{
	throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq);
	tg->flags &= ~THROTL_TG_PENDING;
}

static void throtl_dequeue_tg(struct throtl_grp *tg)
{
	if (tg->flags & THROTL_TG_PENDING)
		__throtl_dequeue_tg(tg);
}
/* Call with queue lock held */
static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
					  unsigned long expires)
{
	unsigned long max_expire = jiffies + 8 * sq_to_tg(sq)->td->throtl_slice;

	/*
	 * Since we are adjusting the throttle limit dynamically, the sleep
	 * time calculated according to the previous limit might be invalid.
	 * It's possible the cgroup sleep time is very long and no other
	 * cgroups have IO running, so notify the limit changes.  Make sure
	 * the cgroup doesn't sleep too long to avoid missing the
	 * notification.
	 */
	if (time_after(expires, max_expire))
		expires = max_expire;
	mod_timer(&sq->pending_timer, expires);
	throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu",
		   expires - jiffies, jiffies);
}
/**
 * throtl_schedule_next_dispatch - schedule the next dispatch cycle
 * @sq: the service_queue to schedule dispatch for
 * @force: force scheduling
 *
 * Arm @sq->pending_timer so that the next dispatch cycle starts on the
 * dispatch time of the first pending child.  Returns %true if either the
 * timer is armed or there's no pending child left.  %false if the current
 * dispatch window is still open and the caller should continue
 * dispatching.
 *
 * If @force is %true, the dispatch timer is always scheduled and this
 * function is guaranteed to return %true.  This is to be used when the
 * caller can't dispatch itself and needs to invoke pending_timer
 * unconditionally.  Note that forced scheduling is likely to induce a short
 * delay before dispatch starts even if @sq->first_pending_disptime is not
 * in the future and thus shouldn't be used in hot paths.
 */
static bool throtl_schedule_next_dispatch(struct throtl_service_queue *sq,
					  bool force)
{
	/* any pending children left? */
	if (!sq->nr_pending)
		return true;

	update_min_dispatch_time(sq);

	/* is the next dispatch time in the future? */
	if (force || time_after(sq->first_pending_disptime, jiffies)) {
		throtl_schedule_pending_timer(sq, sq->first_pending_disptime);
		return true;
	}

	/* tell the caller to continue dispatching */
	return false;
}
static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
					bool rw, unsigned long start)
{
	tg->bytes_disp[rw] = 0;
	tg->io_disp[rw] = 0;

	/*
	 * Previous slice has expired.  We must have trimmed it after the
	 * last bio dispatch.  That means since the start of the last slice,
	 * we never used that bandwidth.  Do try to make use of that
	 * bandwidth while giving credit.
	 */
	if (time_after_eq(start, tg->slice_start[rw]))
		tg->slice_start[rw] = start;

	tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
	throtl_log(&tg->service_queue,
		   "[%c] new slice with credit start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
		   tg->slice_end[rw], jiffies);
}
static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
{
	tg->bytes_disp[rw] = 0;
	tg->io_disp[rw] = 0;
	tg->slice_start[rw] = jiffies;
	tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
	throtl_log(&tg->service_queue,
		   "[%c] new slice start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
		   tg->slice_end[rw], jiffies);
}
static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
					unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
}

static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
				       unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
	throtl_log(&tg->service_queue,
		   "[%c] extend slice start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
		   tg->slice_end[rw], jiffies);
}
/* Determine if the previously allocated or extended slice is complete */
static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
{
	if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
		return false;

	return true;
}
/* Trim the used slices and adjust slice start accordingly */
static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
{
	unsigned long nr_slices, time_elapsed, io_trim;
	u64 bytes_trim, tmp;

	BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));

	/*
	 * If bps are unlimited (-1), then the time slice doesn't get
	 * renewed.  Don't try to trim the slice if the slice is used.  A new
	 * slice will start when appropriate.
	 */
	if (throtl_slice_used(tg, rw))
		return;

	/*
	 * A bio has been dispatched.  Also adjust slice_end.  It might happen
	 * that initially the cgroup limit was very low resulting in a high
	 * slice_end, but later the limit was bumped up and the bio was
	 * dispatched sooner, then we need to reduce slice_end.  A high bogus
	 * slice_end is bad because it does not allow a new slice to start.
	 */
	throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice);

	time_elapsed = jiffies - tg->slice_start[rw];

	nr_slices = time_elapsed / tg->td->throtl_slice;

	if (!nr_slices)
		return;
	tmp = tg_bps_limit(tg, rw) * tg->td->throtl_slice * nr_slices;
	do_div(tmp, HZ);
	bytes_trim = tmp;

	io_trim = (tg_iops_limit(tg, rw) * tg->td->throtl_slice * nr_slices) /
		HZ;

	if (!bytes_trim && !io_trim)
		return;

	if (tg->bytes_disp[rw] >= bytes_trim)
		tg->bytes_disp[rw] -= bytes_trim;
	else
		tg->bytes_disp[rw] = 0;

	if (tg->io_disp[rw] >= io_trim)
		tg->io_disp[rw] -= io_trim;
	else
		tg->io_disp[rw] = 0;

	tg->slice_start[rw] += nr_slices * tg->td->throtl_slice;

	throtl_log(&tg->service_queue,
		   "[%c] trim slice nr=%lu bytes=%llu io=%lu start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
		   tg->slice_start[rw], tg->slice_end[rw], jiffies);
}
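
/*
 * Illustrative arithmetic for the trim above (numbers are not from the
 * original source): with HZ=1000, throtl_slice=100 jiffies and an 8 MB/s
 * bps limit, each fully elapsed slice trims 8MB * 100 / 1000, roughly
 * 800KB, from bytes_disp[] and advances slice_start[] by one slice, so
 * unused credit from old slices cannot pile up.
 */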
static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
				  unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned int io_allowed;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
	u64 tmp;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = tg->td->throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);

	/*
	 * jiffy_elapsed_rnd should not be a big value as minimum iops can be
	 * 1 then at max jiffy elapsed should be equivalent of 1 second as we
	 * will allow dispatch after 1 second and after that slice should
	 * have been trimmed.
	 */

	tmp = (u64)tg_iops_limit(tg, rw) * jiffy_elapsed_rnd;
	do_div(tmp, HZ);

	if (tmp > UINT_MAX)
		io_allowed = UINT_MAX;
	else
		io_allowed = tmp;

	if (tg->io_disp[rw] + 1 <= io_allowed) {
		if (wait)
			*wait = 0;
		return true;
	}

	/* Calc approx time to dispatch */
	jiffy_wait = ((tg->io_disp[rw] + 1) * HZ) / tg_iops_limit(tg, rw) + 1;

	if (jiffy_wait > jiffy_elapsed)
		jiffy_wait = jiffy_wait - jiffy_elapsed;
	else
		jiffy_wait = 1;

	if (wait)
		*wait = jiffy_wait;
	return false;
}
static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
				 unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	u64 bytes_allowed, extra_bytes, tmp;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = tg->td->throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);

	tmp = tg_bps_limit(tg, rw) * jiffy_elapsed_rnd;
	do_div(tmp, HZ);
	bytes_allowed = tmp;

	if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) {
		if (wait)
			*wait = 0;
		return true;
	}

	/* Calc approx time to dispatch */
	extra_bytes = tg->bytes_disp[rw] + bio->bi_iter.bi_size - bytes_allowed;
	jiffy_wait = div64_u64(extra_bytes * HZ, tg_bps_limit(tg, rw));

	if (!jiffy_wait)
		jiffy_wait = 1;

	/*
	 * This wait time is without taking into consideration the rounding
	 * up we did. Add that time also.
	 */
	jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
	if (wait)
		*wait = jiffy_wait;
	return false;
}
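
/*
 * Illustrative example of the wait computation above (numbers are not
 * from the original source): with a 1 MB/s limit, HZ=1000 and a
 * rounded-up slice of 100 jiffies, bytes_allowed is about 100KB.  A 256KB
 * bio arriving with nothing yet dispatched overflows by about 156KB and
 * gets jiffy_wait = 156KB * HZ / 1MB/s, roughly 152 jiffies, plus the
 * rounding-up compensation.
 */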
/*
 * Returns whether one can dispatch a bio or not.  Also returns the approx
 * number of jiffies to wait before this bio is within the IO rate and can
 * be dispatched.
 */
static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
			    unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;

	/*
	 * Currently the whole state machine of the group depends on the
	 * first bio queued in the group's bio list.  So one should not be
	 * calling this function with a different bio if there are other
	 * bios queued.
	 */
	BUG_ON(tg->service_queue.nr_queued[rw] &&
	       bio != throtl_peek_queued(&tg->service_queue.queued[rw]));

	/* If tg->bps = -1, then BW is unlimited */
	if (tg_bps_limit(tg, rw) == U64_MAX &&
	    tg_iops_limit(tg, rw) == UINT_MAX) {
		if (wait)
			*wait = 0;
		return true;
	}

	/*
	 * If the previous slice expired, start a new one, otherwise
	 * renew/extend the existing slice to make sure it is at least
	 * throtl_slice long since now.  A new slice is started only for an
	 * empty throttle group.  If there is a queued bio, that means there
	 * should be an active slice and it should be extended instead.
	 */
	if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw]))
		throtl_start_new_slice(tg, rw);
	else {
		if (time_before(tg->slice_end[rw],
		    jiffies + tg->td->throtl_slice))
			throtl_extend_slice(tg, rw,
				jiffies + tg->td->throtl_slice);
	}

	if (tg_with_in_bps_limit(tg, bio, &bps_wait) &&
	    tg_with_in_iops_limit(tg, bio, &iops_wait)) {
		if (wait)
			*wait = 0;
		return true;
	}

	max_wait = max(bps_wait, iops_wait);

	if (wait)
		*wait = max_wait;

	if (time_before(tg->slice_end[rw], jiffies + max_wait))
		throtl_extend_slice(tg, rw, jiffies + max_wait);

	return false;
}
static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
	bool rw = bio_data_dir(bio);

	/* Charge the bio to the group */
	tg->bytes_disp[rw] += bio->bi_iter.bi_size;
	tg->io_disp[rw]++;
	tg->last_bytes_disp[rw] += bio->bi_iter.bi_size;
	tg->last_io_disp[rw]++;

	/*
	 * BIO_THROTTLED is used to prevent the same bio from being throttled
	 * more than once, as a throttled bio will go through blk-throtl a
	 * second time when it eventually gets issued.  Set it when a bio
	 * is being charged to a tg.
	 */
	if (!bio_flagged(bio, BIO_THROTTLED))
		bio_set_flag(bio, BIO_THROTTLED);
}
/**
 * throtl_add_bio_tg - add a bio to the specified throtl_grp
 * @bio: bio to add
 * @qn: qnode to use
 * @tg: the target throtl_grp
 *
 * Add @bio to @tg's service_queue using @qn.  If @qn is not specified,
 * tg->qnode_on_self[] is used.
 */
static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn,
			      struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	bool rw = bio_data_dir(bio);

	if (!qn)
		qn = &tg->qnode_on_self[rw];

	/*
	 * If @tg doesn't currently have any bios queued in the same
	 * direction, queueing @bio can change when @tg should be
	 * dispatched.  Mark that @tg was empty.  This is automatically
	 * cleared on the next tg_update_disptime().
	 */
	if (!sq->nr_queued[rw])
		tg->flags |= THROTL_TG_WAS_EMPTY;

	throtl_qnode_add_bio(bio, qn, &sq->queued[rw]);

	sq->nr_queued[rw]++;
	throtl_enqueue_tg(tg);
}
static void tg_update_disptime(struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
	struct bio *bio;

	bio = throtl_peek_queued(&sq->queued[READ]);
	if (bio)
		tg_may_dispatch(tg, bio, &read_wait);

	bio = throtl_peek_queued(&sq->queued[WRITE]);
	if (bio)
		tg_may_dispatch(tg, bio, &write_wait);

	min_wait = min(read_wait, write_wait);
	disptime = jiffies + min_wait;

	/* Update dispatch time */
	throtl_dequeue_tg(tg);
	tg->disptime = disptime;
	throtl_enqueue_tg(tg);

	/* see throtl_add_bio_tg() */
	tg->flags &= ~THROTL_TG_WAS_EMPTY;
}
static void start_parent_slice_with_credit(struct throtl_grp *child_tg,
					   struct throtl_grp *parent_tg, bool rw)
{
	if (throtl_slice_used(parent_tg, rw)) {
		throtl_start_new_slice_with_credit(parent_tg, rw,
				child_tg->slice_start[rw]);
	}
}
static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	struct throtl_service_queue *parent_sq = sq->parent_sq;
	struct throtl_grp *parent_tg = sq_to_tg(parent_sq);
	struct throtl_grp *tg_to_put = NULL;
	struct bio *bio;

	/*
	 * @bio is being transferred from @tg to @parent_sq.  Popping a bio
	 * from @tg may put its reference and @parent_sq might end up
	 * getting released prematurely.  Remember the tg to put and put it
	 * after @bio is transferred to @parent_sq.
	 */
	bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
	sq->nr_queued[rw]--;

	throtl_charge_bio(tg, bio);

	/*
	 * If our parent is another tg, we just need to transfer @bio to
	 * the parent using throtl_add_bio_tg().  If our parent is
	 * @td->service_queue, @bio is ready to be issued.  Put it on its
	 * bio_lists[] and decrease the total number queued.  The caller is
	 * responsible for issuing these bios.
	 */
	if (parent_tg) {
		throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
		start_parent_slice_with_credit(tg, parent_tg, rw);
	} else {
		throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
				     &parent_sq->queued[rw]);
		BUG_ON(tg->td->nr_queued[rw] <= 0);
		tg->td->nr_queued[rw]--;
	}

	throtl_trim_slice(tg, rw);

	if (tg_to_put)
		blkg_put(tg_to_blkg(tg_to_put));
}
static int throtl_dispatch_tg(struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	unsigned int nr_reads = 0, nr_writes = 0;
	unsigned int max_nr_reads = throtl_grp_quantum * 3 / 4;
	unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
	struct bio *bio;

	/* Try to dispatch 75% READS and 25% WRITES */

	while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
	       tg_may_dispatch(tg, bio, NULL)) {

		tg_dispatch_one_bio(tg, bio_data_dir(bio));
		nr_reads++;

		if (nr_reads >= max_nr_reads)
			break;
	}

	while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
	       tg_may_dispatch(tg, bio, NULL)) {

		tg_dispatch_one_bio(tg, bio_data_dir(bio));
		nr_writes++;

		if (nr_writes >= max_nr_writes)
			break;
	}

	return nr_reads + nr_writes;
}
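
/*
 * With the default throtl_grp_quantum of 8, max_nr_reads works out to 6
 * and max_nr_writes to 2 per round, which is where the 75%/25% read/write
 * split mentioned above comes from (this note is explanatory, not part of
 * the original source).
 */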
static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
{
	unsigned int nr_disp = 0;

	while (1) {
		struct throtl_grp *tg = throtl_rb_first(parent_sq);
		struct throtl_service_queue *sq;

		if (!tg)
			break;

		sq = &tg->service_queue;

		if (time_before(jiffies, tg->disptime))
			break;

		throtl_dequeue_tg(tg);

		nr_disp += throtl_dispatch_tg(tg);

		if (sq->nr_queued[0] || sq->nr_queued[1])
			tg_update_disptime(tg);

		if (nr_disp >= throtl_quantum)
			break;
	}

	return nr_disp;
}
static bool throtl_can_upgrade(struct throtl_data *td,
			       struct throtl_grp *this_tg);
/**
 * throtl_pending_timer_fn - timer function for service_queue->pending_timer
 * @arg: the throtl_service_queue being serviced
 *
 * This timer is armed when a child throtl_grp with active bio's becomes
 * pending and queued on the service_queue's pending_tree and expires when
 * the first child throtl_grp should be dispatched.  This function
 * dispatches bio's from the children throtl_grps to the parent
 * service_queue.
 *
 * If the parent's parent is another throtl_grp, dispatching is propagated
 * by either arming its pending_timer or repeating dispatch directly.  If
 * the top-level service_tree is reached, throtl_data->dispatch_work is
 * kicked so that the ready bio's are issued.
 */
static void throtl_pending_timer_fn(unsigned long arg)
{
	struct throtl_service_queue *sq = (void *)arg;
	struct throtl_grp *tg = sq_to_tg(sq);
	struct throtl_data *td = sq_to_td(sq);
	struct request_queue *q = td->queue;
	struct throtl_service_queue *parent_sq;
	bool dispatched;
	int ret;

	spin_lock_irq(q->queue_lock);
	if (throtl_can_upgrade(td, NULL))
		throtl_upgrade_state(td);

again:
	parent_sq = sq->parent_sq;
	dispatched = false;

	while (true) {
		throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u",
			   sq->nr_queued[READ] + sq->nr_queued[WRITE],
			   sq->nr_queued[READ], sq->nr_queued[WRITE]);

		ret = throtl_select_dispatch(sq);
		if (ret) {
			throtl_log(sq, "bios disp=%u", ret);
			dispatched = true;
		}

		if (throtl_schedule_next_dispatch(sq, false))
			break;

		/* this dispatch window is still open, relax and repeat */
		spin_unlock_irq(q->queue_lock);
		cpu_relax();
		spin_lock_irq(q->queue_lock);
	}

	if (!dispatched)
		goto out_unlock;

	if (parent_sq) {
		/* @parent_sq is another throtl_grp, propagate dispatch */
		if (tg->flags & THROTL_TG_WAS_EMPTY) {
			tg_update_disptime(tg);
			if (!throtl_schedule_next_dispatch(parent_sq, false)) {
				/* window is already open, repeat dispatching */
				sq = parent_sq;
				tg = sq_to_tg(sq);
				goto again;
			}
		}
	} else {
		/* reached the top-level, queue issuing */
		queue_work(kthrotld_workqueue, &td->dispatch_work);
	}
out_unlock:
	spin_unlock_irq(q->queue_lock);
}
/**
 * blk_throtl_dispatch_work_fn - work function for throtl_data->dispatch_work
 * @work: work item being executed
 *
 * This function is queued for execution when bio's reach the bio_lists[]
 * of throtl_data->service_queue.  Those bio's are ready and issued by this
 * function.
 */
static void blk_throtl_dispatch_work_fn(struct work_struct *work)
{
	struct throtl_data *td = container_of(work, struct throtl_data,
					      dispatch_work);
	struct throtl_service_queue *td_sq = &td->service_queue;
	struct request_queue *q = td->queue;
	struct bio_list bio_list_on_stack;
	struct bio *bio;
	struct blk_plug plug;
	int rw;

	bio_list_init(&bio_list_on_stack);

	spin_lock_irq(q->queue_lock);
	for (rw = READ; rw <= WRITE; rw++)
		while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
			bio_list_add(&bio_list_on_stack, bio);
	spin_unlock_irq(q->queue_lock);

	if (!bio_list_empty(&bio_list_on_stack)) {
		blk_start_plug(&plug);
		while ((bio = bio_list_pop(&bio_list_on_stack)))
			generic_make_request(bio);
		blk_finish_plug(&plug);
	}
}
static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
			      int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	u64 v = *(u64 *)((void *)tg + off);

	if (v == U64_MAX)
		return 0;
	return __blkg_prfill_u64(sf, pd, v);
}
static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
			       int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	unsigned int v = *(unsigned int *)((void *)tg + off);

	if (v == UINT_MAX)
		return 0;
	return __blkg_prfill_u64(sf, pd, v);
}
static int tg_print_conf_u64(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_u64,
			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
	return 0;
}

static int tg_print_conf_uint(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_uint,
			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
	return 0;
}
static void tg_conf_updated(struct throtl_grp *tg, bool global)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *blkg;

	throtl_log(&tg->service_queue,
		   "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
		   tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE),
		   tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE));

	/*
	 * Update has_rules[] flags for the updated tg's subtree.  A tg is
	 * considered to have rules if either the tg itself or any of its
	 * ancestors has rules.  This identifies groups without any
	 * restrictions in the whole hierarchy and allows them to bypass
	 * blk-throttle.
	 */
	blkg_for_each_descendant_pre(blkg, pos_css,
			global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) {
		struct throtl_grp *this_tg = blkg_to_tg(blkg);
		struct throtl_grp *parent_tg;

		tg_update_has_rules(this_tg);
		/* ignore root/second level */
		if (!cgroup_subsys_on_dfl(io_cgrp_subsys) || !blkg->parent ||
		    !blkg->parent->parent)
			continue;
		parent_tg = blkg_to_tg(blkg->parent);
		/*
		 * make sure all children have a lower idle time threshold
		 * and a higher latency target
		 */
		this_tg->idletime_threshold = min(this_tg->idletime_threshold,
				parent_tg->idletime_threshold);
		this_tg->latency_target = max(this_tg->latency_target,
				parent_tg->latency_target);
	}

	/*
	 * We're already holding queue_lock and know @tg is valid.  Let's
	 * apply the new config directly.
	 *
	 * Restart the slices for both READ and WRITES.  It might happen
	 * that a group's limits are dropped suddenly and we don't want to
	 * account recently dispatched IO against the new low rate.
	 */
	throtl_start_new_slice(tg, 0);
	throtl_start_new_slice(tg, 1);

	if (tg->flags & THROTL_TG_PENDING) {
		tg_update_disptime(tg);
		throtl_schedule_next_dispatch(sq->parent_sq, true);
	}
}
static ssize_t tg_set_conf(struct kernfs_open_file *of,
			   char *buf, size_t nbytes, loff_t off, bool is_u64)
{
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct blkg_conf_ctx ctx;
	struct throtl_grp *tg;
	int ret;
	u64 v;

	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
	if (ret)
		return ret;

	ret = -EINVAL;
	if (sscanf(ctx.body, "%llu", &v) != 1)
		goto out_finish;
	if (!v)
		v = U64_MAX;

	tg = blkg_to_tg(ctx.blkg);

	if (is_u64)
		*(u64 *)((void *)tg + of_cft(of)->private) = v;
	else
		*(unsigned int *)((void *)tg + of_cft(of)->private) = v;

	tg_conf_updated(tg, false);
	ret = 0;
out_finish:
	blkg_conf_finish(&ctx);
	return ret ?: nbytes;
}
static ssize_t tg_set_conf_u64(struct kernfs_open_file *of,
			       char *buf, size_t nbytes, loff_t off)
{
	return tg_set_conf(of, buf, nbytes, off, true);
}

static ssize_t tg_set_conf_uint(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	return tg_set_conf(of, buf, nbytes, off, false);
}
static struct cftype throtl_legacy_files[] = {
	{
		.name = "throttle.read_bps_device",
		.private = offsetof(struct throtl_grp, bps[READ][LIMIT_MAX]),
		.seq_show = tg_print_conf_u64,
		.write = tg_set_conf_u64,
	},
	{
		.name = "throttle.write_bps_device",
		.private = offsetof(struct throtl_grp, bps[WRITE][LIMIT_MAX]),
		.seq_show = tg_print_conf_u64,
		.write = tg_set_conf_u64,
	},
	{
		.name = "throttle.read_iops_device",
		.private = offsetof(struct throtl_grp, iops[READ][LIMIT_MAX]),
		.seq_show = tg_print_conf_uint,
		.write = tg_set_conf_uint,
	},
	{
		.name = "throttle.write_iops_device",
		.private = offsetof(struct throtl_grp, iops[WRITE][LIMIT_MAX]),
		.seq_show = tg_print_conf_uint,
		.write = tg_set_conf_uint,
	},
	{
		.name = "throttle.io_service_bytes",
		.private = (unsigned long)&blkcg_policy_throtl,
		.seq_show = blkg_print_stat_bytes,
	},
	{
		.name = "throttle.io_serviced",
		.private = (unsigned long)&blkcg_policy_throtl,
		.seq_show = blkg_print_stat_ios,
	},
	{ }	/* terminate */
};
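
/*
 * Example usage of the legacy (cgroup v1) interface above; the device
 * numbers and group name are illustrative, not from the original source.
 * Limit reads on device 8:16 to 2 MB/s for a group:
 *
 *   echo "8:16 2097152" > /sys/fs/cgroup/blkio/grp/blkio.throttle.read_bps_device
 *
 * Writing 0 for the value clears the limit again.
 */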
static u64 tg_prfill_limit(struct seq_file *sf, struct blkg_policy_data *pd,
			   int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	const char *dname = blkg_dev_name(pd->blkg);
	char bufs[4][21] = { "max", "max", "max", "max" };
	u64 bps_dft;
	unsigned int iops_dft;
	char idle_time[26] = "";
	char latency_time[26] = "";

	if (!dname)
		return 0;

	if (off == LIMIT_LOW) {
		bps_dft = 0;
		iops_dft = 0;
	} else {
		bps_dft = U64_MAX;
		iops_dft = UINT_MAX;
	}

	if (tg->bps_conf[READ][off] == bps_dft &&
	    tg->bps_conf[WRITE][off] == bps_dft &&
	    tg->iops_conf[READ][off] == iops_dft &&
	    tg->iops_conf[WRITE][off] == iops_dft &&
	    (off != LIMIT_LOW ||
	     (tg->idletime_threshold_conf == tg->td->dft_idletime_threshold &&
	      tg->latency_target_conf == DFL_LATENCY_TARGET)))
		return 0;

	if (tg->bps_conf[READ][off] != U64_MAX)
		snprintf(bufs[0], sizeof(bufs[0]), "%llu",
			tg->bps_conf[READ][off]);
	if (tg->bps_conf[WRITE][off] != U64_MAX)
		snprintf(bufs[1], sizeof(bufs[1]), "%llu",
			tg->bps_conf[WRITE][off]);
	if (tg->iops_conf[READ][off] != UINT_MAX)
		snprintf(bufs[2], sizeof(bufs[2]), "%u",
			tg->iops_conf[READ][off]);
	if (tg->iops_conf[WRITE][off] != UINT_MAX)
		snprintf(bufs[3], sizeof(bufs[3]), "%u",
			tg->iops_conf[WRITE][off]);
	if (off == LIMIT_LOW) {
		if (tg->idletime_threshold_conf == ULONG_MAX)
			strcpy(idle_time, " idle=max");
		else
			snprintf(idle_time, sizeof(idle_time), " idle=%lu",
				tg->idletime_threshold_conf);

		if (tg->latency_target_conf == ULONG_MAX)
			strcpy(latency_time, " latency=max");
		else
			snprintf(latency_time, sizeof(latency_time),
				" latency=%lu", tg->latency_target_conf);
	}

	seq_printf(sf, "%s rbps=%s wbps=%s riops=%s wiops=%s%s%s\n",
		   dname, bufs[0], bufs[1], bufs[2], bufs[3], idle_time,
		   latency_time);
	return 0;
}
static int tg_print_limit(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_limit,
			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
	return 0;
}
static ssize_t tg_set_limit(struct kernfs_open_file *of,
			    char *buf, size_t nbytes, loff_t off)
{
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct blkg_conf_ctx ctx;
	struct throtl_grp *tg;
	u64 v[4];
	unsigned long idle_time;
	unsigned long latency_time;
	int ret;
	int index = of_cft(of)->private;

	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
	if (ret)
		return ret;

	tg = blkg_to_tg(ctx.blkg);

	v[0] = tg->bps_conf[READ][index];
	v[1] = tg->bps_conf[WRITE][index];
	v[2] = tg->iops_conf[READ][index];
	v[3] = tg->iops_conf[WRITE][index];

	idle_time = tg->idletime_threshold_conf;
	latency_time = tg->latency_target_conf;
	while (true) {
		char tok[27];	/* wiops=18446744073709551616 */
		char *p;
		u64 val = U64_MAX;
		int len;

		if (sscanf(ctx.body, "%26s%n", tok, &len) != 1)
			break;
		if (tok[0] == '\0')
			break;
		ctx.body += len;

		ret = -EINVAL;
		p = tok;
		strsep(&p, "=");
		if (!p || (sscanf(p, "%llu", &val) != 1 && strcmp(p, "max")))
			goto out_finish;

		ret = -ERANGE;
		if (!val)
			goto out_finish;

		ret = -EINVAL;
		if (!strcmp(tok, "rbps"))
			v[0] = val;
		else if (!strcmp(tok, "wbps"))
			v[1] = val;
		else if (!strcmp(tok, "riops"))
			v[2] = min_t(u64, val, UINT_MAX);
		else if (!strcmp(tok, "wiops"))
			v[3] = min_t(u64, val, UINT_MAX);
		else if (off == LIMIT_LOW && !strcmp(tok, "idle"))
			idle_time = val;
		else if (off == LIMIT_LOW && !strcmp(tok, "latency"))
			latency_time = val;
		else
			goto out_finish;
	}

	tg->bps_conf[READ][index] = v[0];
	tg->bps_conf[WRITE][index] = v[1];
	tg->iops_conf[READ][index] = v[2];
	tg->iops_conf[WRITE][index] = v[3];

	if (index == LIMIT_MAX) {
		tg->bps[READ][index] = v[0];
		tg->bps[WRITE][index] = v[1];
		tg->iops[READ][index] = v[2];
		tg->iops[WRITE][index] = v[3];
	}
	tg->bps[READ][LIMIT_LOW] = min(tg->bps_conf[READ][LIMIT_LOW],
		tg->bps_conf[READ][LIMIT_MAX]);
	tg->bps[WRITE][LIMIT_LOW] = min(tg->bps_conf[WRITE][LIMIT_LOW],
		tg->bps_conf[WRITE][LIMIT_MAX]);
	tg->iops[READ][LIMIT_LOW] = min(tg->iops_conf[READ][LIMIT_LOW],
		tg->iops_conf[READ][LIMIT_MAX]);
	tg->iops[WRITE][LIMIT_LOW] = min(tg->iops_conf[WRITE][LIMIT_LOW],
		tg->iops_conf[WRITE][LIMIT_MAX]);

	if (index == LIMIT_LOW) {
		blk_throtl_update_limit_valid(tg->td);
		if (tg->td->limit_valid[LIMIT_LOW])
			tg->td->limit_index = LIMIT_LOW;
		tg->idletime_threshold_conf = idle_time;
		tg->idletime_threshold = tg->idletime_threshold_conf;
		tg->latency_target_conf = latency_time;
		tg->latency_target = tg->latency_target_conf;
	}
	tg_conf_updated(tg, index == LIMIT_LOW &&
		tg->td->limit_valid[LIMIT_LOW]);
	ret = 0;
out_finish:
	blkg_conf_finish(&ctx);
	return ret ?: nbytes;
}
static struct cftype throtl_files[] = {
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	{
		.name = "low",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = tg_print_limit,
		.write = tg_set_limit,
		.private = LIMIT_LOW,
	},
#endif
	{
		.name = "max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = tg_print_limit,
		.write = tg_set_limit,
		.private = LIMIT_MAX,
	},
	{ }	/* terminate */
};
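
/*
 * Example usage of the "max" file above on the unified (v2) hierarchy;
 * the device numbers and group name are illustrative, not from the
 * original source.  Cap a group at 2 MB/s reads and 120 write IOPS:
 *
 *   echo "8:16 rbps=2097152 wiops=120" > /sys/fs/cgroup/grp/io.max
 *
 * Writing "max" for a key removes that key's limit.
 */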
static void throtl_shutdown_wq(struct request_queue *q)
{
	struct throtl_data *td = q->td;

	cancel_work_sync(&td->dispatch_work);
}
static struct blkcg_policy blkcg_policy_throtl = {
	.dfl_cftypes		= throtl_files,
	.legacy_cftypes		= throtl_legacy_files,

	.pd_alloc_fn		= throtl_pd_alloc,
	.pd_init_fn		= throtl_pd_init,
	.pd_online_fn		= throtl_pd_online,
	.pd_offline_fn		= throtl_pd_offline,
	.pd_free_fn		= throtl_pd_free,
};
static unsigned long __tg_last_low_overflow_time(struct throtl_grp *tg)
{
	unsigned long rtime = jiffies, wtime = jiffies;

	if (tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW])
		rtime = tg->last_low_overflow_time[READ];
	if (tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW])
		wtime = tg->last_low_overflow_time[WRITE];
	return min(rtime, wtime);
}
/* tg should not be an intermediate node */
static unsigned long tg_last_low_overflow_time(struct throtl_grp *tg)
{
	struct throtl_service_queue *parent_sq;
	struct throtl_grp *parent = tg;
	unsigned long ret = __tg_last_low_overflow_time(tg);

	while (true) {
		parent_sq = parent->service_queue.parent_sq;
		parent = sq_to_tg(parent_sq);
		if (!parent)
			break;

		/*
		 * The parent doesn't have a low limit, so it always reaches
		 * the low limit.  Its overflow time is useless for children.
		 */
		if (!parent->bps[READ][LIMIT_LOW] &&
		    !parent->iops[READ][LIMIT_LOW] &&
		    !parent->bps[WRITE][LIMIT_LOW] &&
		    !parent->iops[WRITE][LIMIT_LOW])
			continue;
		if (time_after(__tg_last_low_overflow_time(parent), ret))
			ret = __tg_last_low_overflow_time(parent);
	}

	return ret;
}
static bool throtl_tg_is_idle(struct throtl_grp *tg)
{
	/*
	 * cgroup is idle if:
	 * - a single idle is too long, longer than a fixed value (in case
	 *   the user configures a too big threshold) or 4 times of slice
	 * - average think time is more than threshold
	 * - IO latency is largely below threshold
	 */
	unsigned long time = jiffies_to_usecs(4 * tg->td->throtl_slice);
	bool ret;

	time = min_t(unsigned long, MAX_IDLE_TIME, time);
	ret = (ktime_get_ns() >> 10) - tg->last_finish_time > time ||
	      tg->avg_idletime > tg->idletime_threshold ||
	      (tg->latency_target && tg->bio_cnt &&
		tg->bad_bio_cnt * 5 < tg->bio_cnt);
	throtl_log(&tg->service_queue,
		"avg_idle=%ld, idle_threshold=%ld, bad_bio=%d, total_bio=%d, is_idle=%d, scale=%d",
		tg->avg_idletime, tg->idletime_threshold, tg->bad_bio_cnt,
		tg->bio_cnt, ret, tg->td->scale);
	return ret;
}
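
/*
 * Note on the latency term above (explanatory, not from the original
 * source): "bad_bio_cnt * 5 < bio_cnt" means fewer than 20% of the
 * group's bios exceeded their latency target, which is treated as the
 * latency being "largely below threshold".
 */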
static bool throtl_tg_can_upgrade(struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	bool read_limit, write_limit;

	/*
	 * if cgroup reaches low limit (if low limit is 0, the cgroup always
	 * reaches), it's ok to upgrade to next limit
	 */
	read_limit = tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW];
	write_limit = tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW];
	if (!read_limit && !write_limit)
		return true;
	if (read_limit && sq->nr_queued[READ] &&
	    (!write_limit || sq->nr_queued[WRITE]))
		return true;
	if (write_limit && sq->nr_queued[WRITE] &&
	    (!read_limit || sq->nr_queued[READ]))
		return true;

	if (time_after_eq(jiffies,
		tg_last_low_overflow_time(tg) + tg->td->throtl_slice) &&
	    throtl_tg_is_idle(tg))
		return true;
	return false;
}
static bool throtl_hierarchy_can_upgrade(struct throtl_grp *tg)
{
	while (true) {
		if (throtl_tg_can_upgrade(tg))
			return true;
		tg = sq_to_tg(tg->service_queue.parent_sq);
		if (!tg || !tg_to_blkg(tg)->parent)
			return false;
	}
	return false;
}
static bool throtl_can_upgrade(struct throtl_data *td,
			       struct throtl_grp *this_tg)
{
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *blkg;

	if (td->limit_index != LIMIT_LOW)
		return false;

	if (time_before(jiffies, td->low_downgrade_time + td->throtl_slice))
		return false;

	rcu_read_lock();
	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
		struct throtl_grp *tg = blkg_to_tg(blkg);

		if (tg == this_tg)
			continue;
		if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
			continue;
		if (!throtl_hierarchy_can_upgrade(tg)) {
			rcu_read_unlock();
			return false;
		}
	}
	rcu_read_unlock();
	return true;
}
static void throtl_upgrade_check(struct throtl_grp *tg)
{
	unsigned long now = jiffies;

	if (tg->td->limit_index != LIMIT_LOW)
		return;

	if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
		return;

	tg->last_check_time = now;

	if (!time_after_eq(now,
	     __tg_last_low_overflow_time(tg) + tg->td->throtl_slice))
		return;

	if (throtl_can_upgrade(tg->td, NULL))
		throtl_upgrade_state(tg->td);
}
static void throtl_upgrade_state(struct throtl_data *td)
{
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *blkg;

	throtl_log(&td->service_queue, "upgrade to max");
	td->limit_index = LIMIT_MAX;
	td->low_upgrade_time = jiffies;
	td->scale = 0;
	rcu_read_lock();
	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
		struct throtl_grp *tg = blkg_to_tg(blkg);
		struct throtl_service_queue *sq = &tg->service_queue;

		tg->disptime = jiffies - 1;
		throtl_select_dispatch(sq);
		throtl_schedule_next_dispatch(sq, false);
	}
	rcu_read_unlock();
	throtl_select_dispatch(&td->service_queue);
	throtl_schedule_next_dispatch(&td->service_queue, false);
	queue_work(kthrotld_workqueue, &td->dispatch_work);
}
static void throtl_downgrade_state(struct throtl_data *td, int new)
{
	td->scale /= 2;

	throtl_log(&td->service_queue, "downgrade, scale %d", td->scale);
	if (td->scale) {
		td->low_upgrade_time = jiffies - td->scale * td->throtl_slice;
		return;
	}

	td->limit_index = new;
	td->low_downgrade_time = jiffies;
}
static bool throtl_tg_can_downgrade(struct throtl_grp *tg)
{
	struct throtl_data *td = tg->td;
	unsigned long now = jiffies;

	/*
	 * If the cgroup is below its low limit, consider downgrade and
	 * throttle other cgroups
	 */
	if (time_after_eq(now, td->low_upgrade_time + td->throtl_slice) &&
	    time_after_eq(now, tg_last_low_overflow_time(tg) +
					td->throtl_slice) &&
	    (!throtl_tg_is_idle(tg) ||
	     !list_empty(&tg_to_blkg(tg)->blkcg->css.children)))
		return true;
	return false;
}
static bool throtl_hierarchy_can_downgrade(struct throtl_grp *tg)
{
	while (true) {
		if (!throtl_tg_can_downgrade(tg))
			return false;
		tg = sq_to_tg(tg->service_queue.parent_sq);
		if (!tg || !tg_to_blkg(tg)->parent)
			break;
	}
	return true;
}
static void throtl_downgrade_check(struct throtl_grp *tg)
{
	uint64_t bps;
	unsigned int iops;
	unsigned long elapsed_time;
	unsigned long now = jiffies;

	if (tg->td->limit_index != LIMIT_MAX ||
	    !tg->td->limit_valid[LIMIT_LOW])
		return;
	if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
		return;
	if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
		return;

	elapsed_time = now - tg->last_check_time;
	tg->last_check_time = now;

	if (time_before(now, tg_last_low_overflow_time(tg) +
			tg->td->throtl_slice))
		return;

	if (tg->bps[READ][LIMIT_LOW]) {
		bps = tg->last_bytes_disp[READ] * HZ;
		do_div(bps, elapsed_time);
		if (bps >= tg->bps[READ][LIMIT_LOW])
			tg->last_low_overflow_time[READ] = now;
	}

	if (tg->bps[WRITE][LIMIT_LOW]) {
		bps = tg->last_bytes_disp[WRITE] * HZ;
		do_div(bps, elapsed_time);
		if (bps >= tg->bps[WRITE][LIMIT_LOW])
			tg->last_low_overflow_time[WRITE] = now;
	}

	if (tg->iops[READ][LIMIT_LOW]) {
		iops = tg->last_io_disp[READ] * HZ / elapsed_time;
		if (iops >= tg->iops[READ][LIMIT_LOW])
			tg->last_low_overflow_time[READ] = now;
	}

	if (tg->iops[WRITE][LIMIT_LOW]) {
		iops = tg->last_io_disp[WRITE] * HZ / elapsed_time;
		if (iops >= tg->iops[WRITE][LIMIT_LOW])
			tg->last_low_overflow_time[WRITE] = now;
	}

	/*
	 * If the cgroup is below its low limit, consider downgrade and
	 * throttle other cgroups
	 */
	if (throtl_hierarchy_can_downgrade(tg))
		throtl_downgrade_state(tg->td, LIMIT_LOW);

	tg->last_bytes_disp[READ] = 0;
	tg->last_bytes_disp[WRITE] = 0;
	tg->last_io_disp[READ] = 0;
	tg->last_io_disp[WRITE] = 0;
}
static void blk_throtl_update_idletime(struct throtl_grp *tg)
{
	unsigned long now = ktime_get_ns() >> 10;
	unsigned long last_finish_time = tg->last_finish_time;

	if (now <= last_finish_time || last_finish_time == 0 ||
	    last_finish_time == tg->checked_last_finish_time)
		return;

	tg->avg_idletime = (tg->avg_idletime * 7 + now - last_finish_time) >> 3;
	tg->checked_last_finish_time = last_finish_time;
}
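
/*
 * The update above is an exponentially weighted moving average with 7/8
 * weight on history.  Illustrative numbers (not from the original
 * source): with avg_idletime = 800 and a new idle gap of 1600 (both in
 * ns/1024 units), the new average is (800 * 7 + 1600) >> 3 = 900.
 */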
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
static void throtl_update_latency_buckets(struct throtl_data *td)
{
        struct avg_latency_bucket avg_latency[LATENCY_BUCKET_SIZE];
        int i, cpu;
        unsigned long last_latency = 0;
        unsigned long latency;

        if (!blk_queue_nonrot(td->queue))
                return;
        if (time_before(jiffies, td->last_calculate_time + HZ))
                return;
        td->last_calculate_time = jiffies;

        memset(avg_latency, 0, sizeof(avg_latency));
        for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
                struct latency_bucket *tmp = &td->tmp_buckets[i];

                for_each_possible_cpu(cpu) {
                        struct latency_bucket *bucket;

                        /* this isn't race free, but ok in practice */
                        bucket = per_cpu_ptr(td->latency_buckets, cpu);
                        tmp->total_latency += bucket[i].total_latency;
                        tmp->samples += bucket[i].samples;
                        bucket[i].total_latency = 0;
                        bucket[i].samples = 0;
                }

                if (tmp->samples >= 32) {
                        int samples = tmp->samples;

                        latency = tmp->total_latency;

                        tmp->total_latency = 0;
                        tmp->samples = 0;
                        latency /= samples;
                        if (latency == 0)
                                continue;
                        avg_latency[i].latency = latency;
                }
        }

        for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
                if (!avg_latency[i].latency) {
                        if (td->avg_buckets[i].latency < last_latency)
                                td->avg_buckets[i].latency = last_latency;
                        continue;
                }

                if (!td->avg_buckets[i].valid)
                        latency = avg_latency[i].latency;
                else
                        latency = (td->avg_buckets[i].latency * 7 +
                                avg_latency[i].latency) >> 3;

                td->avg_buckets[i].latency = max(latency, last_latency);
                td->avg_buckets[i].valid = true;
                last_latency = td->avg_buckets[i].latency;
        }

        for (i = 0; i < LATENCY_BUCKET_SIZE; i++)
                throtl_log(&td->service_queue,
                        "Latency bucket %d: latency=%ld, valid=%d", i,
                        td->avg_buckets[i].latency, td->avg_buckets[i].valid);
}
#else
static inline void throtl_update_latency_buckets(struct throtl_data *td)
{
}
#endif
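/*
 * Editor's note with assumed numbers: the second loop above keeps the
 * per-bucket averages monotonically non-decreasing in bucket (request
 * size) order. If bucket 0 settles at 800us and bucket 1's fresh EWMA
 * would be 750us, max(latency, last_latency) lifts bucket 1 back to
 * 800us, presumably because a larger request should never be expected to
 * complete faster than a smaller one.
 */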
static void blk_throtl_assoc_bio(struct throtl_grp *tg, struct bio *bio)
{
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
        int ret;

        ret = bio_associate_current(bio);
        if (ret == 0 || ret == -EBUSY)
                bio->bi_cg_private = tg;
        blk_stat_set_issue(&bio->bi_issue_stat, bio_sectors(bio));
#else
        bio_associate_current(bio);
#endif
}
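/*
 * Editor's note: bio_associate_current() returns -EBUSY when the bio
 * already has an associated io context, so both 0 and -EBUSY above mean
 * the bio has a known owner and it is safe to stash @tg in bi_cg_private
 * for blk_throtl_bio_endio() to pick up at completion time.
 */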
bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
                    struct bio *bio)
{
        struct throtl_qnode *qn = NULL;
        struct throtl_grp *tg = blkg_to_tg(blkg ?: q->root_blkg);
        struct throtl_service_queue *sq;
        bool rw = bio_data_dir(bio);
        bool throttled = false;
        struct throtl_data *td = tg->td;

        WARN_ON_ONCE(!rcu_read_lock_held());

        /* see throtl_charge_bio() */
        if (bio_flagged(bio, BIO_THROTTLED) || !tg->has_rules[rw])
                goto out;

        spin_lock_irq(q->queue_lock);

        throtl_update_latency_buckets(td);

        if (unlikely(blk_queue_bypass(q)))
                goto out_unlock;

        blk_throtl_assoc_bio(tg, bio);
        blk_throtl_update_idletime(tg);

        sq = &tg->service_queue;

again:
        while (true) {
                if (tg->last_low_overflow_time[rw] == 0)
                        tg->last_low_overflow_time[rw] = jiffies;
                throtl_downgrade_check(tg);
                throtl_upgrade_check(tg);
                /* throtl is FIFO - if bios are already queued, should queue */
                if (sq->nr_queued[rw])
                        break;

                /* if above limits, break to queue */
                if (!tg_may_dispatch(tg, bio, NULL)) {
                        tg->last_low_overflow_time[rw] = jiffies;
                        if (throtl_can_upgrade(td, tg)) {
                                throtl_upgrade_state(td);
                                goto again;
                        }
                        break;
                }

                /* within limits, let's charge and dispatch directly */
                throtl_charge_bio(tg, bio);

                /*
                 * We need to trim the slice even when bios are not being
                 * queued; otherwise a bio might not be queued for a long
                 * time, the slice keeps extending, and trim never gets
                 * called. If the limits are then reduced suddenly, all the
                 * IO dispatched so far is accounted against the new low
                 * rate and newly queued IO gets a really long dispatch
                 * time.
                 *
                 * So keep on trimming the slice even if no bio is queued.
                 */
                throtl_trim_slice(tg, rw);

                /*
                 * @bio passed through this layer without being throttled.
                 * Climb up the ladder. If we're already at the top, it
                 * can be executed directly.
                 */
                qn = &tg->qnode_on_parent[rw];
                sq = sq->parent_sq;
                tg = sq_to_tg(sq);
                if (!tg)
                        goto out_unlock;
        }

        /* out-of-limit, queue to @tg */
        throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
                   rw == READ ? 'R' : 'W',
                   tg->bytes_disp[rw], bio->bi_iter.bi_size,
                   tg_bps_limit(tg, rw),
                   tg->io_disp[rw], tg_iops_limit(tg, rw),
                   sq->nr_queued[READ], sq->nr_queued[WRITE]);

        tg->last_low_overflow_time[rw] = jiffies;

        td->nr_queued[rw]++;
        throtl_add_bio_tg(bio, qn, tg);
        throttled = true;

        /*
         * Update @tg's dispatch time and force schedule dispatch if @tg
         * was empty before @bio. The forced scheduling isn't likely to
         * cause undue delay as @bio is likely to be dispatched directly if
         * its @tg's disptime is not in the future.
         */
        if (tg->flags & THROTL_TG_WAS_EMPTY) {
                tg_update_disptime(tg);
                throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true);
        }

out_unlock:
        spin_unlock_irq(q->queue_lock);
out:
        /*
         * As multiple blk-throtls may stack in the same issue path, we
         * don't want bios to leave with the flag set. Clear the flag if
         * the bio is being issued.
         */
        if (!throttled)
                bio_clear_flag(bio, BIO_THROTTLED);

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
        if (throttled || !td->track_bio_latency)
                bio->bi_issue_stat.stat |= SKIP_LATENCY;
#endif
        return throttled;
}
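/*
 * Editor's sketch of the control flow above, assuming a two-level
 * hierarchy root <- parent <- child: a bio charged successfully against
 * child's limits does not escape immediately; sq/tg move one level up and
 * the loop re-runs against parent's limits. Only when sq_to_tg(sq)
 * returns NULL (the bio has climbed past the top-level group) does it
 * leave with throttled == false. If any level is over its limit, the bio
 * is queued at that level, via the qnode_on_parent[rw] of the level below
 * when one has already been climbed through.
 */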
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
static void throtl_track_latency(struct throtl_data *td, sector_t size,
        int op, unsigned long time)
{
        struct latency_bucket *latency;
        int index;

        if (!td || td->limit_index != LIMIT_LOW || op != REQ_OP_READ ||
            !blk_queue_nonrot(td->queue))
                return;

        index = request_bucket_index(size);

        latency = get_cpu_ptr(td->latency_buckets);
        latency[index].total_latency += time;
        latency[index].samples++;
        put_cpu_ptr(td->latency_buckets);
}
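/*
 * Editor's note: the per-cpu buckets above trade a little accuracy for
 * cheap, lock-free accounting in the completion path; get_cpu_ptr() only
 * disables preemption. The per-cpu samples are folded into
 * td->tmp_buckets at most once per second by
 * throtl_update_latency_buckets().
 */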
void blk_throtl_stat_add(struct request *rq, u64 time_ns)
{
        struct request_queue *q = rq->q;
        struct throtl_data *td = q->td;

        throtl_track_latency(td, blk_stat_size(&rq->issue_stat),
                req_op(rq), time_ns >> 10);
}
void blk_throtl_bio_endio(struct bio *bio)
{
        struct throtl_grp *tg;
        u64 finish_time_ns;
        unsigned long finish_time;
        unsigned long start_time;
        unsigned long lat;

        tg = bio->bi_cg_private;
        if (!tg)
                return;
        bio->bi_cg_private = NULL;

        finish_time_ns = ktime_get_ns();
        tg->last_finish_time = finish_time_ns >> 10;

        start_time = blk_stat_time(&bio->bi_issue_stat) >> 10;
        finish_time = __blk_stat_time(finish_time_ns) >> 10;
        if (!start_time || finish_time <= start_time)
                return;

        lat = finish_time - start_time;
        /* this is only for bio based driver */
        if (!(bio->bi_issue_stat.stat & SKIP_LATENCY))
                throtl_track_latency(tg->td, blk_stat_size(&bio->bi_issue_stat),
                        bio_op(bio), lat);

        if (tg->latency_target) {
                int bucket;
                unsigned int threshold;

                bucket = request_bucket_index(
                        blk_stat_size(&bio->bi_issue_stat));
                threshold = tg->td->avg_buckets[bucket].latency +
                        tg->latency_target;
                if (lat > threshold)
                        tg->bad_bio_cnt++;
                /*
                 * Not race free: the count can be slightly off, which only
                 * means throttling decisions get a bit imprecise; no extra
                 * synchronization is needed.
                 */
                tg->bio_cnt++;
        }

        if (time_after(jiffies, tg->bio_cnt_reset_time) || tg->bio_cnt > 1024) {
                tg->bio_cnt_reset_time = tg->td->throtl_slice + jiffies;
                tg->bio_cnt /= 2;
                tg->bad_bio_cnt /= 2;
        }
}
#endif
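/*
 * Illustrative numbers (editor's addition): the halving above acts as a
 * cheap exponential decay so old history fades. If a group saw
 * bio_cnt=2000 with bad_bio_cnt=300 in the last window, the reset leaves
 * 1000/150, preserving the ~15% "bad" ratio that the upgrade/downgrade
 * heuristics compare against while capping the counters' magnitude.
 */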
/*
 * Dispatch all bios from all children tg's queued on @parent_sq. On
 * return, @parent_sq is guaranteed to not have any active children tg's
 * and all bios from previously active tg's are on @parent_sq->bio_lists[].
 */
static void tg_drain_bios(struct throtl_service_queue *parent_sq)
{
        struct throtl_grp *tg;

        while ((tg = throtl_rb_first(parent_sq))) {
                struct throtl_service_queue *sq = &tg->service_queue;
                struct bio *bio;

                throtl_dequeue_tg(tg);

                while ((bio = throtl_peek_queued(&sq->queued[READ])))
                        tg_dispatch_one_bio(tg, bio_data_dir(bio));
                while ((bio = throtl_peek_queued(&sq->queued[WRITE])))
                        tg_dispatch_one_bio(tg, bio_data_dir(bio));
        }
}
/**
 * blk_throtl_drain - drain throttled bios
 * @q: request_queue to drain throttled bios for
 *
 * Dispatch all currently throttled bios on @q through ->make_request_fn().
 */
void blk_throtl_drain(struct request_queue *q)
        __releases(q->queue_lock) __acquires(q->queue_lock)
{
        struct throtl_data *td = q->td;
        struct blkcg_gq *blkg;
        struct cgroup_subsys_state *pos_css;
        struct bio *bio;
        int rw;

        queue_lockdep_assert_held(q);
        rcu_read_lock();

        /*
         * Drain each tg while doing a post-order walk on the blkg tree, so
         * that all bios are propagated to td->service_queue. It'd be
         * better to walk the service_queue tree directly but the blkg walk
         * is easier.
         */
        blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg)
                tg_drain_bios(&blkg_to_tg(blkg)->service_queue);

        /* finally, transfer bios from top-level tg's into the td */
        tg_drain_bios(&td->service_queue);

        rcu_read_unlock();
        spin_unlock_irq(q->queue_lock);

        /* all bios now should be in td->service_queue, issue them */
        for (rw = READ; rw <= WRITE; rw++)
                while ((bio = throtl_pop_queued(&td->service_queue.queued[rw],
                                                NULL)))
                        generic_make_request(bio);

        spin_lock_irq(q->queue_lock);
}
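/*
 * Editor's note: the __releases/__acquires annotations document the lock
 * dance above for sparse. The queue_lock has to be dropped because
 * generic_make_request() cannot be called under the queue spinlock; it
 * re-enters the make_request path, which itself takes that lock in
 * blk_throtl_bio(). The lock is re-taken before returning to the caller.
 */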
int blk_throtl_init(struct request_queue *q)
{
        struct throtl_data *td;
        int ret;

        td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
        if (!td)
                return -ENOMEM;
        td->latency_buckets = __alloc_percpu(sizeof(struct latency_bucket) *
                LATENCY_BUCKET_SIZE, __alignof__(u64));
        if (!td->latency_buckets) {
                kfree(td);
                return -ENOMEM;
        }

        INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
        throtl_service_queue_init(&td->service_queue);

        q->td = td;
        td->queue = q;

        td->limit_valid[LIMIT_MAX] = true;
        td->limit_index = LIMIT_MAX;
        td->low_upgrade_time = jiffies;
        td->low_downgrade_time = jiffies;

        /* activate policy */
        ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
        if (ret) {
                free_percpu(td->latency_buckets);
                kfree(td);
        }
        return ret;
}
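/*
 * Editor's worked example (sizes assumed for illustration): the percpu
 * allocation above reserves LATENCY_BUCKET_SIZE latency_bucket slots per
 * CPU. With, say, 9 buckets and a 16-byte latency_bucket, that is 144
 * bytes per CPU, aligned for the embedded u64 counters.
 */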
void blk_throtl_exit(struct request_queue *q)
{
        BUG_ON(!q->td);
        throtl_shutdown_wq(q);
        blkcg_deactivate_policy(q, &blkcg_policy_throtl);
        free_percpu(q->td->latency_buckets);
        kfree(q->td);
}
void blk_throtl_register_queue(struct request_queue *q)
{
        struct throtl_data *td;
        struct cgroup_subsys_state *pos_css;
        struct blkcg_gq *blkg;

        td = q->td;
        BUG_ON(!td);

        if (blk_queue_nonrot(q)) {
                td->throtl_slice = DFL_THROTL_SLICE_SSD;
                td->dft_idletime_threshold = DFL_IDLE_THRESHOLD_SSD;
        } else {
                td->throtl_slice = DFL_THROTL_SLICE_HD;
                td->dft_idletime_threshold = DFL_IDLE_THRESHOLD_HD;
        }
#ifndef CONFIG_BLK_DEV_THROTTLING_LOW
        /* if no low limit, use the previous default */
        td->throtl_slice = DFL_THROTL_SLICE_HD;
#endif

        td->track_bio_latency = !q->mq_ops && !q->request_fn;
        if (!td->track_bio_latency)
                blk_stat_enable_accounting(q);

        /*
         * Some tg's are created before the queue is fully initialized,
         * e.g. before the nonrot flag is set, so fix up their defaults
         * here.
         */
        rcu_read_lock();
        blkg_for_each_descendant_post(blkg, pos_css, q->root_blkg) {
                struct throtl_grp *tg = blkg_to_tg(blkg);

                tg->idletime_threshold = td->dft_idletime_threshold;
                tg->idletime_threshold_conf = td->dft_idletime_threshold;
        }
        rcu_read_unlock();
}
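/*
 * Editor's note: track_bio_latency is set above only for bio-based
 * drivers (neither blk-mq's mq_ops nor the legacy request_fn is set).
 * For request-based queues, blk_stat_enable_accounting() is enabled
 * instead, so completion latency arrives via blk_throtl_stat_add()
 * rather than blk_throtl_bio_endio().
 */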
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page)
{
        if (!q->td)
                return -EINVAL;
        return sprintf(page, "%u\n", jiffies_to_msecs(q->td->throtl_slice));
}

ssize_t blk_throtl_sample_time_store(struct request_queue *q,
        const char *page, size_t count)
{
        unsigned long v;
        unsigned long t;

        if (!q->td)
                return -EINVAL;
        if (kstrtoul(page, 10, &v))
                return -EINVAL;
        t = msecs_to_jiffies(v);
        if (t == 0 || t > MAX_THROTL_SLICE)
                return -EINVAL;
        q->td->throtl_slice = t;
        return count;
}
#endif
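/*
 * Usage sketch (editor's addition; the sysfs attribute name is assumed to
 * be "throttle_sample_time" as wired up elsewhere in the block layer):
 *
 *   # cat /sys/block/sda/queue/throttle_sample_time
 *   100
 *   # echo 50 > /sys/block/sda/queue/throttle_sample_time
 *
 * The store path rejects 0 and anything above
 * jiffies_to_msecs(MAX_THROTL_SLICE).
 */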
static int __init throtl_init(void)
{
        kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
        if (!kthrotld_workqueue)
                panic("Failed to create kthrotld\n");

        return blkcg_policy_register(&blkcg_policy_throtl);
}

module_init(throtl_init);