/*
 * Interface for controlling IO bandwidth on a request queue
 *
 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blktrace_api.h>
#include <linux/blk-cgroup.h>
#include "blk.h"
/* Max dispatch from a group in 1 round */
static int throtl_grp_quantum = 8;

/* Total max dispatch from all groups in one round */
static int throtl_quantum = 32;
/* Throttling is performed over a slice and after that slice is renewed */
#define DFL_THROTL_SLICE_HD (HZ / 10)
#define DFL_THROTL_SLICE_SSD (HZ / 50)
#define MAX_THROTL_SLICE (HZ)
#define MAX_IDLE_TIME (5L * 1000 * 1000) /* 5 s */
#define MIN_THROTL_BPS (320 * 1024)
#define MIN_THROTL_IOPS (10)
#define DFL_LATENCY_TARGET (-1L)
#define DFL_IDLE_THRESHOLD (0)

#define SKIP_LATENCY (((u64)1) << BLK_STAT_RES_SHIFT)
static struct blkcg_policy blkcg_policy_throtl;

/* A workqueue to queue throttle related work */
static struct workqueue_struct *kthrotld_workqueue;
/*
 * To implement hierarchical throttling, throtl_grps form a tree and bios
 * are dispatched upwards level by level until they reach the top and get
 * issued.  When dispatching bios from the children and local group at each
 * level, if the bios are dispatched into a single bio_list, there's a risk
 * of a local or child group which can queue many bios at once filling up
 * the list starving others.
 *
 * To avoid such starvation, dispatched bios are queued separately
 * according to where they came from.  When they are again dispatched to
 * the parent, they're popped in round-robin order so that no single source
 * hogs the dispatch window.
 *
 * throtl_qnode is used to keep the queued bios separated by their sources.
 * Bios are queued to throtl_qnode which in turn is queued to
 * throtl_service_queue and then dispatched in round-robin order.
 *
 * It's also used to track the reference counts on blkg's.  A qnode always
 * belongs to a throtl_grp and gets queued on itself or the parent, so
 * incrementing the reference of the associated throtl_grp when a qnode is
 * queued and decrementing when dequeued is enough to keep the whole blkg
 * tree pinned while bios are in flight.
 */
struct throtl_qnode {
        struct list_head        node;           /* service_queue->queued[] */
        struct bio_list         bios;           /* queued bios */
        struct throtl_grp       *tg;            /* tg this qnode belongs to */
};
struct throtl_service_queue {
        struct throtl_service_queue *parent_sq; /* the parent service_queue */

        /*
         * Bios queued directly to this service_queue or dispatched from
         * children throtl_grp's.
         */
        struct list_head        queued[2];      /* throtl_qnode [READ/WRITE] */
        unsigned int            nr_queued[2];   /* number of queued bios */

        /*
         * RB tree of active children throtl_grp's, which are sorted by
         * their ->disptime.
         */
        struct rb_root          pending_tree;   /* RB tree of active tgs */
        struct rb_node          *first_pending; /* first node in the tree */
        unsigned int            nr_pending;     /* # queued in the tree */
        unsigned long           first_pending_disptime; /* disptime of the first tg */
        struct timer_list       pending_timer;  /* fires on first_pending_disptime */
};
enum tg_state_flags {
        THROTL_TG_PENDING       = 1 << 0,       /* on parent's pending tree */
        THROTL_TG_WAS_EMPTY     = 1 << 1,       /* bio_lists[] became non-empty */
};

#define rb_entry_tg(node)       rb_entry((node), struct throtl_grp, rb_node)

enum {
        LIMIT_LOW,
        LIMIT_MAX,
        LIMIT_CNT,
};

struct throtl_grp {
        /* must be the first member */
        struct blkg_policy_data pd;

        /* active throtl group service_queue member */
        struct rb_node rb_node;

        /* throtl_data this group belongs to */
        struct throtl_data *td;

        /* this group's service queue */
        struct throtl_service_queue service_queue;

        /*
         * qnode_on_self is used when bios are directly queued to this
         * throtl_grp so that local bios compete fairly with bios
         * dispatched from children.  qnode_on_parent is used when bios are
         * dispatched from this throtl_grp into its parent and will compete
         * with the sibling qnode_on_parents and the parent's
         * qnode_on_self.
         */
        struct throtl_qnode qnode_on_self[2];
        struct throtl_qnode qnode_on_parent[2];

        /*
         * Dispatch time in jiffies. This is the estimated time when group
         * will unthrottle and is ready to dispatch more bio. It is used as
         * key to sort active groups in service tree.
         */
        unsigned long disptime;

        unsigned int flags;

        /* are there any throtl rules between this group and td? */
        bool has_rules[2];

        /* internally used bytes per second rate limits */
        uint64_t bps[2][LIMIT_CNT];
        /* user configured bps limits */
        uint64_t bps_conf[2][LIMIT_CNT];

        /* internally used IOPS limits */
        unsigned int iops[2][LIMIT_CNT];
        /* user configured IOPS limits */
        unsigned int iops_conf[2][LIMIT_CNT];

        /* Number of bytes dispatched in current slice */
        uint64_t bytes_disp[2];
        /* Number of bio's dispatched in current slice */
        unsigned int io_disp[2];

        unsigned long last_low_overflow_time[2];

        uint64_t last_bytes_disp[2];
        unsigned int last_io_disp[2];

        unsigned long last_check_time;

        unsigned long latency_target; /* us */
        unsigned long latency_target_conf; /* us */
        /* When did we start a new slice */
        unsigned long slice_start[2];
        unsigned long slice_end[2];

        unsigned long last_finish_time; /* ns / 1024 */
        unsigned long checked_last_finish_time; /* ns / 1024 */
        unsigned long avg_idletime; /* ns / 1024 */
        unsigned long idletime_threshold; /* us */
        unsigned long idletime_threshold_conf; /* us */

        unsigned int bio_cnt; /* total bios */
        unsigned int bad_bio_cnt; /* bios exceeding latency threshold */
        unsigned long bio_cnt_reset_time;
};
/* We measure latency for request size from <= 4k to >= 1M */
#define LATENCY_BUCKET_SIZE 9

struct latency_bucket {
        unsigned long total_latency; /* ns / 1024 */
        int samples;
};

struct avg_latency_bucket {
        unsigned long latency; /* ns / 1024 */
        bool valid;
};
struct throtl_data
{
        /* service tree for active throtl groups */
        struct throtl_service_queue service_queue;

        struct request_queue *queue;

        /* Total Number of queued bios on READ and WRITE lists */
        unsigned int nr_queued[2];

        unsigned int throtl_slice;

        /* Work for dispatching throttled bios */
        struct work_struct dispatch_work;
        unsigned int limit_index;
        bool limit_valid[LIMIT_CNT];

        unsigned long low_upgrade_time;
        unsigned long low_downgrade_time;

        unsigned int scale;

        struct latency_bucket tmp_buckets[LATENCY_BUCKET_SIZE];
        struct avg_latency_bucket avg_buckets[LATENCY_BUCKET_SIZE];
        struct latency_bucket __percpu *latency_buckets;
        unsigned long last_calculate_time;

        bool track_bio_latency;
};
static void throtl_pending_timer_fn(unsigned long arg);
static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
{
        return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
}

static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
{
        return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
}

static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
{
        return pd_to_blkg(&tg->pd);
}
/**
 * sq_to_tg - return the throtl_grp the specified service queue belongs to
 * @sq: the throtl_service_queue of interest
 *
 * Return the throtl_grp @sq belongs to.  If @sq is the top-level one
 * embedded in throtl_data, %NULL is returned.
 */
static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq)
{
        if (sq && sq->parent_sq)
                return container_of(sq, struct throtl_grp, service_queue);
        else
                return NULL;
}
/**
 * sq_to_td - return throtl_data the specified service queue belongs to
 * @sq: the throtl_service_queue of interest
 *
 * A service_queue can be embedded in either a throtl_grp or throtl_data.
 * Determine the associated throtl_data accordingly and return it.
 */
static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
{
        struct throtl_grp *tg = sq_to_tg(sq);

        if (tg)
                return tg->td;
        else
                return container_of(sq, struct throtl_data, service_queue);
}
/*
 * cgroup's limit in LIMIT_MAX is scaled if low limit is set. This scale is to
 * make the IO dispatch more smooth.
 * Scale up: linearly scale up according to elapsed time since upgrade. For
 *           every throtl_slice, the limit scales up 1/2 .low limit till the
 *           limit hits .max limit
 * Scale down: exponentially scale down if a cgroup doesn't hit its .low limit
 */
static uint64_t throtl_adjusted_limit(uint64_t low, struct throtl_data *td)
{
        /* arbitrary value to avoid too big scale */
        if (td->scale < 4096 && time_after_eq(jiffies,
            td->low_upgrade_time + td->scale * td->throtl_slice))
                td->scale = (jiffies - td->low_upgrade_time) / td->throtl_slice;

        return low + (low >> 1) * td->scale;
}
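/*
 * Illustrative example (added for clarity, not in the original source):
 * once three throtl_slice periods have elapsed since the last upgrade,
 * td->scale becomes 3 and a .low limit of 320 KiB/s is adjusted to
 * 320 + (320 >> 1) * 3 = 800 KiB/s, i.e. the effective limit grows by
 * half the .low limit per slice until the callers below clamp it to .max.
 */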
static uint64_t tg_bps_limit(struct throtl_grp *tg, int rw)
{
        struct blkcg_gq *blkg = tg_to_blkg(tg);
        struct throtl_data *td;
        uint64_t ret;

        if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
                return U64_MAX;

        td = tg->td;
        ret = tg->bps[rw][td->limit_index];
        if (ret == 0 && td->limit_index == LIMIT_LOW) {
                /* intermediate node or iops isn't 0 */
                if (!list_empty(&blkg->blkcg->css.children) ||
                    tg->iops[rw][td->limit_index])
                        return U64_MAX;
                else
                        return MIN_THROTL_BPS;
        }

        if (td->limit_index == LIMIT_MAX && tg->bps[rw][LIMIT_LOW] &&
            tg->bps[rw][LIMIT_LOW] != tg->bps[rw][LIMIT_MAX]) {
                uint64_t adjusted;

                adjusted = throtl_adjusted_limit(tg->bps[rw][LIMIT_LOW], td);
                ret = min(tg->bps[rw][LIMIT_MAX], adjusted);
        }
        return ret;
}
static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
{
        struct blkcg_gq *blkg = tg_to_blkg(tg);
        struct throtl_data *td;
        unsigned int ret;

        if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
                return UINT_MAX;

        td = tg->td;
        ret = tg->iops[rw][td->limit_index];
        if (ret == 0 && tg->td->limit_index == LIMIT_LOW) {
                /* intermediate node or bps isn't 0 */
                if (!list_empty(&blkg->blkcg->css.children) ||
                    tg->bps[rw][td->limit_index])
                        return UINT_MAX;
                else
                        return MIN_THROTL_IOPS;
        }

        if (td->limit_index == LIMIT_MAX && tg->iops[rw][LIMIT_LOW] &&
            tg->iops[rw][LIMIT_LOW] != tg->iops[rw][LIMIT_MAX]) {
                uint64_t adjusted;

                adjusted = throtl_adjusted_limit(tg->iops[rw][LIMIT_LOW], td);
                if (adjusted > UINT_MAX)
                        adjusted = UINT_MAX;
                ret = min_t(unsigned int, tg->iops[rw][LIMIT_MAX], adjusted);
        }
        return ret;
}
#define request_bucket_index(sectors) \
        clamp_t(int, order_base_2(sectors) - 3, 0, LATENCY_BUCKET_SIZE - 1)
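/*
 * Worked example (added for clarity): a 4KB request is 8 sectors, so
 * order_base_2(8) - 3 = 0 selects the first bucket; a 1MB request is
 * 2048 sectors, so 11 - 3 = 8 selects the last of the LATENCY_BUCKET_SIZE
 * (9) buckets.  Sizes outside that range are clamped to bucket 0 or 8.
 */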
/**
 * throtl_log - log debug message via blktrace
 * @sq: the service_queue being reported
 * @fmt: printf format string
 * @args: printf args
 *
 * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
 * throtl_grp; otherwise, just "throtl".
 */
#define throtl_log(sq, fmt, args...)    do {                            \
        struct throtl_grp *__tg = sq_to_tg((sq));                       \
        struct throtl_data *__td = sq_to_td((sq));                      \
                                                                        \
        if (likely(!blk_trace_note_message_enabled(__td->queue)))      \
                break;                                                  \
        if ((__tg)) {                                                   \
                char __pbuf[128];                                       \
                                                                        \
                blkg_path(tg_to_blkg(__tg), __pbuf, sizeof(__pbuf));    \
                blk_add_trace_msg(__td->queue, "throtl %s " fmt, __pbuf, ##args); \
        } else {                                                        \
                blk_add_trace_msg(__td->queue, "throtl " fmt, ##args);  \
        }                                                               \
} while (0)
static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
{
        INIT_LIST_HEAD(&qn->node);
        bio_list_init(&qn->bios);
        qn->tg = tg;
}
/**
 * throtl_qnode_add_bio - add a bio to a throtl_qnode and activate it
 * @bio: bio being added
 * @qn: qnode to add bio to
 * @queued: the service_queue->queued[] list @qn belongs to
 *
 * Add @bio to @qn and put @qn on @queued if it's not already on.
 * @qn->tg's reference count is bumped when @qn is activated.  See the
 * comment on top of throtl_qnode definition for details.
 */
static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,
                                 struct list_head *queued)
{
        bio_list_add(&qn->bios, bio);
        if (list_empty(&qn->node)) {
                list_add_tail(&qn->node, queued);
                blkg_get(tg_to_blkg(qn->tg));
        }
}
/**
 * throtl_peek_queued - peek the first bio on a qnode list
 * @queued: the qnode list to peek
 */
static struct bio *throtl_peek_queued(struct list_head *queued)
{
        struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node);
        struct bio *bio;

        if (list_empty(queued))
                return NULL;

        bio = bio_list_peek(&qn->bios);
        WARN_ON_ONCE(!bio);
        return bio;
}
/**
 * throtl_pop_queued - pop the first bio from a qnode list
 * @queued: the qnode list to pop a bio from
 * @tg_to_put: optional out argument for throtl_grp to put
 *
 * Pop the first bio from the qnode list @queued.  After popping, the first
 * qnode is removed from @queued if empty or moved to the end of @queued so
 * that the popping order is round-robin.
 *
 * When the first qnode is removed, its associated throtl_grp should be put
 * too.  If @tg_to_put is NULL, this function automatically puts it;
 * otherwise, *@tg_to_put is set to the throtl_grp to put and the caller is
 * responsible for putting it.
 */
static struct bio *throtl_pop_queued(struct list_head *queued,
                                     struct throtl_grp **tg_to_put)
{
        struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node);
        struct bio *bio;

        if (list_empty(queued))
                return NULL;

        bio = bio_list_pop(&qn->bios);
        WARN_ON_ONCE(!bio);

        if (bio_list_empty(&qn->bios)) {
                list_del_init(&qn->node);
                if (tg_to_put)
                        *tg_to_put = qn->tg;
                else
                        blkg_put(tg_to_blkg(qn->tg));
        } else {
                list_move_tail(&qn->node, queued);
        }

        return bio;
}
/* init a service_queue, assumes the caller zeroed it */
static void throtl_service_queue_init(struct throtl_service_queue *sq)
{
        INIT_LIST_HEAD(&sq->queued[0]);
        INIT_LIST_HEAD(&sq->queued[1]);
        sq->pending_tree = RB_ROOT;
        setup_timer(&sq->pending_timer, throtl_pending_timer_fn,
                    (unsigned long)sq);
}
static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp, int node)
{
        struct throtl_grp *tg;
        int rw;

        tg = kzalloc_node(sizeof(*tg), gfp, node);
        if (!tg)
                return NULL;

        throtl_service_queue_init(&tg->service_queue);

        for (rw = READ; rw <= WRITE; rw++) {
                throtl_qnode_init(&tg->qnode_on_self[rw], tg);
                throtl_qnode_init(&tg->qnode_on_parent[rw], tg);
        }

        RB_CLEAR_NODE(&tg->rb_node);
        tg->bps[READ][LIMIT_MAX] = U64_MAX;
        tg->bps[WRITE][LIMIT_MAX] = U64_MAX;
        tg->iops[READ][LIMIT_MAX] = UINT_MAX;
        tg->iops[WRITE][LIMIT_MAX] = UINT_MAX;
        tg->bps_conf[READ][LIMIT_MAX] = U64_MAX;
        tg->bps_conf[WRITE][LIMIT_MAX] = U64_MAX;
        tg->iops_conf[READ][LIMIT_MAX] = UINT_MAX;
        tg->iops_conf[WRITE][LIMIT_MAX] = UINT_MAX;
        /* LIMIT_LOW will have default value 0 */

        tg->latency_target = DFL_LATENCY_TARGET;
        tg->latency_target_conf = DFL_LATENCY_TARGET;
        tg->idletime_threshold = DFL_IDLE_THRESHOLD;
        tg->idletime_threshold_conf = DFL_IDLE_THRESHOLD;

        return &tg->pd;
}
static void throtl_pd_init(struct blkg_policy_data *pd)
{
        struct throtl_grp *tg = pd_to_tg(pd);
        struct blkcg_gq *blkg = tg_to_blkg(tg);
        struct throtl_data *td = blkg->q->td;
        struct throtl_service_queue *sq = &tg->service_queue;

        /*
         * If on the default hierarchy, we switch to properly hierarchical
         * behavior where limits on a given throtl_grp are applied to the
         * whole subtree rather than just the group itself.  e.g. If 16M
         * read_bps limit is set on the root group, the whole system can't
         * exceed 16M for the device.
         *
         * If not on the default hierarchy, the broken flat hierarchy
         * behavior is retained where all throtl_grps are treated as if
         * they're all separate root groups right below throtl_data.
         * Limits of a group don't interact with limits of other groups
         * regardless of the position of the group in the hierarchy.
         */
        sq->parent_sq = &td->service_queue;
        if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent)
                sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
        tg->td = td;
}
/*
 * Set has_rules[] if @tg or any of its parents have limits configured.
 * This doesn't require walking up to the top of the hierarchy as the
 * parent's has_rules[] is guaranteed to be correct.
 */
static void tg_update_has_rules(struct throtl_grp *tg)
{
        struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq);
        struct throtl_data *td = tg->td;
        int rw;

        for (rw = READ; rw <= WRITE; rw++)
                tg->has_rules[rw] = (parent_tg && parent_tg->has_rules[rw]) ||
                        (td->limit_valid[td->limit_index] &&
                         (tg_bps_limit(tg, rw) != U64_MAX ||
                          tg_iops_limit(tg, rw) != UINT_MAX));
}
static void throtl_pd_online(struct blkg_policy_data *pd)
{
        struct throtl_grp *tg = pd_to_tg(pd);

        /*
         * We don't want new groups to escape the limits of their ancestors.
         * Update has_rules[] after a new group is brought online.
         */
        tg_update_has_rules(tg);
}
static void blk_throtl_update_limit_valid(struct throtl_data *td)
{
        struct cgroup_subsys_state *pos_css;
        struct blkcg_gq *blkg;
        bool low_valid = false;

        rcu_read_lock();
        blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
                struct throtl_grp *tg = blkg_to_tg(blkg);

                if (tg->bps[READ][LIMIT_LOW] || tg->bps[WRITE][LIMIT_LOW] ||
                    tg->iops[READ][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW])
                        low_valid = true;
        }
        rcu_read_unlock();

        td->limit_valid[LIMIT_LOW] = low_valid;
}
static void throtl_upgrade_state(struct throtl_data *td);

static void throtl_pd_offline(struct blkg_policy_data *pd)
{
        struct throtl_grp *tg = pd_to_tg(pd);

        tg->bps[READ][LIMIT_LOW] = 0;
        tg->bps[WRITE][LIMIT_LOW] = 0;
        tg->iops[READ][LIMIT_LOW] = 0;
        tg->iops[WRITE][LIMIT_LOW] = 0;

        blk_throtl_update_limit_valid(tg->td);

        if (!tg->td->limit_valid[tg->td->limit_index])
                throtl_upgrade_state(tg->td);
}
static void throtl_pd_free(struct blkg_policy_data *pd)
{
        struct throtl_grp *tg = pd_to_tg(pd);

        del_timer_sync(&tg->service_queue.pending_timer);
        kfree(tg);
}
static struct throtl_grp *
throtl_rb_first(struct throtl_service_queue *parent_sq)
{
        /* Service tree is empty */
        if (!parent_sq->nr_pending)
                return NULL;

        if (!parent_sq->first_pending)
                parent_sq->first_pending = rb_first(&parent_sq->pending_tree);

        if (parent_sq->first_pending)
                return rb_entry_tg(parent_sq->first_pending);

        return NULL;
}
static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
        rb_erase(n, root);
        RB_CLEAR_NODE(n);
}
static void throtl_rb_erase(struct rb_node *n,
                            struct throtl_service_queue *parent_sq)
{
        if (parent_sq->first_pending == n)
                parent_sq->first_pending = NULL;
        rb_erase_init(n, &parent_sq->pending_tree);
        --parent_sq->nr_pending;
}
static void update_min_dispatch_time(struct throtl_service_queue *parent_sq)
{
        struct throtl_grp *tg;

        tg = throtl_rb_first(parent_sq);
        if (!tg)
                return;

        parent_sq->first_pending_disptime = tg->disptime;
}
static void tg_service_queue_add(struct throtl_grp *tg)
{
        struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq;
        struct rb_node **node = &parent_sq->pending_tree.rb_node;
        struct rb_node *parent = NULL;
        struct throtl_grp *__tg;
        unsigned long key = tg->disptime;
        int left = 1;

        while (*node != NULL) {
                parent = *node;
                __tg = rb_entry_tg(parent);

                if (time_before(key, __tg->disptime))
                        node = &parent->rb_left;
                else {
                        node = &parent->rb_right;
                        left = 0;
                }
        }

        if (left)
                parent_sq->first_pending = &tg->rb_node;

        rb_link_node(&tg->rb_node, parent, node);
        rb_insert_color(&tg->rb_node, &parent_sq->pending_tree);
}
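/*
 * Note (added): @left above tracks whether the new node was inserted along
 * the leftmost path.  Only in that case can it become the new
 * first_pending, which lets throtl_rb_first() skip a full rb_first() walk
 * in the common case.
 */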
static void __throtl_enqueue_tg(struct throtl_grp *tg)
{
        tg_service_queue_add(tg);
        tg->flags |= THROTL_TG_PENDING;
        tg->service_queue.parent_sq->nr_pending++;
}

static void throtl_enqueue_tg(struct throtl_grp *tg)
{
        if (!(tg->flags & THROTL_TG_PENDING))
                __throtl_enqueue_tg(tg);
}

static void __throtl_dequeue_tg(struct throtl_grp *tg)
{
        throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq);
        tg->flags &= ~THROTL_TG_PENDING;
}

static void throtl_dequeue_tg(struct throtl_grp *tg)
{
        if (tg->flags & THROTL_TG_PENDING)
                __throtl_dequeue_tg(tg);
}
/* Call with queue lock held */
static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
                                          unsigned long expires)
{
        unsigned long max_expire = jiffies + 8 * sq_to_td(sq)->throtl_slice;

        /*
         * Since we are adjusting the throttle limit dynamically, the sleep
         * time calculated according to previous limit might be invalid. It's
         * possible the cgroup sleep time is very long and no other cgroups
         * have IO running so notify the limit changes. Make sure the cgroup
         * doesn't sleep too long to avoid the missed notification.
         */
        if (time_after(expires, max_expire))
                expires = max_expire;
        mod_timer(&sq->pending_timer, expires);
        throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu",
                   expires - jiffies, jiffies);
}
/**
 * throtl_schedule_next_dispatch - schedule the next dispatch cycle
 * @sq: the service_queue to schedule dispatch for
 * @force: force scheduling
 *
 * Arm @sq->pending_timer so that the next dispatch cycle starts on the
 * dispatch time of the first pending child.  Returns %true if either timer
 * is armed or there's no pending child left.  %false if the current
 * dispatch window is still open and the caller should continue
 * dispatching.
 *
 * If @force is %true, the dispatch timer is always scheduled and this
 * function is guaranteed to return %true.  This is to be used when the
 * caller can't dispatch itself and needs to invoke pending_timer
 * unconditionally.  Note that forced scheduling is likely to induce short
 * delay before dispatch starts even if @sq->first_pending_disptime is not
 * in the future and thus shouldn't be used in hot paths.
 */
static bool throtl_schedule_next_dispatch(struct throtl_service_queue *sq,
                                          bool force)
{
        /* any pending children left? */
        if (!sq->nr_pending)
                return true;

        update_min_dispatch_time(sq);

        /* is the next dispatch time in the future? */
        if (force || time_after(sq->first_pending_disptime, jiffies)) {
                throtl_schedule_pending_timer(sq, sq->first_pending_disptime);
                return true;
        }

        /* tell the caller to continue dispatching */
        return false;
}
static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
                                        bool rw, unsigned long start)
{
        tg->bytes_disp[rw] = 0;
        tg->io_disp[rw] = 0;

        /*
         * Previous slice has expired. We must have trimmed it after last
         * bio dispatch. That means since start of last slice, we never used
         * that bandwidth. Do try to make use of that bandwidth while giving
         * credit.
         */
        if (time_after_eq(start, tg->slice_start[rw]))
                tg->slice_start[rw] = start;

        tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
        throtl_log(&tg->service_queue,
                   "[%c] new slice with credit start=%lu end=%lu jiffies=%lu",
                   rw == READ ? 'R' : 'W', tg->slice_start[rw],
                   tg->slice_end[rw], jiffies);
}
static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
{
        tg->bytes_disp[rw] = 0;
        tg->io_disp[rw] = 0;
        tg->slice_start[rw] = jiffies;
        tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
        throtl_log(&tg->service_queue,
                   "[%c] new slice start=%lu end=%lu jiffies=%lu",
                   rw == READ ? 'R' : 'W', tg->slice_start[rw],
                   tg->slice_end[rw], jiffies);
}
static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
                                        unsigned long jiffy_end)
{
        tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
}

static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
                                       unsigned long jiffy_end)
{
        tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
        throtl_log(&tg->service_queue,
                   "[%c] extend slice start=%lu end=%lu jiffies=%lu",
                   rw == READ ? 'R' : 'W', tg->slice_start[rw],
                   tg->slice_end[rw], jiffies);
}
/* Determine if previously allocated or extended slice is complete or not */
static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
{
        if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
                return false;

        return true;
}
/* Trim the used slices and adjust slice start accordingly */
static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
{
        unsigned long nr_slices, time_elapsed, io_trim;
        u64 bytes_trim, tmp;

        BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));

        /*
         * If bps are unlimited (-1), then the time slice doesn't get
         * renewed. Don't try to trim the slice if slice is used. A new
         * slice will start when appropriate.
         */
        if (throtl_slice_used(tg, rw))
                return;

        /*
         * A bio has been dispatched. Also adjust slice_end. It might happen
         * that initially cgroup limit was very low resulting in high
         * slice_end, but later limit was bumped up and bio was dispatched
         * sooner, then we need to reduce slice_end. A high bogus slice_end
         * is bad because it does not allow new slice to start.
         */

        throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice);

        time_elapsed = jiffies - tg->slice_start[rw];

        nr_slices = time_elapsed / tg->td->throtl_slice;

        if (!nr_slices)
                return;
        tmp = tg_bps_limit(tg, rw) * tg->td->throtl_slice * nr_slices;
        do_div(tmp, HZ);
        bytes_trim = tmp;

        io_trim = (tg_iops_limit(tg, rw) * tg->td->throtl_slice * nr_slices) /
                HZ;

        if (!bytes_trim && !io_trim)
                return;

        if (tg->bytes_disp[rw] >= bytes_trim)
                tg->bytes_disp[rw] -= bytes_trim;
        else
                tg->bytes_disp[rw] = 0;

        if (tg->io_disp[rw] >= io_trim)
                tg->io_disp[rw] -= io_trim;
        else
                tg->io_disp[rw] = 0;

        tg->slice_start[rw] += nr_slices * tg->td->throtl_slice;

        throtl_log(&tg->service_queue,
                   "[%c] trim slice nr=%lu bytes=%llu io=%lu start=%lu end=%lu jiffies=%lu",
                   rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
                   tg->slice_start[rw], tg->slice_end[rw], jiffies);
}
static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
                                  unsigned long *wait)
{
        bool rw = bio_data_dir(bio);
        unsigned int io_allowed;
        unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
        u64 tmp;

        jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

        /* Slice has just started. Consider one slice interval */
        if (!jiffy_elapsed)
                jiffy_elapsed_rnd = tg->td->throtl_slice;

        jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);

        /*
         * jiffy_elapsed_rnd should not be a big value as minimum iops can be
         * 1 then at max jiffy elapsed should be equivalent of 1 second as we
         * will allow dispatch after 1 second and after that slice should
         * have been trimmed.
         */

        tmp = (u64)tg_iops_limit(tg, rw) * jiffy_elapsed_rnd;
        do_div(tmp, HZ);

        if (tmp > UINT_MAX)
                io_allowed = UINT_MAX;
        else
                io_allowed = tmp;

        if (tg->io_disp[rw] + 1 <= io_allowed) {
                if (wait)
                        *wait = 0;
                return true;
        }

        /* Calc approx time to dispatch */
        jiffy_wait = ((tg->io_disp[rw] + 1) * HZ) / tg_iops_limit(tg, rw) + 1;

        if (jiffy_wait > jiffy_elapsed)
                jiffy_wait = jiffy_wait - jiffy_elapsed;
        else
                jiffy_wait = 1;

        if (wait)
                *wait = jiffy_wait;
        return false;
}
static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
                                 unsigned long *wait)
{
        bool rw = bio_data_dir(bio);
        u64 bytes_allowed, extra_bytes, tmp;
        unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;

        jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

        /* Slice has just started. Consider one slice interval */
        if (!jiffy_elapsed)
                jiffy_elapsed_rnd = tg->td->throtl_slice;

        jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);

        tmp = tg_bps_limit(tg, rw) * jiffy_elapsed_rnd;
        do_div(tmp, HZ);
        bytes_allowed = tmp;

        if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) {
                if (wait)
                        *wait = 0;
                return true;
        }

        /* Calc approx time to dispatch */
        extra_bytes = tg->bytes_disp[rw] + bio->bi_iter.bi_size - bytes_allowed;
        jiffy_wait = div64_u64(extra_bytes * HZ, tg_bps_limit(tg, rw));

        if (!jiffy_wait)
                jiffy_wait = 1;

        /*
         * This wait time is without taking into consideration the rounding
         * up we did. Add that time also.
         */
        jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
        if (wait)
                *wait = jiffy_wait;
        return false;
}
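/*
 * Worked example (added, assuming HZ=1000): with a 1MB/s limit and
 * extra_bytes = 512KB over the allowance, jiffy_wait = 512K * 1000 / 1M
 * = ~500 jiffies, i.e. roughly half a second before the bio fits in the
 * budget, plus the slice-rounding correction applied above.
 */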
/*
 * Returns whether one can dispatch a bio or not. Also returns approx number
 * of jiffies to wait before this bio is within IO rate and can be dispatched
 */
static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
                            unsigned long *wait)
{
        bool rw = bio_data_dir(bio);
        unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;

        /*
         * Currently whole state machine of group depends on first bio
         * queued in the group bio list. So one should not be calling
         * this function with a different bio if there are other bios
         * queued.
         */
        BUG_ON(tg->service_queue.nr_queued[rw] &&
               bio != throtl_peek_queued(&tg->service_queue.queued[rw]));

        /* If tg->bps = -1, then BW is unlimited */
        if (tg_bps_limit(tg, rw) == U64_MAX &&
            tg_iops_limit(tg, rw) == UINT_MAX) {
                if (wait)
                        *wait = 0;
                return true;
        }

        /*
         * If previous slice expired, start a new one otherwise renew/extend
         * existing slice to make sure it is at least throtl_slice interval
         * long since now. New slice is started only for empty throttle group.
         * If there is queued bio, that means there should be an active
         * slice and it should be extended instead.
         */
        if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw]))
                throtl_start_new_slice(tg, rw);
        else {
                if (time_before(tg->slice_end[rw],
                    jiffies + tg->td->throtl_slice))
                        throtl_extend_slice(tg, rw,
                                jiffies + tg->td->throtl_slice);
        }

        if (tg_with_in_bps_limit(tg, bio, &bps_wait) &&
            tg_with_in_iops_limit(tg, bio, &iops_wait)) {
                if (wait)
                        *wait = 0;
                return true;
        }

        max_wait = max(bps_wait, iops_wait);

        if (wait)
                *wait = max_wait;

        if (time_before(tg->slice_end[rw], jiffies + max_wait))
                throtl_extend_slice(tg, rw, jiffies + max_wait);

        return false;
}
static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
        bool rw = bio_data_dir(bio);

        /* Charge the bio to the group */
        tg->bytes_disp[rw] += bio->bi_iter.bi_size;
        tg->io_disp[rw]++;
        tg->last_bytes_disp[rw] += bio->bi_iter.bi_size;
        tg->last_io_disp[rw]++;

        /*
         * BIO_THROTTLED is used to prevent the same bio from being throttled
         * more than once as a throttled bio will go through blk-throtl the
         * second time when it eventually gets issued.  Set it when a bio
         * is being charged to a tg.
         */
        if (!bio_flagged(bio, BIO_THROTTLED))
                bio_set_flag(bio, BIO_THROTTLED);
}
/**
 * throtl_add_bio_tg - add a bio to the specified throtl_grp
 * @bio: bio to add
 * @qn: qnode to use
 * @tg: the target throtl_grp
 *
 * Add @bio to @tg's service_queue using @qn.  If @qn is not specified,
 * tg->qnode_on_self[] is used.
 */
static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn,
                              struct throtl_grp *tg)
{
        struct throtl_service_queue *sq = &tg->service_queue;
        bool rw = bio_data_dir(bio);

        if (!qn)
                qn = &tg->qnode_on_self[rw];

        /*
         * If @tg doesn't currently have any bios queued in the same
         * direction, queueing @bio can change when @tg should be
         * dispatched.  Mark that @tg was empty.  This is automatically
         * cleared on the next tg_update_disptime().
         */
        if (!sq->nr_queued[rw])
                tg->flags |= THROTL_TG_WAS_EMPTY;

        throtl_qnode_add_bio(bio, qn, &sq->queued[rw]);

        sq->nr_queued[rw]++;
        throtl_enqueue_tg(tg);
}
static void tg_update_disptime(struct throtl_grp *tg)
{
        struct throtl_service_queue *sq = &tg->service_queue;
        unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
        struct bio *bio;

        bio = throtl_peek_queued(&sq->queued[READ]);
        if (bio)
                tg_may_dispatch(tg, bio, &read_wait);

        bio = throtl_peek_queued(&sq->queued[WRITE]);
        if (bio)
                tg_may_dispatch(tg, bio, &write_wait);

        min_wait = min(read_wait, write_wait);
        disptime = jiffies + min_wait;

        /* Update dispatch time */
        throtl_dequeue_tg(tg);
        tg->disptime = disptime;
        throtl_enqueue_tg(tg);

        /* see throtl_add_bio_tg() */
        tg->flags &= ~THROTL_TG_WAS_EMPTY;
}
static void start_parent_slice_with_credit(struct throtl_grp *child_tg,
                                        struct throtl_grp *parent_tg, bool rw)
{
        if (throtl_slice_used(parent_tg, rw)) {
                throtl_start_new_slice_with_credit(parent_tg, rw,
                                child_tg->slice_start[rw]);
        }
}
static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
{
        struct throtl_service_queue *sq = &tg->service_queue;
        struct throtl_service_queue *parent_sq = sq->parent_sq;
        struct throtl_grp *parent_tg = sq_to_tg(parent_sq);
        struct throtl_grp *tg_to_put = NULL;
        struct bio *bio;

        /*
         * @bio is being transferred from @tg to @parent_sq.  Popping a bio
         * from @tg may put its reference and @parent_sq might end up
         * getting released prematurely.  Remember the tg to put and put it
         * after @bio is transferred to @parent_sq.
         */
        bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
        sq->nr_queued[rw]--;

        throtl_charge_bio(tg, bio);

        /*
         * If our parent is another tg, we just need to transfer @bio to
         * the parent using throtl_add_bio_tg().  If our parent is
         * @td->service_queue, @bio is ready to be issued.  Put it on its
         * bio_lists[] and decrease total number queued.  The caller is
         * responsible for issuing these bios.
         */
        if (parent_tg) {
                throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
                start_parent_slice_with_credit(tg, parent_tg, rw);
        } else {
                throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
                                     &parent_sq->queued[rw]);
                BUG_ON(tg->td->nr_queued[rw] <= 0);
                tg->td->nr_queued[rw]--;
        }

        throtl_trim_slice(tg, rw);

        if (tg_to_put)
                blkg_put(tg_to_blkg(tg_to_put));
}
static int throtl_dispatch_tg(struct throtl_grp *tg)
{
        struct throtl_service_queue *sq = &tg->service_queue;
        unsigned int nr_reads = 0, nr_writes = 0;
        unsigned int max_nr_reads = throtl_grp_quantum*3/4;
        unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
        struct bio *bio;

        /* Try to dispatch 75% READS and 25% WRITES */

        while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
               tg_may_dispatch(tg, bio, NULL)) {

                tg_dispatch_one_bio(tg, bio_data_dir(bio));
                nr_reads++;

                if (nr_reads >= max_nr_reads)
                        break;
        }

        while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
               tg_may_dispatch(tg, bio, NULL)) {

                tg_dispatch_one_bio(tg, bio_data_dir(bio));
                nr_writes++;

                if (nr_writes >= max_nr_writes)
                        break;
        }

        return nr_reads + nr_writes;
}
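/*
 * Note (added): with the default throtl_grp_quantum of 8 this works out to
 * max_nr_reads = 8 * 3 / 4 = 6 and max_nr_writes = 2 per round, which is
 * where the 75%/25% read/write split above comes from.
 */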
static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
{
        unsigned int nr_disp = 0;

        while (1) {
                struct throtl_grp *tg = throtl_rb_first(parent_sq);
                struct throtl_service_queue *sq = &tg->service_queue;

                if (!tg)
                        break;

                if (time_before(jiffies, tg->disptime))
                        break;

                throtl_dequeue_tg(tg);

                nr_disp += throtl_dispatch_tg(tg);

                if (sq->nr_queued[0] || sq->nr_queued[1])
                        tg_update_disptime(tg);

                if (nr_disp >= throtl_quantum)
                        break;
        }

        return nr_disp;
}
static bool throtl_can_upgrade(struct throtl_data *td,
                               struct throtl_grp *this_tg);
/**
 * throtl_pending_timer_fn - timer function for service_queue->pending_timer
 * @arg: the throtl_service_queue being serviced
 *
 * This timer is armed when a child throtl_grp with active bio's become
 * pending and queued on the service_queue's pending_tree and expires when
 * the first child throtl_grp should be dispatched.  This function
 * dispatches bio's from the children throtl_grps to the parent
 * service_queue.
 *
 * If the parent's parent is another throtl_grp, dispatching is propagated
 * by either arming its pending_timer or repeating dispatch directly.  If
 * the top-level service_tree is reached, throtl_data->dispatch_work is
 * kicked so that the ready bio's are issued.
 */
static void throtl_pending_timer_fn(unsigned long arg)
{
        struct throtl_service_queue *sq = (void *)arg;
        struct throtl_grp *tg = sq_to_tg(sq);
        struct throtl_data *td = sq_to_td(sq);
        struct request_queue *q = td->queue;
        struct throtl_service_queue *parent_sq;
        bool dispatched;
        int ret;

        spin_lock_irq(q->queue_lock);
        if (throtl_can_upgrade(td, NULL))
                throtl_upgrade_state(td);

again:
        parent_sq = sq->parent_sq;
        dispatched = false;

        while (true) {
                throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u",
                           sq->nr_queued[READ] + sq->nr_queued[WRITE],
                           sq->nr_queued[READ], sq->nr_queued[WRITE]);

                ret = throtl_select_dispatch(sq);
                if (ret) {
                        throtl_log(sq, "bios disp=%u", ret);
                        dispatched = true;
                }

                if (throtl_schedule_next_dispatch(sq, false))
                        break;

                /* this dispatch window is still open, relax and repeat */
                spin_unlock_irq(q->queue_lock);
                cpu_relax();
                spin_lock_irq(q->queue_lock);
        }

        if (!dispatched)
                goto out_unlock;

        if (parent_sq) {
                /* @parent_sq is another throtl_grp, propagate dispatch */
                if (tg->flags & THROTL_TG_WAS_EMPTY) {
                        tg_update_disptime(tg);
                        if (!throtl_schedule_next_dispatch(parent_sq, false)) {
                                /* window is already open, repeat dispatching */
                                sq = parent_sq;
                                tg = sq_to_tg(sq);
                                goto again;
                        }
                }
        } else {
                /* reached the top-level, queue issuing */
                queue_work(kthrotld_workqueue, &td->dispatch_work);
        }
out_unlock:
        spin_unlock_irq(q->queue_lock);
}
/**
 * blk_throtl_dispatch_work_fn - work function for throtl_data->dispatch_work
 * @work: work item being executed
 *
 * This function is queued for execution when bio's reach the bio_lists[]
 * of throtl_data->service_queue.  Those bio's are ready and issued by this
 * function.
 */
static void blk_throtl_dispatch_work_fn(struct work_struct *work)
{
        struct throtl_data *td = container_of(work, struct throtl_data,
                                              dispatch_work);
        struct throtl_service_queue *td_sq = &td->service_queue;
        struct request_queue *q = td->queue;
        struct bio_list bio_list_on_stack;
        struct bio *bio;
        struct blk_plug plug;
        int rw;

        bio_list_init(&bio_list_on_stack);

        spin_lock_irq(q->queue_lock);
        for (rw = READ; rw <= WRITE; rw++)
                while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
                        bio_list_add(&bio_list_on_stack, bio);
        spin_unlock_irq(q->queue_lock);

        if (!bio_list_empty(&bio_list_on_stack)) {
                blk_start_plug(&plug);
                while ((bio = bio_list_pop(&bio_list_on_stack)))
                        generic_make_request(bio);
                blk_finish_plug(&plug);
        }
}
static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
                              int off)
{
        struct throtl_grp *tg = pd_to_tg(pd);
        u64 v = *(u64 *)((void *)tg + off);

        if (v == U64_MAX)
                return 0;
        return __blkg_prfill_u64(sf, pd, v);
}

static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
                               int off)
{
        struct throtl_grp *tg = pd_to_tg(pd);
        unsigned int v = *(unsigned int *)((void *)tg + off);

        if (v == UINT_MAX)
                return 0;
        return __blkg_prfill_u64(sf, pd, v);
}
static int tg_print_conf_u64(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_u64,
                          &blkcg_policy_throtl, seq_cft(sf)->private, false);
        return 0;
}

static int tg_print_conf_uint(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_uint,
                          &blkcg_policy_throtl, seq_cft(sf)->private, false);
        return 0;
}
static void tg_conf_updated(struct throtl_grp *tg, bool global)
{
        struct throtl_service_queue *sq = &tg->service_queue;
        struct cgroup_subsys_state *pos_css;
        struct blkcg_gq *blkg;

        throtl_log(&tg->service_queue,
                   "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
                   tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE),
                   tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE));

        /*
         * Update has_rules[] flags for the updated tg's subtree.  A tg is
         * considered to have rules if either the tg itself or any of its
         * ancestors has rules.  This identifies groups without any
         * restrictions in the whole hierarchy and allows them to bypass
         * blk-throttle.
         */
        blkg_for_each_descendant_pre(blkg, pos_css,
                        global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) {
                struct throtl_grp *this_tg = blkg_to_tg(blkg);
                struct throtl_grp *parent_tg;

                tg_update_has_rules(this_tg);
                /* ignore root/second level */
                if (!cgroup_subsys_on_dfl(io_cgrp_subsys) || !blkg->parent ||
                    !blkg->parent->parent)
                        continue;
                parent_tg = blkg_to_tg(blkg->parent);
                /*
                 * make sure all children have a lower idle time threshold
                 * and a higher latency target
                 */
                this_tg->idletime_threshold = min(this_tg->idletime_threshold,
                                parent_tg->idletime_threshold);
                this_tg->latency_target = max(this_tg->latency_target,
                                parent_tg->latency_target);
        }

        /*
         * We're already holding queue_lock and know @tg is valid.  Let's
         * apply the new config directly.
         *
         * Restart the slices for both READ and WRITES. It might happen
         * that a group's limits are dropped suddenly and we don't want to
         * account recently dispatched IO with new low rate.
         */
        throtl_start_new_slice(tg, 0);
        throtl_start_new_slice(tg, 1);

        if (tg->flags & THROTL_TG_PENDING) {
                tg_update_disptime(tg);
                throtl_schedule_next_dispatch(sq->parent_sq, true);
        }
}
static ssize_t tg_set_conf(struct kernfs_open_file *of,
                           char *buf, size_t nbytes, loff_t off, bool is_u64)
{
        struct blkcg *blkcg = css_to_blkcg(of_css(of));
        struct blkg_conf_ctx ctx;
        struct throtl_grp *tg;
        int ret;
        u64 v;

        ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
        if (ret)
                return ret;

        ret = -EINVAL;
        if (sscanf(ctx.body, "%llu", &v) != 1)
                goto out_finish;
        if (!v)
                v = U64_MAX;

        tg = blkg_to_tg(ctx.blkg);

        if (is_u64)
                *(u64 *)((void *)tg + of_cft(of)->private) = v;
        else
                *(unsigned int *)((void *)tg + of_cft(of)->private) = v;

        tg_conf_updated(tg, false);
        ret = 0;
out_finish:
        blkg_conf_finish(&ctx);
        return ret ?: nbytes;
}
static ssize_t tg_set_conf_u64(struct kernfs_open_file *of,
                               char *buf, size_t nbytes, loff_t off)
{
        return tg_set_conf(of, buf, nbytes, off, true);
}

static ssize_t tg_set_conf_uint(struct kernfs_open_file *of,
                                char *buf, size_t nbytes, loff_t off)
{
        return tg_set_conf(of, buf, nbytes, off, false);
}
static struct cftype throtl_legacy_files[] = {
        {
                .name = "throttle.read_bps_device",
                .private = offsetof(struct throtl_grp, bps[READ][LIMIT_MAX]),
                .seq_show = tg_print_conf_u64,
                .write = tg_set_conf_u64,
        },
        {
                .name = "throttle.write_bps_device",
                .private = offsetof(struct throtl_grp, bps[WRITE][LIMIT_MAX]),
                .seq_show = tg_print_conf_u64,
                .write = tg_set_conf_u64,
        },
        {
                .name = "throttle.read_iops_device",
                .private = offsetof(struct throtl_grp, iops[READ][LIMIT_MAX]),
                .seq_show = tg_print_conf_uint,
                .write = tg_set_conf_uint,
        },
        {
                .name = "throttle.write_iops_device",
                .private = offsetof(struct throtl_grp, iops[WRITE][LIMIT_MAX]),
                .seq_show = tg_print_conf_uint,
                .write = tg_set_conf_uint,
        },
        {
                .name = "throttle.io_service_bytes",
                .private = (unsigned long)&blkcg_policy_throtl,
                .seq_show = blkg_print_stat_bytes,
        },
        {
                .name = "throttle.io_serviced",
                .private = (unsigned long)&blkcg_policy_throtl,
                .seq_show = blkg_print_stat_ios,
        },
        { }     /* terminate */
};
static u64 tg_prfill_limit(struct seq_file *sf, struct blkg_policy_data *pd,
                           int off)
{
        struct throtl_grp *tg = pd_to_tg(pd);
        const char *dname = blkg_dev_name(pd->blkg);
        char bufs[4][21] = { "max", "max", "max", "max" };
        u64 bps_dft;
        unsigned int iops_dft;
        char idle_time[26] = "";
        char latency_time[26] = "";

        if (!dname)
                return 0;

        if (off == LIMIT_LOW) {
                bps_dft = 0;
                iops_dft = 0;
        } else {
                bps_dft = U64_MAX;
                iops_dft = UINT_MAX;
        }

        if (tg->bps_conf[READ][off] == bps_dft &&
            tg->bps_conf[WRITE][off] == bps_dft &&
            tg->iops_conf[READ][off] == iops_dft &&
            tg->iops_conf[WRITE][off] == iops_dft &&
            (off != LIMIT_LOW ||
             (tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD &&
              tg->latency_target_conf == DFL_LATENCY_TARGET)))
                return 0;

        if (tg->bps_conf[READ][off] != U64_MAX)
                snprintf(bufs[0], sizeof(bufs[0]), "%llu",
                        tg->bps_conf[READ][off]);
        if (tg->bps_conf[WRITE][off] != U64_MAX)
                snprintf(bufs[1], sizeof(bufs[1]), "%llu",
                        tg->bps_conf[WRITE][off]);
        if (tg->iops_conf[READ][off] != UINT_MAX)
                snprintf(bufs[2], sizeof(bufs[2]), "%u",
                        tg->iops_conf[READ][off]);
        if (tg->iops_conf[WRITE][off] != UINT_MAX)
                snprintf(bufs[3], sizeof(bufs[3]), "%u",
                        tg->iops_conf[WRITE][off]);
        if (off == LIMIT_LOW) {
                if (tg->idletime_threshold_conf == ULONG_MAX)
                        strcpy(idle_time, " idle=max");
                else
                        snprintf(idle_time, sizeof(idle_time), " idle=%lu",
                                tg->idletime_threshold_conf);

                if (tg->latency_target_conf == ULONG_MAX)
                        strcpy(latency_time, " latency=max");
                else
                        snprintf(latency_time, sizeof(latency_time),
                                " latency=%lu", tg->latency_target_conf);
        }

        seq_printf(sf, "%s rbps=%s wbps=%s riops=%s wiops=%s%s%s\n",
                   dname, bufs[0], bufs[1], bufs[2], bufs[3], idle_time,
                   latency_time);
        return 0;
}
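/*
 * Illustrative output (added): for a device 8:16 with a 2MB/s read limit
 * and everything else unset, the line printed above would look like
 * "8:16 rbps=2097152 wbps=max riops=max wiops=max", with idle= and
 * latency= fields appended when off == LIMIT_LOW.
 */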
static int tg_print_limit(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_limit,
                          &blkcg_policy_throtl, seq_cft(sf)->private, false);
        return 0;
}
static ssize_t tg_set_limit(struct kernfs_open_file *of,
                          char *buf, size_t nbytes, loff_t off)
{
        struct blkcg *blkcg = css_to_blkcg(of_css(of));
        struct blkg_conf_ctx ctx;
        struct throtl_grp *tg;
        u64 v[4];
        unsigned long idle_time;
        unsigned long latency_time;
        int ret;
        int index = of_cft(of)->private;

        ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
        if (ret)
                return ret;

        tg = blkg_to_tg(ctx.blkg);

        v[0] = tg->bps_conf[READ][index];
        v[1] = tg->bps_conf[WRITE][index];
        v[2] = tg->iops_conf[READ][index];
        v[3] = tg->iops_conf[WRITE][index];

        idle_time = tg->idletime_threshold_conf;
        latency_time = tg->latency_target_conf;
        while (true) {
                char tok[27];   /* wiops=18446744073709551616 */
                char *p;
                u64 val = U64_MAX;
                int len;

                if (sscanf(ctx.body, "%26s%n", tok, &len) != 1)
                        break;
                if (tok[0] == '\0')
                        break;
                ctx.body += len;

                ret = -EINVAL;
                p = tok;
                strsep(&p, "=");
                if (!p || (sscanf(p, "%llu", &val) != 1 && strcmp(p, "max")))
                        goto out_finish;

                ret = -ERANGE;
                if (!val)
                        goto out_finish;

                ret = -EINVAL;
                if (!strcmp(tok, "rbps"))
                        v[0] = val;
                else if (!strcmp(tok, "wbps"))
                        v[1] = val;
                else if (!strcmp(tok, "riops"))
                        v[2] = min_t(u64, val, UINT_MAX);
                else if (!strcmp(tok, "wiops"))
                        v[3] = min_t(u64, val, UINT_MAX);
                else if (off == LIMIT_LOW && !strcmp(tok, "idle"))
                        idle_time = val;
                else if (off == LIMIT_LOW && !strcmp(tok, "latency"))
                        latency_time = val;
                else
                        goto out_finish;
        }

        tg->bps_conf[READ][index] = v[0];
        tg->bps_conf[WRITE][index] = v[1];
        tg->iops_conf[READ][index] = v[2];
        tg->iops_conf[WRITE][index] = v[3];

        if (index == LIMIT_MAX) {
                tg->bps[READ][index] = v[0];
                tg->bps[WRITE][index] = v[1];
                tg->iops[READ][index] = v[2];
                tg->iops[WRITE][index] = v[3];
        }
        tg->bps[READ][LIMIT_LOW] = min(tg->bps_conf[READ][LIMIT_LOW],
                tg->bps_conf[READ][LIMIT_MAX]);
        tg->bps[WRITE][LIMIT_LOW] = min(tg->bps_conf[WRITE][LIMIT_LOW],
                tg->bps_conf[WRITE][LIMIT_MAX]);
        tg->iops[READ][LIMIT_LOW] = min(tg->iops_conf[READ][LIMIT_LOW],
                tg->iops_conf[READ][LIMIT_MAX]);
        tg->iops[WRITE][LIMIT_LOW] = min(tg->iops_conf[WRITE][LIMIT_LOW],
                tg->iops_conf[WRITE][LIMIT_MAX]);
        tg->idletime_threshold_conf = idle_time;
        tg->latency_target_conf = latency_time;

        /* force user to configure all settings for low limit */
        if (!(tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW] ||
              tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) ||
            tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD ||
            tg->latency_target_conf == DFL_LATENCY_TARGET) {
                tg->bps[READ][LIMIT_LOW] = 0;
                tg->bps[WRITE][LIMIT_LOW] = 0;
                tg->iops[READ][LIMIT_LOW] = 0;
                tg->iops[WRITE][LIMIT_LOW] = 0;
                tg->idletime_threshold = DFL_IDLE_THRESHOLD;
                tg->latency_target = DFL_LATENCY_TARGET;
        } else if (index == LIMIT_LOW) {
                tg->idletime_threshold = tg->idletime_threshold_conf;
                tg->latency_target = tg->latency_target_conf;
        }

        blk_throtl_update_limit_valid(tg->td);
        if (tg->td->limit_valid[LIMIT_LOW]) {
                if (index == LIMIT_LOW)
                        tg->td->limit_index = LIMIT_LOW;
        } else
                tg->td->limit_index = LIMIT_MAX;
        tg_conf_updated(tg, index == LIMIT_LOW &&
                tg->td->limit_valid[LIMIT_LOW]);
        ret = 0;
out_finish:
        blkg_conf_finish(&ctx);
        return ret ?: nbytes;
}
static struct cftype throtl_files[] = {
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
        {
                .name = "low",
                .flags = CFTYPE_NOT_ON_ROOT,
                .seq_show = tg_print_limit,
                .write = tg_set_limit,
                .private = LIMIT_LOW,
        },
#endif
        {
                .name = "max",
                .flags = CFTYPE_NOT_ON_ROOT,
                .seq_show = tg_print_limit,
                .write = tg_set_limit,
                .private = LIMIT_MAX,
        },
        { }     /* terminate */
};
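/*
 * Usage sketch (added): on the default hierarchy these cftypes appear as
 * io.low and io.max.  A configuration like the prfill example above is set
 * with e.g. "echo '8:16 rbps=2097152 wiops=120' > io.max", which
 * tg_set_limit() parses token by token.
 */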
static void throtl_shutdown_wq(struct request_queue *q)
{
        struct throtl_data *td = q->td;

        cancel_work_sync(&td->dispatch_work);
}
static struct blkcg_policy blkcg_policy_throtl = {
        .dfl_cftypes            = throtl_files,
        .legacy_cftypes         = throtl_legacy_files,

        .pd_alloc_fn            = throtl_pd_alloc,
        .pd_init_fn             = throtl_pd_init,
        .pd_online_fn           = throtl_pd_online,
        .pd_offline_fn          = throtl_pd_offline,
        .pd_free_fn             = throtl_pd_free,
};
static unsigned long __tg_last_low_overflow_time(struct throtl_grp *tg)
{
        unsigned long rtime = jiffies, wtime = jiffies;

        if (tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW])
                rtime = tg->last_low_overflow_time[READ];
        if (tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW])
                wtime = tg->last_low_overflow_time[WRITE];
        return min(rtime, wtime);
}
/* tg should not be an intermediate node */
static unsigned long tg_last_low_overflow_time(struct throtl_grp *tg)
{
        struct throtl_service_queue *parent_sq;
        struct throtl_grp *parent = tg;
        unsigned long ret = __tg_last_low_overflow_time(tg);

        while (true) {
                parent_sq = parent->service_queue.parent_sq;
                parent = sq_to_tg(parent_sq);
                if (!parent)
                        break;

                /*
                 * The parent doesn't have a low limit, so it always reaches
                 * its low limit. Its overflow time is useless for its
                 * children.
                 */
                if (!parent->bps[READ][LIMIT_LOW] &&
                    !parent->iops[READ][LIMIT_LOW] &&
                    !parent->bps[WRITE][LIMIT_LOW] &&
                    !parent->iops[WRITE][LIMIT_LOW])
                        continue;
                if (time_after(__tg_last_low_overflow_time(parent), ret))
                        ret = __tg_last_low_overflow_time(parent);
        }
        return ret;
}
static bool throtl_tg_is_idle(struct throtl_grp *tg)
{
        /*
         * cgroup is idle if:
         * - single idle is too long, longer than a fixed value (in case user
         *   configures too big a threshold) or 4 times of idletime threshold
         * - average think time is more than threshold
         * - IO latency is largely below threshold
         */
        unsigned long time;
        bool ret;

        time = min_t(unsigned long, MAX_IDLE_TIME, 4 * tg->idletime_threshold);
        ret = tg->latency_target == DFL_LATENCY_TARGET ||
              tg->idletime_threshold == DFL_IDLE_THRESHOLD ||
              (ktime_get_ns() >> 10) - tg->last_finish_time > time ||
              tg->avg_idletime > tg->idletime_threshold ||
              (tg->latency_target && tg->bio_cnt &&
               tg->bad_bio_cnt * 5 < tg->bio_cnt);
        throtl_log(&tg->service_queue,
                "avg_idle=%ld, idle_threshold=%ld, bad_bio=%d, total_bio=%d, is_idle=%d, scale=%d",
                tg->avg_idletime, tg->idletime_threshold, tg->bad_bio_cnt,
                tg->bio_cnt, ret, tg->td->scale);
        return ret;
}
static bool throtl_tg_can_upgrade(struct throtl_grp *tg)
{
        struct throtl_service_queue *sq = &tg->service_queue;
        bool read_limit, write_limit;

        /*
         * if cgroup reaches low limit (if low limit is 0, the cgroup always
         * reaches), it's ok to upgrade to next limit
         */
        read_limit = tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW];
        write_limit = tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW];
        if (!read_limit && !write_limit)
                return true;
        if (read_limit && sq->nr_queued[READ] &&
            (!write_limit || sq->nr_queued[WRITE]))
                return true;
        if (write_limit && sq->nr_queued[WRITE] &&
            (!read_limit || sq->nr_queued[READ]))
                return true;

        if (time_after_eq(jiffies,
            tg_last_low_overflow_time(tg) + tg->td->throtl_slice) &&
            throtl_tg_is_idle(tg))
                return true;
        return false;
}
static bool throtl_hierarchy_can_upgrade(struct throtl_grp *tg)
{
        while (true) {
                if (throtl_tg_can_upgrade(tg))
                        return true;
                tg = sq_to_tg(tg->service_queue.parent_sq);
                if (!tg || !tg_to_blkg(tg)->parent)
                        return false;
        }
        return false;
}
static bool throtl_can_upgrade(struct throtl_data *td,
                               struct throtl_grp *this_tg)
{
        struct cgroup_subsys_state *pos_css;
        struct blkcg_gq *blkg;

        if (td->limit_index != LIMIT_LOW)
                return false;

        if (time_before(jiffies, td->low_downgrade_time + td->throtl_slice))
                return false;

        rcu_read_lock();
        blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
                struct throtl_grp *tg = blkg_to_tg(blkg);

                if (tg == this_tg)
                        continue;
                if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
                        continue;
                if (!throtl_hierarchy_can_upgrade(tg)) {
                        rcu_read_unlock();
                        return false;
                }
        }
        rcu_read_unlock();
        return true;
}
*tg
)
1864 unsigned long now
= jiffies
;
1866 if (tg
->td
->limit_index
!= LIMIT_LOW
)
1869 if (time_after(tg
->last_check_time
+ tg
->td
->throtl_slice
, now
))
1872 tg
->last_check_time
= now
;
1874 if (!time_after_eq(now
,
1875 __tg_last_low_overflow_time(tg
) + tg
->td
->throtl_slice
))
1878 if (throtl_can_upgrade(tg
->td
, NULL
))
1879 throtl_upgrade_state(tg
->td
);
static void throtl_upgrade_state(struct throtl_data *td)
{
        struct cgroup_subsys_state *pos_css;
        struct blkcg_gq *blkg;

        throtl_log(&td->service_queue, "upgrade to max");
        td->limit_index = LIMIT_MAX;
        td->low_upgrade_time = jiffies;
        td->scale = 0;
        rcu_read_lock();
        blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
                struct throtl_grp *tg = blkg_to_tg(blkg);
                struct throtl_service_queue *sq = &tg->service_queue;

                tg->disptime = jiffies - 1;
                throtl_select_dispatch(sq);
                throtl_schedule_next_dispatch(sq, false);
        }
        rcu_read_unlock();
        throtl_select_dispatch(&td->service_queue);
        throtl_schedule_next_dispatch(&td->service_queue, false);
        queue_work(kthrotld_workqueue, &td->dispatch_work);
}
static void throtl_downgrade_state(struct throtl_data *td, int new)
{
        td->scale /= 2;

        throtl_log(&td->service_queue, "downgrade, scale %d", td->scale);
        if (td->scale) {
                td->low_upgrade_time = jiffies - td->scale * td->throtl_slice;
                return;
        }

        td->limit_index = new;
        td->low_downgrade_time = jiffies;
}
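/*
 * Note (added): halving td->scale on every downgrade makes the earlier
 * linear scale-up decay exponentially, e.g. scale 8 -> 4 -> 2 -> 1 -> 0,
 * and only when it reaches 0 does the state actually drop back to the
 * new (LIMIT_LOW) index.
 */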
static bool throtl_tg_can_downgrade(struct throtl_grp *tg)
{
        struct throtl_data *td = tg->td;
        unsigned long now = jiffies;

        /*
         * If cgroup is below low limit, consider downgrade and throttle other
         * cgroups
         */
        if (time_after_eq(now, td->low_upgrade_time + td->throtl_slice) &&
            time_after_eq(now, tg_last_low_overflow_time(tg) +
                                        td->throtl_slice) &&
            (!throtl_tg_is_idle(tg) ||
             !list_empty(&tg_to_blkg(tg)->blkcg->css.children)))
                return true;
        return false;
}
static bool throtl_hierarchy_can_downgrade(struct throtl_grp *tg)
{
        while (true) {
                if (!throtl_tg_can_downgrade(tg))
                        return false;
                tg = sq_to_tg(tg->service_queue.parent_sq);
                if (!tg || !tg_to_blkg(tg)->parent)
                        break;
        }
        return true;
}
static void throtl_downgrade_check(struct throtl_grp *tg)
{
        uint64_t bps;
        unsigned int iops;
        unsigned long elapsed_time;
        unsigned long now = jiffies;

        if (tg->td->limit_index != LIMIT_MAX ||
            !tg->td->limit_valid[LIMIT_LOW])
                return;
        if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
                return;
        if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
                return;

        elapsed_time = now - tg->last_check_time;
        tg->last_check_time = now;

        if (time_before(now, tg_last_low_overflow_time(tg) +
                        tg->td->throtl_slice))
                return;

        if (tg->bps[READ][LIMIT_LOW]) {
                bps = tg->last_bytes_disp[READ] * HZ;
                do_div(bps, elapsed_time);
                if (bps >= tg->bps[READ][LIMIT_LOW])
                        tg->last_low_overflow_time[READ] = now;
        }

        if (tg->bps[WRITE][LIMIT_LOW]) {
                bps = tg->last_bytes_disp[WRITE] * HZ;
                do_div(bps, elapsed_time);
                if (bps >= tg->bps[WRITE][LIMIT_LOW])
                        tg->last_low_overflow_time[WRITE] = now;
        }

        if (tg->iops[READ][LIMIT_LOW]) {
                iops = tg->last_io_disp[READ] * HZ / elapsed_time;
                if (iops >= tg->iops[READ][LIMIT_LOW])
                        tg->last_low_overflow_time[READ] = now;
        }

        if (tg->iops[WRITE][LIMIT_LOW]) {
                iops = tg->last_io_disp[WRITE] * HZ / elapsed_time;
                if (iops >= tg->iops[WRITE][LIMIT_LOW])
                        tg->last_low_overflow_time[WRITE] = now;
        }

        /*
         * If cgroup is below low limit, consider downgrade and throttle other
         * cgroups
         */
        if (throtl_hierarchy_can_downgrade(tg))
                throtl_downgrade_state(tg->td, LIMIT_LOW);

        tg->last_bytes_disp[READ] = 0;
        tg->last_bytes_disp[WRITE] = 0;
        tg->last_io_disp[READ] = 0;
        tg->last_io_disp[WRITE] = 0;
}
static void blk_throtl_update_idletime(struct throtl_grp *tg)
{
        unsigned long now = ktime_get_ns() >> 10;
        unsigned long last_finish_time = tg->last_finish_time;

        if (now <= last_finish_time || last_finish_time == 0 ||
            last_finish_time == tg->checked_last_finish_time)
                return;

        tg->avg_idletime = (tg->avg_idletime * 7 + now - last_finish_time) >> 3;
        tg->checked_last_finish_time = last_finish_time;
}
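/*
 * Note (added): this is an exponentially weighted moving average with a
 * 7/8 decay factor, avg_idletime = (7 * avg_idletime + new_idle) / 8,
 * computed with shifts since the values are kept in ns/1024 units.
 */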
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
static void throtl_update_latency_buckets(struct throtl_data *td)
{
	struct avg_latency_bucket avg_latency[LATENCY_BUCKET_SIZE];
	int i, cpu;
	unsigned long last_latency = 0;
	unsigned long latency;

	if (!blk_queue_nonrot(td->queue))
		return;
	if (time_before(jiffies, td->last_calculate_time + HZ))
		return;
	td->last_calculate_time = jiffies;

	memset(avg_latency, 0, sizeof(avg_latency));
	for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
		struct latency_bucket *tmp = &td->tmp_buckets[i];

		for_each_possible_cpu(cpu) {
			struct latency_bucket *bucket;

			/* this isn't race free, but ok in practice */
			bucket = per_cpu_ptr(td->latency_buckets, cpu);
			tmp->total_latency += bucket[i].total_latency;
			tmp->samples += bucket[i].samples;
			bucket[i].total_latency = 0;
			bucket[i].samples = 0;
		}

		if (tmp->samples >= 32) {
			int samples = tmp->samples;

			latency = tmp->total_latency;

			tmp->total_latency = 0;
			tmp->samples = 0;
			latency /= samples;
			if (latency == 0)
				continue;
			avg_latency[i].latency = latency;
		}
	}

	for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
		if (!avg_latency[i].latency) {
			if (td->avg_buckets[i].latency < last_latency)
				td->avg_buckets[i].latency = last_latency;
			continue;
		}

		if (!td->avg_buckets[i].valid)
			latency = avg_latency[i].latency;
		else
			latency = (td->avg_buckets[i].latency * 7 +
				avg_latency[i].latency) >> 3;

		td->avg_buckets[i].latency = max(latency, last_latency);
		td->avg_buckets[i].valid = true;
		last_latency = td->avg_buckets[i].latency;
	}

	for (i = 0; i < LATENCY_BUCKET_SIZE; i++)
		throtl_log(&td->service_queue,
			"Latency bucket %d: latency=%ld, valid=%d", i,
			td->avg_buckets[i].latency, td->avg_buckets[i].valid);
}
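/*
 * Illustrative example of the clamping above (numbers invented): if
 * bucket 0 (small requests) settles at 500us and bucket 1's EWMA comes
 * out at 400us, max(latency, last_latency) raises bucket 1 to 500us.
 * The per-size thresholds are therefore monotonically non-decreasing,
 * matching the expectation that larger requests never get a smaller
 * latency threshold than smaller ones.
 */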
#else
static inline void throtl_update_latency_buckets(struct throtl_data *td)
{
}
#endif
static void blk_throtl_assoc_bio(struct throtl_grp *tg, struct bio *bio)
{
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	int ret;

	ret = bio_associate_current(bio);
	if (ret == 0 || ret == -EBUSY)
		bio->bi_cg_private = tg;
	blk_stat_set_issue(&bio->bi_issue_stat, bio_sectors(bio));
#else
	bio_associate_current(bio);
#endif
}
bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
		    struct bio *bio)
{
	struct throtl_qnode *qn = NULL;
	struct throtl_grp *tg = blkg_to_tg(blkg ?: q->root_blkg);
	struct throtl_service_queue *sq;
	bool rw = bio_data_dir(bio);
	bool throttled = false;
	struct throtl_data *td = tg->td;

	WARN_ON_ONCE(!rcu_read_lock_held());

	/* see throtl_charge_bio() */
	if (bio_flagged(bio, BIO_THROTTLED) || !tg->has_rules[rw])
		goto out;

	spin_lock_irq(q->queue_lock);

	throtl_update_latency_buckets(td);

	if (unlikely(blk_queue_bypass(q)))
		goto out_unlock;

	blk_throtl_assoc_bio(tg, bio);
	blk_throtl_update_idletime(tg);

	sq = &tg->service_queue;

again:
	while (true) {
		if (tg->last_low_overflow_time[rw] == 0)
			tg->last_low_overflow_time[rw] = jiffies;
		throtl_downgrade_check(tg);
		throtl_upgrade_check(tg);
		/* throtl is FIFO - if bios are already queued, should queue */
		if (sq->nr_queued[rw])
			break;

		/* if above limits, break to queue */
		if (!tg_may_dispatch(tg, bio, NULL)) {
			tg->last_low_overflow_time[rw] = jiffies;
			if (throtl_can_upgrade(td, tg)) {
				throtl_upgrade_state(td);
				goto again;
			}
			break;
		}

		/* within limits, let's charge and dispatch directly */
		throtl_charge_bio(tg, bio);

		/*
		 * We need to trim slice even when bios are not being queued,
		 * otherwise it might happen that a bio is not queued for
		 * a long time and the slice keeps on extending and trim is
		 * not called for a long time.  Now if limits are reduced
		 * suddenly we take into account all the IO dispatched so far
		 * at the new low rate and newly queued IO gets a really long
		 * dispatch time.
		 *
		 * So keep on trimming slice even if bio is not queued.
		 */
		throtl_trim_slice(tg, rw);

		/*
		 * @bio passed through this layer without being throttled.
		 * Climb up the ladder.  If we're already at the top, it
		 * can be executed directly.
		 */
		qn = &tg->qnode_on_parent[rw];
		sq = sq->parent_sq;
		tg = sq_to_tg(sq);
		if (!tg)
			goto out_unlock;
	}

	/* out-of-limit, queue to @tg */
	throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
		   rw == READ ? 'R' : 'W',
		   tg->bytes_disp[rw], bio->bi_iter.bi_size,
		   tg_bps_limit(tg, rw),
		   tg->io_disp[rw], tg_iops_limit(tg, rw),
		   sq->nr_queued[READ], sq->nr_queued[WRITE]);

	tg->last_low_overflow_time[rw] = jiffies;

	td->nr_queued[rw]++;
	throtl_add_bio_tg(bio, qn, tg);
	throttled = true;

	/*
	 * Update @tg's dispatch time and force schedule dispatch if @tg
	 * was empty before @bio.  The forced scheduling isn't likely to
	 * cause undue delay as @bio is likely to be dispatched directly if
	 * its @tg's disptime is not in the future.
	 */
	if (tg->flags & THROTL_TG_WAS_EMPTY) {
		tg_update_disptime(tg);
		throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true);
	}

out_unlock:
	spin_unlock_irq(q->queue_lock);
out:
	/*
	 * As multiple blk-throtls may stack in the same issue path, we
	 * don't want bios to leave with the flag set.  Clear the flag if
	 * being issued.
	 */
	if (!throttled)
		bio_clear_flag(bio, BIO_THROTTLED);

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	if (throttled || !td->track_bio_latency)
		bio->bi_issue_stat.stat |= SKIP_LATENCY;
#endif
	return throttled;
}
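/*
 * Sketch of how a bio moves through the loop above (illustrative): a
 * bio issued in cgroup /a/b is first charged against b's group; while
 * each level is within limits and has nothing queued, the bio climbs
 * via qnode_on_parent[rw] to the parent service_queue.  Reaching the
 * top (tg == NULL) means it is issued immediately; hitting an
 * over-limit level, or one with bios already queued, means it is
 * queued FIFO there and dispatched later by the pending timer.
 */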
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
static void throtl_track_latency(struct throtl_data *td, sector_t size,
	int op, unsigned long time)
{
	struct latency_bucket *latency;
	int index;

	if (!td || td->limit_index != LIMIT_LOW || op != REQ_OP_READ ||
	    !blk_queue_nonrot(td->queue))
		return;

	index = request_bucket_index(size);

	latency = get_cpu_ptr(td->latency_buckets);
	latency[index].total_latency += time;
	latency[index].samples++;
	put_cpu_ptr(td->latency_buckets);
}
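/*
 * Note on the per-cpu scheme above: get_cpu_ptr()/put_cpu_ptr() pin the
 * task to the local CPU's bucket array for the two updates, so writers
 * on different CPUs touch different cache lines instead of contending
 * on one shared counter.  The per-cpu sums are folded together (racily,
 * by design) in throtl_update_latency_buckets().
 */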
void blk_throtl_stat_add(struct request *rq, u64 time_ns)
{
	struct request_queue *q = rq->q;
	struct throtl_data *td = q->td;

	throtl_track_latency(td, blk_stat_size(&rq->issue_stat),
		req_op(rq), time_ns >> 10);
}
void blk_throtl_bio_endio(struct bio *bio)
{
	struct throtl_grp *tg;
	u64 finish_time_ns;
	unsigned long finish_time;
	unsigned long start_time;
	unsigned long lat;

	tg = bio->bi_cg_private;
	if (!tg)
		return;
	bio->bi_cg_private = NULL;

	finish_time_ns = ktime_get_ns();
	tg->last_finish_time = finish_time_ns >> 10;

	start_time = blk_stat_time(&bio->bi_issue_stat) >> 10;
	finish_time = __blk_stat_time(finish_time_ns) >> 10;
	if (!start_time || finish_time <= start_time)
		return;

	lat = finish_time - start_time;
	/* this is only for bio based driver */
	if (!(bio->bi_issue_stat.stat & SKIP_LATENCY))
		throtl_track_latency(tg->td, blk_stat_size(&bio->bi_issue_stat),
			bio_op(bio), lat);

	if (tg->latency_target) {
		int bucket;
		unsigned int threshold;

		bucket = request_bucket_index(
			blk_stat_size(&bio->bi_issue_stat));
		threshold = tg->td->avg_buckets[bucket].latency +
			tg->latency_target;
		if (lat > threshold)
			tg->bad_bio_cnt++;
		/*
		 * Not race free, could get wrong count, which means cgroups
		 * will be throttled
		 */
		tg->bio_cnt++;
	}

	if (time_after(jiffies, tg->bio_cnt_reset_time) || tg->bio_cnt > 1024) {
		tg->bio_cnt_reset_time = tg->td->throtl_slice + jiffies;
		tg->bio_cnt /= 2;
		tg->bad_bio_cnt /= 2;
	}
}
#endif
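/*
 * Illustrative numbers for the decay above: with bio_cnt == 1200 and
 * bad_bio_cnt == 300 when the window resets, halving leaves 600/150,
 * preserving the 25% bad-bio ratio while letting older history fade.
 * Elsewhere in this file that ratio is compared against a fixed bound
 * when judging whether a group is meeting its latency target.
 */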
/*
 * Dispatch all bios from all children tg's queued on @parent_sq.  On
 * return, @parent_sq is guaranteed to not have any active children tg's
 * and all bios from previously active tg's are on @parent_sq->bio_lists[].
 */
static void tg_drain_bios(struct throtl_service_queue *parent_sq)
{
	struct throtl_grp *tg;

	while ((tg = throtl_rb_first(parent_sq))) {
		struct throtl_service_queue *sq = &tg->service_queue;
		struct bio *bio;

		throtl_dequeue_tg(tg);

		while ((bio = throtl_peek_queued(&sq->queued[READ])))
			tg_dispatch_one_bio(tg, bio_data_dir(bio));
		while ((bio = throtl_peek_queued(&sq->queued[WRITE])))
			tg_dispatch_one_bio(tg, bio_data_dir(bio));
	}
}
/**
 * blk_throtl_drain - drain throttled bios
 * @q: request_queue to drain throttled bios for
 *
 * Dispatch all currently throttled bios on @q through ->make_request_fn().
 */
void blk_throtl_drain(struct request_queue *q)
	__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct throtl_data *td = q->td;
	struct blkcg_gq *blkg;
	struct cgroup_subsys_state *pos_css;
	struct bio *bio;
	int rw;

	queue_lockdep_assert_held(q);
	rcu_read_lock();

	/*
	 * Drain each tg while doing post-order walk on the blkg tree, so
	 * that all bios are propagated to td->service_queue.  It'd be
	 * better to walk service_queue tree directly but blkg walk is
	 * easier.
	 */
	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg)
		tg_drain_bios(&blkg_to_tg(blkg)->service_queue);

	/* finally, transfer bios from top-level tg's into the td */
	tg_drain_bios(&td->service_queue);

	rcu_read_unlock();
	spin_unlock_irq(q->queue_lock);

	/* all bios now should be in td->service_queue, issue them */
	for (rw = READ; rw <= WRITE; rw++)
		while ((bio = throtl_pop_queued(&td->service_queue.queued[rw],
						NULL)))
			generic_make_request(bio);

	spin_lock_irq(q->queue_lock);
}
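/*
 * Note why the queue lock is dropped above: generic_make_request() may
 * re-enter the block layer (and blk_throtl_bio()) for the same queue,
 * so issuing the drained bios with q->queue_lock held would risk
 * recursive locking.  The __releases/__acquires annotations document
 * the lock dance for sparse.
 */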
int blk_throtl_init(struct request_queue *q)
{
	struct throtl_data *td;
	int ret;

	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
	if (!td)
		return -ENOMEM;
	td->latency_buckets = __alloc_percpu(sizeof(struct latency_bucket) *
		LATENCY_BUCKET_SIZE, __alignof__(u64));
	if (!td->latency_buckets) {
		kfree(td);
		return -ENOMEM;
	}

	INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
	throtl_service_queue_init(&td->service_queue);

	q->td = td;
	td->queue = q;

	td->limit_valid[LIMIT_MAX] = true;
	td->limit_index = LIMIT_MAX;
	td->low_upgrade_time = jiffies;
	td->low_downgrade_time = jiffies;

	/* activate policy */
	ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
	if (ret) {
		free_percpu(td->latency_buckets);
		kfree(td);
	}
	return ret;
}
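/*
 * Note the unwind order above: if blkcg_activate_policy() fails, both
 * the per-cpu latency buckets and td itself are freed before returning,
 * the mirror image of the teardown in blk_throtl_exit() below.
 */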
void blk_throtl_exit(struct request_queue *q)
{
	BUG_ON(!q->td);
	throtl_shutdown_wq(q);
	blkcg_deactivate_policy(q, &blkcg_policy_throtl);
	free_percpu(q->td->latency_buckets);
	kfree(q->td);
}
void blk_throtl_register_queue(struct request_queue *q)
{
	struct throtl_data *td;

	td = q->td;
	BUG_ON(!td);

	if (blk_queue_nonrot(q))
		td->throtl_slice = DFL_THROTL_SLICE_SSD;
	else
		td->throtl_slice = DFL_THROTL_SLICE_HD;
#ifndef CONFIG_BLK_DEV_THROTTLING_LOW
	/* if no low limit, use previous default */
	td->throtl_slice = DFL_THROTL_SLICE_HD;
#endif

	td->track_bio_latency = !q->mq_ops && !q->request_fn;
	if (!td->track_bio_latency)
		blk_stat_enable_accounting(q);
}
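/*
 * On the flag above: track_bio_latency ends up set only for bio-based
 * drivers (no mq_ops and no request_fn).  For request-based queues the
 * completion path feeds blk_throtl_stat_add() instead, so full blk-stat
 * accounting is enabled for them.
 */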
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page)
{
	if (!q->td)
		return -EINVAL;
	return sprintf(page, "%u\n", jiffies_to_msecs(q->td->throtl_slice));
}

ssize_t blk_throtl_sample_time_store(struct request_queue *q,
	const char *page, size_t count)
{
	unsigned long v;
	unsigned long t;

	if (!q->td)
		return -EINVAL;
	if (kstrtoul(page, 10, &v))
		return -EINVAL;
	t = msecs_to_jiffies(v);
	if (t == 0 || t > MAX_THROTL_SLICE)
		return -EINVAL;
	q->td->throtl_slice = t;
	return count;
}
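/*
 * Example usage from userspace (assuming the attribute is exposed as
 * queue/throttle_sample_time by the sysfs registration):
 *
 *   # cat /sys/block/sda/queue/throttle_sample_time
 *   100
 *   # echo 50 > /sys/block/sda/queue/throttle_sample_time
 *
 * Values are in milliseconds; after conversion to jiffies they must be
 * non-zero and no larger than MAX_THROTL_SLICE, otherwise the write
 * fails with -EINVAL.
 */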
#endif

static int __init throtl_init(void)
{
	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
	if (!kthrotld_workqueue)
		panic("Failed to create kthrotld\n");

	return blkcg_policy_register(&blkcg_policy_throtl);
}

module_init(throtl_init);