/*
 * Interface for controlling IO bandwidth on a request queue
 *
 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blktrace_api.h>
#include "blk-cgroup.h"

/* Max dispatch from a group in 1 round */
static int throtl_grp_quantum = 8;

/* Total max dispatch from all groups in one round */
static int throtl_quantum = 32;

/* Throttling is performed over a 100ms slice and after that the slice is renewed */
static unsigned long throtl_slice = HZ/10;	/* 100 ms */

static struct blkcg_policy blkcg_policy_throtl;

/* A workqueue to queue throttle related work */
static struct workqueue_struct *kthrotld_workqueue;
/*
 * To implement hierarchical throttling, throtl_grps form a tree and bios
 * are dispatched upwards level by level until they reach the top and get
 * issued.  When dispatching bios from the children and local group at each
 * level, if the bios are dispatched into a single bio_list, there's a risk
 * of a local or child group which can queue many bios at once filling up
 * the list starving others.
 *
 * To avoid such starvation, dispatched bios are queued separately
 * according to where they came from.  When they are again dispatched to
 * the parent, they're popped in round-robin order so that no single source
 * hogs the dispatch window.
 *
 * throtl_qnode is used to keep the queued bios separated by their sources.
 * Bios are queued to throtl_qnode which in turn is queued to
 * throtl_service_queue and then dispatched in round-robin order.
 *
 * It's also used to track the reference counts on blkg's.  A qnode always
 * belongs to a throtl_grp and gets queued on itself or the parent, so
 * incrementing the reference of the associated throtl_grp when a qnode is
 * queued and decrementing when dequeued is enough to keep the whole blkg
 * tree pinned while bios are in flight.
 */
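/*
 * Illustrative walk-through (an addition, not part of the original code):
 * suppose a parent service_queue holds two child qnodes, A with queued
 * bios a1, a2, a3 and B with b1.  Because each source keeps its own qnode
 * and a still-populated qnode is rotated to the tail after every pop,
 * repeatedly popping from the parent yields a1, b1, a2, a3 rather than
 * a1, a2, a3, b1, so B's single bio is not starved behind A's backlog.
 */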
struct throtl_qnode {
	struct list_head	node;		/* service_queue->queued[] */
	struct bio_list		bios;		/* queued bios */
	struct throtl_grp	*tg;		/* tg this qnode belongs to */
};

struct throtl_service_queue {
	struct throtl_service_queue *parent_sq;	/* the parent service_queue */

	/*
	 * Bios queued directly to this service_queue or dispatched from
	 * children throtl_grp's.
	 */
	struct list_head	queued[2];	/* throtl_qnode [READ/WRITE] */
	unsigned int		nr_queued[2];	/* number of queued bios */

	/*
	 * RB tree of active children throtl_grp's, which are sorted by
	 * their ->disptime.
	 */
	struct rb_root		pending_tree;	/* RB tree of active tgs */
	struct rb_node		*first_pending;	/* first node in the tree */
	unsigned int		nr_pending;	/* # queued in the tree */
	unsigned long		first_pending_disptime;	/* disptime of the first tg */
	struct timer_list	pending_timer;	/* fires on first_pending_disptime */
};

enum tg_state_flags {
	THROTL_TG_PENDING	= 1 << 0,	/* on parent's pending tree */
	THROTL_TG_WAS_EMPTY	= 1 << 1,	/* bio_lists[] became non-empty */
};

#define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)

/* Per-cpu group stats */
struct tg_stats_cpu {
	/* total bytes transferred */
	struct blkg_rwstat		service_bytes;
	/* total IOs serviced, post merge */
	struct blkg_rwstat		serviced;
};
struct throtl_grp {
	/* must be the first member */
	struct blkg_policy_data pd;

	/* active throtl group service_queue member */
	struct rb_node rb_node;

	/* throtl_data this group belongs to */
	struct throtl_data *td;

	/* this group's service queue */
	struct throtl_service_queue service_queue;

	/*
	 * qnode_on_self is used when bios are directly queued to this
	 * throtl_grp so that local bios compete fairly with bios
	 * dispatched from children.  qnode_on_parent is used when bios are
	 * dispatched from this throtl_grp into its parent and will compete
	 * with the sibling qnode_on_parents and the parent's
	 * qnode_on_self.
	 */
	struct throtl_qnode qnode_on_self[2];
	struct throtl_qnode qnode_on_parent[2];

	/*
	 * Dispatch time in jiffies. This is the estimated time when the group
	 * will unthrottle and is ready to dispatch more bios. It is used as a
	 * key to sort active groups in the service tree.
	 */
	unsigned long disptime;

	unsigned int flags;

	/* are there any throtl rules between this group and td? */
	bool has_rules[2];

	/* bytes per second rate limits */
	uint64_t bps[2];

	/* IOPS limits */
	unsigned int iops[2];

	/* Number of bytes dispatched in the current slice */
	uint64_t bytes_disp[2];
	/* Number of bios dispatched in the current slice */
	unsigned int io_disp[2];

	/* When did we start a new slice */
	unsigned long slice_start[2];
	unsigned long slice_end[2];

	/* Per cpu stats pointer */
	struct tg_stats_cpu __percpu *stats_cpu;

	/* List of tgs waiting for per cpu stats memory to be allocated */
	struct list_head stats_alloc_node;
};
struct throtl_data
{
	/* service tree for active throtl groups */
	struct throtl_service_queue service_queue;

	struct request_queue *queue;

	/* Total Number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	/* number of total undestroyed groups */
	unsigned int nr_undestroyed_grps;

	/* Work for dispatching throttled bios */
	struct work_struct dispatch_work;
};

/* list and work item to allocate percpu group stats */
static DEFINE_SPINLOCK(tg_stats_alloc_lock);
static LIST_HEAD(tg_stats_alloc_list);

static void tg_stats_alloc_fn(struct work_struct *);
static DECLARE_DELAYED_WORK(tg_stats_alloc_work, tg_stats_alloc_fn);

static void throtl_pending_timer_fn(unsigned long arg);
static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
}

static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
{
	return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
}

static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
{
	return pd_to_blkg(&tg->pd);
}

static inline struct throtl_grp *td_root_tg(struct throtl_data *td)
{
	return blkg_to_tg(td->queue->root_blkg);
}
/**
 * sq_to_tg - return the throtl_grp the specified service queue belongs to
 * @sq: the throtl_service_queue of interest
 *
 * Return the throtl_grp @sq belongs to.  If @sq is the top-level one
 * embedded in throtl_data, %NULL is returned.
 */
static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq)
{
	if (sq && sq->parent_sq)
		return container_of(sq, struct throtl_grp, service_queue);
	else
		return NULL;
}

/**
 * sq_to_td - return throtl_data the specified service queue belongs to
 * @sq: the throtl_service_queue of interest
 *
 * A service_queue can be embedded in either a throtl_grp or throtl_data.
 * Determine the associated throtl_data accordingly and return it.
 */
static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
{
	struct throtl_grp *tg = sq_to_tg(sq);

	if (tg)
		return tg->td;
	else
		return container_of(sq, struct throtl_data, service_queue);
}
/**
 * throtl_log - log debug message via blktrace
 * @sq: the service_queue being reported
 * @fmt: printf format string
 * @args: printf args
 *
 * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
 * throtl_grp; otherwise, just "throtl".
 *
 * TODO: this should be made a function and name formatting should happen
 * after testing whether blktrace is enabled.
 */
#define throtl_log(sq, fmt, args...)	do {				\
	struct throtl_grp *__tg = sq_to_tg((sq));			\
	struct throtl_data *__td = sq_to_td((sq));			\
									\
	if ((__tg)) {							\
		char __pbuf[128];					\
									\
		blkg_path(tg_to_blkg(__tg), __pbuf, sizeof(__pbuf));	\
		blk_add_trace_msg(__td->queue, "throtl %s " fmt, __pbuf, ##args); \
	} else {							\
		blk_add_trace_msg(__td->queue, "throtl " fmt, ##args);	\
	}								\
} while (0)
/*
 * Worker for allocating per cpu stat for tgs. This is scheduled on the
 * system_wq once there are some groups on the alloc_list waiting for
 * allocation.
 */
static void tg_stats_alloc_fn(struct work_struct *work)
{
	static struct tg_stats_cpu *stats_cpu;	/* this fn is non-reentrant */
	struct delayed_work *dwork = to_delayed_work(work);
	bool empty = false;

alloc_stats:
	if (!stats_cpu) {
		stats_cpu = alloc_percpu(struct tg_stats_cpu);
		if (!stats_cpu) {
			/* allocation failed, try again after some time */
			schedule_delayed_work(dwork, msecs_to_jiffies(10));
			return;
		}
	}

	spin_lock_irq(&tg_stats_alloc_lock);

	if (!list_empty(&tg_stats_alloc_list)) {
		struct throtl_grp *tg = list_first_entry(&tg_stats_alloc_list,
							 struct throtl_grp,
							 stats_alloc_node);
		swap(tg->stats_cpu, stats_cpu);
		list_del_init(&tg->stats_alloc_node);
	}

	empty = list_empty(&tg_stats_alloc_list);
	spin_unlock_irq(&tg_stats_alloc_lock);
	if (!empty)
		goto alloc_stats;
}
static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
{
	INIT_LIST_HEAD(&qn->node);
	bio_list_init(&qn->bios);
	qn->tg = tg;
}

/**
 * throtl_qnode_add_bio - add a bio to a throtl_qnode and activate it
 * @bio: bio being added
 * @qn: qnode to add bio to
 * @queued: the service_queue->queued[] list @qn belongs to
 *
 * Add @bio to @qn and put @qn on @queued if it's not already on.
 * @qn->tg's reference count is bumped when @qn is activated.  See the
 * comment on top of throtl_qnode definition for details.
 */
static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,
				 struct list_head *queued)
{
	bio_list_add(&qn->bios, bio);
	if (list_empty(&qn->node)) {
		list_add_tail(&qn->node, queued);
		blkg_get(tg_to_blkg(qn->tg));
	}
}
/**
 * throtl_peek_queued - peek the first bio on a qnode list
 * @queued: the qnode list to peek
 */
static struct bio *throtl_peek_queued(struct list_head *queued)
{
	struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node);
	struct bio *bio;

	if (list_empty(queued))
		return NULL;

	bio = bio_list_peek(&qn->bios);
	WARN_ON_ONCE(!bio);
	return bio;
}
/**
 * throtl_pop_queued - pop the first bio from a qnode list
 * @queued: the qnode list to pop a bio from
 * @tg_to_put: optional out argument for throtl_grp to put
 *
 * Pop the first bio from the qnode list @queued.  After popping, the first
 * qnode is removed from @queued if empty or moved to the end of @queued so
 * that the popping order is round-robin.
 *
 * When the first qnode is removed, its associated throtl_grp should be put
 * too.  If @tg_to_put is NULL, this function automatically puts it;
 * otherwise, *@tg_to_put is set to the throtl_grp to put and the caller is
 * responsible for putting it.
 */
static struct bio *throtl_pop_queued(struct list_head *queued,
				     struct throtl_grp **tg_to_put)
{
	struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node);
	struct bio *bio;

	if (list_empty(queued))
		return NULL;

	bio = bio_list_pop(&qn->bios);
	WARN_ON_ONCE(!bio);

	if (bio_list_empty(&qn->bios)) {
		list_del_init(&qn->node);

		if (tg_to_put)
			*tg_to_put = qn->tg;
		else
			blkg_put(tg_to_blkg(qn->tg));
	} else {
		list_move_tail(&qn->node, queued);
	}

	return bio;
}
/* init a service_queue, assumes the caller zeroed it */
static void throtl_service_queue_init(struct throtl_service_queue *sq,
				      struct throtl_service_queue *parent_sq)
{
	INIT_LIST_HEAD(&sq->queued[0]);
	INIT_LIST_HEAD(&sq->queued[1]);
	sq->pending_tree = RB_ROOT;
	sq->parent_sq = parent_sq;
	setup_timer(&sq->pending_timer, throtl_pending_timer_fn,
		    (unsigned long)sq);
}

static void throtl_service_queue_exit(struct throtl_service_queue *sq)
{
	del_timer_sync(&sq->pending_timer);
}
static void throtl_pd_init(struct blkcg_gq *blkg)
{
	struct throtl_grp *tg = blkg_to_tg(blkg);
	struct throtl_data *td = blkg->q->td;
	unsigned long flags;
	int rw;

	throtl_service_queue_init(&tg->service_queue, &td->service_queue);
	for (rw = READ; rw <= WRITE; rw++) {
		throtl_qnode_init(&tg->qnode_on_self[rw], tg);
		throtl_qnode_init(&tg->qnode_on_parent[rw], tg);
	}

	RB_CLEAR_NODE(&tg->rb_node);
	tg->td = td;

	tg->bps[READ] = -1;
	tg->bps[WRITE] = -1;
	tg->iops[READ] = -1;
	tg->iops[WRITE] = -1;

	/*
	 * Ugh... We need to perform per-cpu allocation for tg->stats_cpu
	 * but percpu allocator can't be called from IO path.  Queue tg on
	 * tg_stats_alloc_list and allocate from work item.
	 */
	spin_lock_irqsave(&tg_stats_alloc_lock, flags);
	list_add(&tg->stats_alloc_node, &tg_stats_alloc_list);
	schedule_delayed_work(&tg_stats_alloc_work, 0);
	spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
}
/*
 * Set has_rules[] if @tg or any of its parents have limits configured.
 * This doesn't require walking up to the top of the hierarchy as the
 * parent's has_rules[] is guaranteed to be correct.
 */
static void tg_update_has_rules(struct throtl_grp *tg)
{
	struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq);
	int rw;

	for (rw = READ; rw <= WRITE; rw++)
		tg->has_rules[rw] = (parent_tg && parent_tg->has_rules[rw]) ||
				    (tg->bps[rw] != -1 || tg->iops[rw] != -1);
}
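/*
 * Illustrative example (an addition, not part of the original code): in a
 * hierarchy root -> A -> B where only A has, say, bps[READ] configured,
 * A's has_rules[READ] becomes true from its own limit and B's becomes
 * true because parent_tg->has_rules[READ] is true, even though B itself
 * has no limit of its own.  The root group, with no limits and no parent
 * rules, keeps has_rules[] false and bios issued from it bypass
 * throttling in blk_throtl_bio().
 */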
static void throtl_pd_online(struct blkcg_gq *blkg)
{
	/*
	 * We don't want new groups to escape the limits of their ancestors.
	 * Update has_rules[] after a new group is brought online.
	 */
	tg_update_has_rules(blkg_to_tg(blkg));
}

static void throtl_pd_exit(struct blkcg_gq *blkg)
{
	struct throtl_grp *tg = blkg_to_tg(blkg);
	unsigned long flags;

	spin_lock_irqsave(&tg_stats_alloc_lock, flags);
	list_del_init(&tg->stats_alloc_node);
	spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);

	free_percpu(tg->stats_cpu);

	throtl_service_queue_exit(&tg->service_queue);
}

static void throtl_pd_reset_stats(struct blkcg_gq *blkg)
{
	struct throtl_grp *tg = blkg_to_tg(blkg);
	int cpu;

	if (tg->stats_cpu == NULL)
		return;

	for_each_possible_cpu(cpu) {
		struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);

		blkg_rwstat_reset(&sc->service_bytes);
		blkg_rwstat_reset(&sc->serviced);
	}
}
static struct throtl_grp *throtl_lookup_tg(struct throtl_data *td,
					    struct blkcg *blkcg)
{
	/*
	 * This is the common case when there are no blkcgs.  Avoid lookup
	 * in this case
	 */
	if (blkcg == &blkcg_root)
		return td_root_tg(td);

	return blkg_to_tg(blkg_lookup(blkcg, td->queue));
}

static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
						  struct blkcg *blkcg)
{
	struct request_queue *q = td->queue;
	struct throtl_grp *tg = NULL;

	/*
	 * This is the common case when there are no blkcgs.  Avoid lookup
	 * in this case
	 */
	if (blkcg == &blkcg_root) {
		tg = td_root_tg(td);
	} else {
		struct blkcg_gq *blkg;

		blkg = blkg_lookup_create(blkcg, q);

		/* if %NULL and @q is alive, fall back to root_tg */
		if (!IS_ERR(blkg))
			tg = blkg_to_tg(blkg);
		else if (!blk_queue_dying(q))
			tg = td_root_tg(td);
	}

	return tg;
}
static struct throtl_grp *
throtl_rb_first(struct throtl_service_queue *parent_sq)
{
	/* Service tree is empty */
	if (!parent_sq->nr_pending)
		return NULL;

	if (!parent_sq->first_pending)
		parent_sq->first_pending = rb_first(&parent_sq->pending_tree);

	if (parent_sq->first_pending)
		return rb_entry_tg(parent_sq->first_pending);

	return NULL;
}

static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
	rb_erase(n, root);
	RB_CLEAR_NODE(n);
}

static void throtl_rb_erase(struct rb_node *n,
			    struct throtl_service_queue *parent_sq)
{
	if (parent_sq->first_pending == n)
		parent_sq->first_pending = NULL;
	rb_erase_init(n, &parent_sq->pending_tree);
	--parent_sq->nr_pending;
}

static void update_min_dispatch_time(struct throtl_service_queue *parent_sq)
{
	struct throtl_grp *tg;

	tg = throtl_rb_first(parent_sq);
	if (!tg)
		return;

	parent_sq->first_pending_disptime = tg->disptime;
}
static void tg_service_queue_add(struct throtl_grp *tg)
{
	struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq;
	struct rb_node **node = &parent_sq->pending_tree.rb_node;
	struct rb_node *parent = NULL;
	struct throtl_grp *__tg;
	unsigned long key = tg->disptime;
	int left = 1;

	while (*node != NULL) {
		parent = *node;
		__tg = rb_entry_tg(parent);

		if (time_before(key, __tg->disptime))
			node = &parent->rb_left;
		else {
			node = &parent->rb_right;
			left = 0;
		}
	}

	if (left)
		parent_sq->first_pending = &tg->rb_node;

	rb_link_node(&tg->rb_node, parent, node);
	rb_insert_color(&tg->rb_node, &parent_sq->pending_tree);
}
static void __throtl_enqueue_tg(struct throtl_grp *tg)
{
	tg_service_queue_add(tg);
	tg->flags |= THROTL_TG_PENDING;
	tg->service_queue.parent_sq->nr_pending++;
}

static void throtl_enqueue_tg(struct throtl_grp *tg)
{
	if (!(tg->flags & THROTL_TG_PENDING))
		__throtl_enqueue_tg(tg);
}

static void __throtl_dequeue_tg(struct throtl_grp *tg)
{
	throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq);
	tg->flags &= ~THROTL_TG_PENDING;
}

static void throtl_dequeue_tg(struct throtl_grp *tg)
{
	if (tg->flags & THROTL_TG_PENDING)
		__throtl_dequeue_tg(tg);
}

/* Call with queue lock held */
static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
					  unsigned long expires)
{
	mod_timer(&sq->pending_timer, expires);
	throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu",
		   expires - jiffies, jiffies);
}
/**
 * throtl_schedule_next_dispatch - schedule the next dispatch cycle
 * @sq: the service_queue to schedule dispatch for
 * @force: force scheduling
 *
 * Arm @sq->pending_timer so that the next dispatch cycle starts on the
 * dispatch time of the first pending child.  Returns %true if either the
 * timer is armed or there's no pending child left.  %false if the current
 * dispatch window is still open and the caller should continue
 * dispatching.
 *
 * If @force is %true, the dispatch timer is always scheduled and this
 * function is guaranteed to return %true.  This is to be used when the
 * caller can't dispatch itself and needs to invoke pending_timer
 * unconditionally.  Note that forced scheduling is likely to induce a short
 * delay before dispatch starts even if @sq->first_pending_disptime is not
 * in the future and thus shouldn't be used in hot paths.
 */
static bool throtl_schedule_next_dispatch(struct throtl_service_queue *sq,
					   bool force)
{
	/* any pending children left? */
	if (!sq->nr_pending)
		return true;

	update_min_dispatch_time(sq);

	/* is the next dispatch time in the future? */
	if (force || time_after(sq->first_pending_disptime, jiffies)) {
		throtl_schedule_pending_timer(sq, sq->first_pending_disptime);
		return true;
	}

	/* tell the caller to continue dispatching */
	return false;
}
static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
						       bool rw, unsigned long start)
{
	tg->bytes_disp[rw] = 0;
	tg->io_disp[rw] = 0;

	/*
	 * Previous slice has expired. We must have trimmed it after last
	 * bio dispatch. That means since start of last slice, we never used
	 * that bandwidth. Do try to make use of that bandwidth while giving
	 * credit.
	 */
	if (time_after_eq(start, tg->slice_start[rw]))
		tg->slice_start[rw] = start;

	tg->slice_end[rw] = jiffies + throtl_slice;
	throtl_log(&tg->service_queue,
		   "[%c] new slice with credit start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
		   tg->slice_end[rw], jiffies);
}

static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
{
	tg->bytes_disp[rw] = 0;
	tg->io_disp[rw] = 0;
	tg->slice_start[rw] = jiffies;
	tg->slice_end[rw] = jiffies + throtl_slice;
	throtl_log(&tg->service_queue,
		   "[%c] new slice start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
		   tg->slice_end[rw], jiffies);
}
static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
					 unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
}

static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
					unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
	throtl_log(&tg->service_queue,
		   "[%c] extend slice start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
		   tg->slice_end[rw], jiffies);
}

/* Determine if previously allocated or extended slice is complete or not */
static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
{
	if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
		return false;

	return true;
}
/* Trim the used slices and adjust slice start accordingly */
static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
{
	unsigned long nr_slices, time_elapsed, io_trim;
	u64 bytes_trim, tmp;

	BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));

	/*
	 * If bps are unlimited (-1), then the time slice doesn't get
	 * renewed. Don't try to trim the slice if the slice is used up. A new
	 * slice will start when appropriate.
	 */
	if (throtl_slice_used(tg, rw))
		return;

	/*
	 * A bio has been dispatched. Also adjust slice_end. It might happen
	 * that initially the cgroup limit was very low resulting in a high
	 * slice_end, but later the limit was bumped up and the bio was
	 * dispatched sooner, then we need to reduce slice_end. A high bogus
	 * slice_end is bad because it does not allow a new slice to start.
	 */
	throtl_set_slice_end(tg, rw, jiffies + throtl_slice);

	time_elapsed = jiffies - tg->slice_start[rw];

	nr_slices = time_elapsed / throtl_slice;

	if (!nr_slices)
		return;
	tmp = tg->bps[rw] * throtl_slice * nr_slices;
	do_div(tmp, HZ);
	bytes_trim = tmp;

	io_trim = (tg->iops[rw] * throtl_slice * nr_slices)/HZ;

	if (!bytes_trim && !io_trim)
		return;

	if (tg->bytes_disp[rw] >= bytes_trim)
		tg->bytes_disp[rw] -= bytes_trim;
	else
		tg->bytes_disp[rw] = 0;

	if (tg->io_disp[rw] >= io_trim)
		tg->io_disp[rw] -= io_trim;
	else
		tg->io_disp[rw] = 0;

	tg->slice_start[rw] += nr_slices * throtl_slice;

	throtl_log(&tg->service_queue,
		   "[%c] trim slice nr=%lu bytes=%llu io=%lu start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
		   tg->slice_start[rw], tg->slice_end[rw], jiffies);
}
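/*
 * Worked example (an addition, not part of the original source): assuming
 * HZ=1000, throtl_slice=100 jiffies and bps[rw]=1048576, suppose 250
 * jiffies have elapsed since slice_start.  Then nr_slices = 2, bytes_trim
 * = 1048576 * 100 * 2 / 1000 = 209715 and slice_start advances by 200
 * jiffies, so any budget left unused in the trimmed slices is forgotten
 * rather than carried forward.
 */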
static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
				  unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned int io_allowed;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
	u64 tmp;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

	/*
	 * jiffy_elapsed_rnd should not be a big value as the minimum iops
	 * can be 1. Then at most the elapsed jiffies should be equivalent
	 * to 1 second as we will allow dispatch after 1 second and after
	 * that the slice should have been trimmed.
	 */
	tmp = (u64)tg->iops[rw] * jiffy_elapsed_rnd;
	do_div(tmp, HZ);

	if (tmp > UINT_MAX)
		io_allowed = UINT_MAX;
	else
		io_allowed = tmp;

	if (tg->io_disp[rw] + 1 <= io_allowed) {
		if (wait)
			*wait = 0;
		return true;
	}

	/* Calc approx time to dispatch */
	jiffy_wait = ((tg->io_disp[rw] + 1) * HZ)/tg->iops[rw] + 1;

	if (jiffy_wait > jiffy_elapsed)
		jiffy_wait = jiffy_wait - jiffy_elapsed;
	else
		jiffy_wait = 1;

	if (wait)
		*wait = jiffy_wait;
	return false;
}
static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
				 unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	u64 bytes_allowed, extra_bytes, tmp;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

	tmp = tg->bps[rw] * jiffy_elapsed_rnd;
	do_div(tmp, HZ);
	bytes_allowed = tmp;

	if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed) {
		if (wait)
			*wait = 0;
		return true;
	}

	/* Calc approx time to dispatch */
	extra_bytes = tg->bytes_disp[rw] + bio->bi_size - bytes_allowed;
	jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);

	if (!jiffy_wait)
		jiffy_wait = 1;

	/*
	 * This wait time is without taking into consideration the rounding
	 * up we did. Add that time also.
	 */
	jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
	if (wait)
		*wait = jiffy_wait;
	return false;
}
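/*
 * Worked example (an addition, not part of the original source): assuming
 * HZ=1000, throtl_slice=100, bps[rw]=1048576, bytes_disp[rw]=0 and a
 * 262144-byte bio arriving right at slice start, jiffy_elapsed_rnd is
 * rounded up to 100, so bytes_allowed = 1048576 * 100 / 1000 = 104857.
 * extra_bytes = 262144 - 104857 = 157287 and jiffy_wait = 157287 * 1000 /
 * 1048576 = 150 jiffies; adding the rounding adjustment of 100 jiffies
 * gives a total wait of about 250 jiffies (250ms) before the bio fits
 * within the configured rate.
 */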
/*
 * Returns whether one can dispatch a bio or not. Also returns the approx number
 * of jiffies to wait before this bio is within the IO rate and can be dispatched.
 */
static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
			    unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;

	/*
	 * Currently the whole state machine of the group depends on the first
	 * bio queued in the group bio list. So one should not be calling
	 * this function with a different bio if there are other bios
	 * queued.
	 */
	BUG_ON(tg->service_queue.nr_queued[rw] &&
	       bio != throtl_peek_queued(&tg->service_queue.queued[rw]));

	/* If tg->bps = -1, then BW is unlimited */
	if (tg->bps[rw] == -1 && tg->iops[rw] == -1) {
		if (wait)
			*wait = 0;
		return true;
	}

	/*
	 * If the previous slice expired, start a new one, otherwise renew/extend
	 * the existing slice to make sure it is at least throtl_slice interval
	 * long since now.
	 */
	if (throtl_slice_used(tg, rw))
		throtl_start_new_slice(tg, rw);
	else {
		if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
			throtl_extend_slice(tg, rw, jiffies + throtl_slice);
	}

	if (tg_with_in_bps_limit(tg, bio, &bps_wait) &&
	    tg_with_in_iops_limit(tg, bio, &iops_wait)) {
		if (wait)
			*wait = 0;
		return true;
	}

	max_wait = max(bps_wait, iops_wait);

	if (wait)
		*wait = max_wait;

	if (time_before(tg->slice_end[rw], jiffies + max_wait))
		throtl_extend_slice(tg, rw, jiffies + max_wait);

	return false;
}
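/*
 * Illustrative note (an addition, not part of the original code): the bps
 * and iops waits are computed independently and the longer one wins; e.g.
 * if tg_with_in_bps_limit() reports bps_wait = 150 jiffies while
 * tg_with_in_iops_limit() reports iops_wait = 90 jiffies, the bio must
 * wait max(150, 90) = 150 jiffies and the slice is extended accordingly.
 */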
static void throtl_update_dispatch_stats(struct blkcg_gq *blkg, u64 bytes,
					 int rw)
{
	struct throtl_grp *tg = blkg_to_tg(blkg);
	struct tg_stats_cpu *stats_cpu;
	unsigned long flags;

	/* If per cpu stats are not allocated yet, don't do any accounting. */
	if (tg->stats_cpu == NULL)
		return;

	/*
	 * Disabling interrupts to provide mutual exclusion between two
	 * writes on the same cpu. It probably is not needed for 64bit. Not
	 * optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(tg->stats_cpu);

	blkg_rwstat_add(&stats_cpu->serviced, rw, 1);
	blkg_rwstat_add(&stats_cpu->service_bytes, rw, bytes);

	local_irq_restore(flags);
}

static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
	bool rw = bio_data_dir(bio);

	/* Charge the bio to the group */
	tg->bytes_disp[rw] += bio->bi_size;
	tg->io_disp[rw]++;

	/*
	 * REQ_THROTTLED is used to prevent the same bio from being throttled
	 * more than once, as a throttled bio will go through blk-throtl the
	 * second time when it eventually gets issued.  Set it when a bio
	 * is being charged to a tg.
	 *
	 * Dispatch stats aren't recursive and each @bio should only be
	 * accounted by the @tg it was originally associated with.  Let's
	 * update the stats when setting REQ_THROTTLED for the first time
	 * which is guaranteed to be for the @bio's original tg.
	 */
	if (!(bio->bi_rw & REQ_THROTTLED)) {
		bio->bi_rw |= REQ_THROTTLED;
		throtl_update_dispatch_stats(tg_to_blkg(tg), bio->bi_size,
					     bio->bi_rw);
	}
}
/**
 * throtl_add_bio_tg - add a bio to the specified throtl_grp
 * @bio: bio to add
 * @qn: the qnode to use
 * @tg: the target throtl_grp
 *
 * Add @bio to @tg's service_queue using @qn.  If @qn is not specified,
 * tg->qnode_on_self[] is used.
 */
static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn,
			      struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	bool rw = bio_data_dir(bio);

	if (!qn)
		qn = &tg->qnode_on_self[rw];

	/*
	 * If @tg doesn't currently have any bios queued in the same
	 * direction, queueing @bio can change when @tg should be
	 * dispatched.  Mark that @tg was empty.  This is automatically
	 * cleared on the next tg_update_disptime().
	 */
	if (!sq->nr_queued[rw])
		tg->flags |= THROTL_TG_WAS_EMPTY;

	throtl_qnode_add_bio(bio, qn, &sq->queued[rw]);

	sq->nr_queued[rw]++;
	throtl_enqueue_tg(tg);
}
static void tg_update_disptime(struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
	struct bio *bio;

	if ((bio = throtl_peek_queued(&sq->queued[READ])))
		tg_may_dispatch(tg, bio, &read_wait);

	if ((bio = throtl_peek_queued(&sq->queued[WRITE])))
		tg_may_dispatch(tg, bio, &write_wait);

	min_wait = min(read_wait, write_wait);
	disptime = jiffies + min_wait;

	/* Update dispatch time */
	throtl_dequeue_tg(tg);
	tg->disptime = disptime;
	throtl_enqueue_tg(tg);

	/* see throtl_add_bio_tg() */
	tg->flags &= ~THROTL_TG_WAS_EMPTY;
}

static void start_parent_slice_with_credit(struct throtl_grp *child_tg,
					   struct throtl_grp *parent_tg, bool rw)
{
	if (throtl_slice_used(parent_tg, rw)) {
		throtl_start_new_slice_with_credit(parent_tg, rw,
				child_tg->slice_start[rw]);
	}
}
static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	struct throtl_service_queue *parent_sq = sq->parent_sq;
	struct throtl_grp *parent_tg = sq_to_tg(parent_sq);
	struct throtl_grp *tg_to_put = NULL;
	struct bio *bio;

	/*
	 * @bio is being transferred from @tg to @parent_sq.  Popping a bio
	 * from @tg may put its reference and @parent_sq might end up
	 * getting released prematurely.  Remember the tg to put and put it
	 * after @bio is transferred to @parent_sq.
	 */
	bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
	sq->nr_queued[rw]--;

	throtl_charge_bio(tg, bio);

	/*
	 * If our parent is another tg, we just need to transfer @bio to
	 * the parent using throtl_add_bio_tg().  If our parent is
	 * @td->service_queue, @bio is ready to be issued.  Put it on its
	 * bio_lists[] and decrease the total number queued.  The caller is
	 * responsible for issuing these bios.
	 */
	if (parent_tg) {
		throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
		start_parent_slice_with_credit(tg, parent_tg, rw);
	} else {
		throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
				     &parent_sq->queued[rw]);
		BUG_ON(tg->td->nr_queued[rw] <= 0);
		tg->td->nr_queued[rw]--;
	}

	throtl_trim_slice(tg, rw);

	if (tg_to_put)
		blkg_put(tg_to_blkg(tg_to_put));
}
static int throtl_dispatch_tg(struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	unsigned int nr_reads = 0, nr_writes = 0;
	unsigned int max_nr_reads = throtl_grp_quantum*3/4;
	unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
	struct bio *bio;

	/* Try to dispatch 75% READS and 25% WRITES */

	while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
	       tg_may_dispatch(tg, bio, NULL)) {

		tg_dispatch_one_bio(tg, bio_data_dir(bio));
		nr_reads++;

		if (nr_reads >= max_nr_reads)
			break;
	}

	while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
	       tg_may_dispatch(tg, bio, NULL)) {

		tg_dispatch_one_bio(tg, bio_data_dir(bio));
		nr_writes++;

		if (nr_writes >= max_nr_writes)
			break;
	}

	return nr_reads + nr_writes;
}
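/*
 * Illustrative note (an addition, not part of the original code): with the
 * default throtl_grp_quantum of 8, max_nr_reads = 8*3/4 = 6 and
 * max_nr_writes = 8 - 6 = 2, so a single invocation dispatches at most 6
 * reads and 2 writes from one group before other groups get a turn.
 */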
static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
{
	unsigned int nr_disp = 0;

	while (1) {
		struct throtl_grp *tg = throtl_rb_first(parent_sq);
		struct throtl_service_queue *sq = &tg->service_queue;

		if (!tg)
			break;

		if (time_before(jiffies, tg->disptime))
			break;

		throtl_dequeue_tg(tg);

		nr_disp += throtl_dispatch_tg(tg);

		if (sq->nr_queued[0] || sq->nr_queued[1])
			tg_update_disptime(tg);

		if (nr_disp >= throtl_quantum)
			break;
	}

	return nr_disp;
}
/**
 * throtl_pending_timer_fn - timer function for service_queue->pending_timer
 * @arg: the throtl_service_queue being serviced
 *
 * This timer is armed when a child throtl_grp with active bios becomes
 * pending and queued on the service_queue's pending_tree and expires when
 * the first child throtl_grp should be dispatched.  This function
 * dispatches bios from the children throtl_grps to the parent
 * service_queue.
 *
 * If the parent's parent is another throtl_grp, dispatching is propagated
 * by either arming its pending_timer or repeating dispatch directly.  If
 * the top-level service_tree is reached, throtl_data->dispatch_work is
 * kicked so that the ready bios are issued.
 */
static void throtl_pending_timer_fn(unsigned long arg)
{
	struct throtl_service_queue *sq = (void *)arg;
	struct throtl_grp *tg = sq_to_tg(sq);
	struct throtl_data *td = sq_to_td(sq);
	struct request_queue *q = td->queue;
	struct throtl_service_queue *parent_sq;
	bool dispatched;
	int ret;

	spin_lock_irq(q->queue_lock);
again:
	parent_sq = sq->parent_sq;
	dispatched = false;

	while (true) {
		throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u",
			   sq->nr_queued[READ] + sq->nr_queued[WRITE],
			   sq->nr_queued[READ], sq->nr_queued[WRITE]);

		ret = throtl_select_dispatch(sq);
		if (ret) {
			throtl_log(sq, "bios disp=%u", ret);
			dispatched = true;
		}

		if (throtl_schedule_next_dispatch(sq, false))
			break;

		/* this dispatch window is still open, relax and repeat */
		spin_unlock_irq(q->queue_lock);
		cpu_relax();
		spin_lock_irq(q->queue_lock);
	}

	if (!dispatched)
		goto out_unlock;

	if (parent_sq) {
		/* @parent_sq is another throtl_grp, propagate dispatch */
		if (tg->flags & THROTL_TG_WAS_EMPTY) {
			tg_update_disptime(tg);
			if (!throtl_schedule_next_dispatch(parent_sq, false)) {
				/* window is already open, repeat dispatching */
				sq = parent_sq;
				tg = sq_to_tg(sq);
				goto again;
			}
		}
	} else {
		/* reached the top-level, queue issuing */
		queue_work(kthrotld_workqueue, &td->dispatch_work);
	}
out_unlock:
	spin_unlock_irq(q->queue_lock);
}
/**
 * blk_throtl_dispatch_work_fn - work function for throtl_data->dispatch_work
 * @work: work item being executed
 *
 * This function is queued for execution when bios reach the bio_lists[]
 * of throtl_data->service_queue.  Those bios are ready and issued by this
 * function.
 */
void blk_throtl_dispatch_work_fn(struct work_struct *work)
{
	struct throtl_data *td = container_of(work, struct throtl_data,
					      dispatch_work);
	struct throtl_service_queue *td_sq = &td->service_queue;
	struct request_queue *q = td->queue;
	struct bio_list bio_list_on_stack;
	struct bio *bio;
	struct blk_plug plug;
	int rw;

	bio_list_init(&bio_list_on_stack);

	spin_lock_irq(q->queue_lock);
	for (rw = READ; rw <= WRITE; rw++)
		while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
			bio_list_add(&bio_list_on_stack, bio);
	spin_unlock_irq(q->queue_lock);

	if (!bio_list_empty(&bio_list_on_stack)) {
		blk_start_plug(&plug);
		while ((bio = bio_list_pop(&bio_list_on_stack)))
			generic_make_request(bio);
		blk_finish_plug(&plug);
	}
}
static u64 tg_prfill_cpu_rwstat(struct seq_file *sf,
				struct blkg_policy_data *pd, int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	struct blkg_rwstat rwstat = { }, tmp;
	int i, cpu;

	for_each_possible_cpu(cpu) {
		struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);

		tmp = blkg_rwstat_read((void *)sc + off);
		for (i = 0; i < BLKG_RWSTAT_NR; i++)
			rwstat.cnt[i] += tmp.cnt[i];
	}

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}

static int tg_print_cpu_rwstat(struct cgroup *cgrp, struct cftype *cft,
			       struct seq_file *sf)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);

	blkcg_print_blkgs(sf, blkcg, tg_prfill_cpu_rwstat, &blkcg_policy_throtl,
			  cft->private, true);
	return 0;
}
static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
			      int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	u64 v = *(u64 *)((void *)tg + off);

	if (v == -1)
		return 0;
	return __blkg_prfill_u64(sf, pd, v);
}

static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
			       int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	unsigned int v = *(unsigned int *)((void *)tg + off);

	if (v == -1)
		return 0;
	return __blkg_prfill_u64(sf, pd, v);
}

static int tg_print_conf_u64(struct cgroup *cgrp, struct cftype *cft,
			     struct seq_file *sf)
{
	blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_u64,
			  &blkcg_policy_throtl, cft->private, false);
	return 0;
}

static int tg_print_conf_uint(struct cgroup *cgrp, struct cftype *cft,
			      struct seq_file *sf)
{
	blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_uint,
			  &blkcg_policy_throtl, cft->private, false);
	return 0;
}
static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf,
		       bool is_u64)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
	struct blkg_conf_ctx ctx;
	struct throtl_grp *tg;
	struct throtl_service_queue *sq;
	struct blkcg_gq *blkg;
	struct cgroup *pos_cgrp;
	int ret;

	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
	if (ret)
		return ret;

	tg = blkg_to_tg(ctx.blkg);
	sq = &tg->service_queue;

	if (!ctx.v)
		ctx.v = -1;

	if (is_u64)
		*(u64 *)((void *)tg + cft->private) = ctx.v;
	else
		*(unsigned int *)((void *)tg + cft->private) = ctx.v;

	throtl_log(&tg->service_queue,
		   "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
		   tg->bps[READ], tg->bps[WRITE],
		   tg->iops[READ], tg->iops[WRITE]);

	/*
	 * Update has_rules[] flags for the updated tg's subtree.  A tg is
	 * considered to have rules if either the tg itself or any of its
	 * ancestors has rules.  This identifies groups without any
	 * restrictions in the whole hierarchy and allows them to bypass
	 * blk-throttle.
	 */
	tg_update_has_rules(tg);
	blkg_for_each_descendant_pre(blkg, pos_cgrp, ctx.blkg)
		tg_update_has_rules(blkg_to_tg(blkg));

	/*
	 * We're already holding queue_lock and know @tg is valid.  Let's
	 * apply the new config directly.
	 *
	 * Restart the slices for both READ and WRITES. It might happen
	 * that a group's limits are dropped suddenly and we don't want to
	 * account recently dispatched IO with the new low rate.
	 */
	throtl_start_new_slice(tg, 0);
	throtl_start_new_slice(tg, 1);

	if (tg->flags & THROTL_TG_PENDING) {
		tg_update_disptime(tg);
		throtl_schedule_next_dispatch(sq->parent_sq, true);
	}

	blkg_conf_finish(&ctx);
	return 0;
}
static int tg_set_conf_u64(struct cgroup *cgrp, struct cftype *cft,
			   const char *buf)
{
	return tg_set_conf(cgrp, cft, buf, true);
}

static int tg_set_conf_uint(struct cgroup *cgrp, struct cftype *cft,
			    const char *buf)
{
	return tg_set_conf(cgrp, cft, buf, false);
}
static struct cftype throtl_files[] = {
	{
		.name = "throttle.read_bps_device",
		.private = offsetof(struct throtl_grp, bps[READ]),
		.read_seq_string = tg_print_conf_u64,
		.write_string = tg_set_conf_u64,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_bps_device",
		.private = offsetof(struct throtl_grp, bps[WRITE]),
		.read_seq_string = tg_print_conf_u64,
		.write_string = tg_set_conf_u64,
		.max_write_len = 256,
	},
	{
		.name = "throttle.read_iops_device",
		.private = offsetof(struct throtl_grp, iops[READ]),
		.read_seq_string = tg_print_conf_uint,
		.write_string = tg_set_conf_uint,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_iops_device",
		.private = offsetof(struct throtl_grp, iops[WRITE]),
		.read_seq_string = tg_print_conf_uint,
		.write_string = tg_set_conf_uint,
		.max_write_len = 256,
	},
	{
		.name = "throttle.io_service_bytes",
		.private = offsetof(struct tg_stats_cpu, service_bytes),
		.read_seq_string = tg_print_cpu_rwstat,
	},
	{
		.name = "throttle.io_serviced",
		.private = offsetof(struct tg_stats_cpu, serviced),
		.read_seq_string = tg_print_cpu_rwstat,
	},
	{ }	/* terminate */
};
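/*
 * Illustrative usage (an addition, not part of the original source): these
 * cftypes show up as per-group files under the blkio cgroup hierarchy.
 * Assuming a blkio cgroup mounted at /sys/fs/cgroup/blkio and a group
 * named "grp", a 1 MB/s read limit and a 100 IOPS write limit on device
 * 8:16 could be configured with:
 *
 *   echo "8:16 1048576" > /sys/fs/cgroup/blkio/grp/blkio.throttle.read_bps_device
 *   echo "8:16 100"     > /sys/fs/cgroup/blkio/grp/blkio.throttle.write_iops_device
 *
 * Writing a value of 0 clears the limit (it is stored internally as -1,
 * i.e. unlimited), as handled in tg_set_conf().
 */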
static void throtl_shutdown_wq(struct request_queue *q)
{
	struct throtl_data *td = q->td;

	cancel_work_sync(&td->dispatch_work);
}

static struct blkcg_policy blkcg_policy_throtl = {
	.pd_size		= sizeof(struct throtl_grp),
	.cftypes		= throtl_files,

	.pd_init_fn		= throtl_pd_init,
	.pd_online_fn		= throtl_pd_online,
	.pd_exit_fn		= throtl_pd_exit,
	.pd_reset_stats_fn	= throtl_pd_reset_stats,
};
bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
{
	struct throtl_data *td = q->td;
	struct throtl_qnode *qn = NULL;
	struct throtl_grp *tg;
	struct throtl_service_queue *sq;
	bool rw = bio_data_dir(bio);
	struct blkcg *blkcg;
	bool throttled = false;

	/* see throtl_charge_bio() */
	if (bio->bi_rw & REQ_THROTTLED)
		goto out;

	/*
	 * A throtl_grp pointer retrieved under rcu can be used to access
	 * basic fields like stats and io rates. If a group has no rules,
	 * just update the dispatch stats in a lockless manner and return.
	 */
	rcu_read_lock();
	blkcg = bio_blkcg(bio);
	tg = throtl_lookup_tg(td, blkcg);
	if (tg) {
		if (!tg->has_rules[rw]) {
			throtl_update_dispatch_stats(tg_to_blkg(tg),
						     bio->bi_size, bio->bi_rw);
			goto out_unlock_rcu;
		}
	}

	/*
	 * Either the group has not been allocated yet or it is not an
	 * unlimited IO group
	 */
	spin_lock_irq(q->queue_lock);
	tg = throtl_lookup_create_tg(td, blkcg);
	if (unlikely(!tg))
		goto out_unlock;

	sq = &tg->service_queue;

	while (true) {
		/* throtl is FIFO - if bios are already queued, should queue */
		if (sq->nr_queued[rw])
			break;

		/* if above limits, break to queue */
		if (!tg_may_dispatch(tg, bio, NULL))
			break;

		/* within limits, let's charge and dispatch directly */
		throtl_charge_bio(tg, bio);

		/*
		 * We need to trim the slice even when bios are not being
		 * queued, otherwise it might happen that a bio is not queued
		 * for a long time and the slice keeps on extending and trim
		 * is not called for a long time. Now if limits are reduced
		 * suddenly we take into account all the IO dispatched so far
		 * at the new low rate and newly queued IO gets a really long
		 * dispatch time.
		 *
		 * So keep on trimming the slice even if the bio is not queued.
		 */
		throtl_trim_slice(tg, rw);

		/*
		 * @bio passed through this layer without being throttled.
		 * Climb up the ladder.  If we're already at the top, it
		 * can be executed directly.
		 */
		qn = &tg->qnode_on_parent[rw];
		sq = sq->parent_sq;
		tg = sq_to_tg(sq);
		if (!tg)
			goto out_unlock;
	}

	/* out-of-limit, queue to @tg */
	throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
		   rw == READ ? 'R' : 'W',
		   tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
		   tg->io_disp[rw], tg->iops[rw],
		   sq->nr_queued[READ], sq->nr_queued[WRITE]);

	bio_associate_current(bio);
	tg->td->nr_queued[rw]++;
	throtl_add_bio_tg(bio, qn, tg);
	throttled = true;

	/*
	 * Update @tg's dispatch time and force schedule dispatch if @tg
	 * was empty before @bio.  The forced scheduling isn't likely to
	 * cause undue delay as @bio is likely to be dispatched directly if
	 * its @tg's disptime is not in the future.
	 */
	if (tg->flags & THROTL_TG_WAS_EMPTY) {
		tg_update_disptime(tg);
		throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true);
	}

out_unlock:
	spin_unlock_irq(q->queue_lock);
out_unlock_rcu:
	rcu_read_unlock();
out:
	/*
	 * As multiple blk-throtls may stack in the same issue path, we
	 * don't want bios to leave with the flag set.  Clear the flag if
	 * being issued.
	 */
	if (!throttled)
		bio->bi_rw &= ~REQ_THROTTLED;
	return throttled;
}
/*
 * Dispatch all bios from all children tg's queued on @parent_sq.  On
 * return, @parent_sq is guaranteed to not have any active children tg's
 * and all bios from previously active tg's are on @parent_sq->bio_lists[].
 */
static void tg_drain_bios(struct throtl_service_queue *parent_sq)
{
	struct throtl_grp *tg;

	while ((tg = throtl_rb_first(parent_sq))) {
		struct throtl_service_queue *sq = &tg->service_queue;
		struct bio *bio;

		throtl_dequeue_tg(tg);

		while ((bio = throtl_peek_queued(&sq->queued[READ])))
			tg_dispatch_one_bio(tg, bio_data_dir(bio));
		while ((bio = throtl_peek_queued(&sq->queued[WRITE])))
			tg_dispatch_one_bio(tg, bio_data_dir(bio));
	}
}
/**
 * blk_throtl_drain - drain throttled bios
 * @q: request_queue to drain throttled bios for
 *
 * Dispatch all currently throttled bios on @q through ->make_request_fn().
 */
void blk_throtl_drain(struct request_queue *q)
	__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct throtl_data *td = q->td;
	struct blkcg_gq *blkg;
	struct cgroup *pos_cgrp;
	struct bio *bio;
	int rw;

	queue_lockdep_assert_held(q);
	rcu_read_lock();

	/*
	 * Drain each tg while doing a post-order walk on the blkg tree, so
	 * that all bios are propagated to td->service_queue.  It'd be
	 * better to walk the service_queue tree directly but the blkg walk
	 * is easier.
	 */
	blkg_for_each_descendant_post(blkg, pos_cgrp, td->queue->root_blkg)
		tg_drain_bios(&blkg_to_tg(blkg)->service_queue);
	tg_drain_bios(&td_root_tg(td)->service_queue);

	/* finally, transfer bios from top-level tg's into the td */
	tg_drain_bios(&td->service_queue);

	rcu_read_unlock();
	spin_unlock_irq(q->queue_lock);

	/* all bios now should be in td->service_queue, issue them */
	for (rw = READ; rw <= WRITE; rw++)
		while ((bio = throtl_pop_queued(&td->service_queue.queued[rw],
						NULL)))
			generic_make_request(bio);

	spin_lock_irq(q->queue_lock);
}
int blk_throtl_init(struct request_queue *q)
{
	struct throtl_data *td;
	int ret;

	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
	if (!td)
		return -ENOMEM;

	INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
	throtl_service_queue_init(&td->service_queue, NULL);

	q->td = td;
	td->queue = q;

	/* activate policy */
	ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
	if (ret)
		kfree(td);
	return ret;
}

void blk_throtl_exit(struct request_queue *q)
{
	BUG_ON(!q->td);
	throtl_shutdown_wq(q);
	blkcg_deactivate_policy(q, &blkcg_policy_throtl);
	kfree(q->td);
}

static int __init throtl_init(void)
{
	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
	if (!kthrotld_workqueue)
		panic("Failed to create kthrotld\n");

	return blkcg_policy_register(&blkcg_policy_throtl);
}

module_init(throtl_init);