/*
 * Interface for controlling IO bandwidth on a request queue
 *
 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blktrace_api.h>
#include "blk-cgroup.h"

/* Max dispatch from a group in 1 round */
static int throtl_grp_quantum = 8;

/* Total max dispatch from all groups in one round */
static int throtl_quantum = 32;

/* Throttling is performed over a 100ms slice and after that the slice is renewed */
static unsigned long throtl_slice = HZ/10;	/* 100 ms */
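
/*
 * For example, with HZ == 1000 the slice is 100 jiffies; with HZ == 250 it
 * is 25 jiffies.  All of the slice arithmetic below works in jiffies.
 */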

static struct blkcg_policy blkcg_policy_throtl;

/* A workqueue to queue throttle related work */
static struct workqueue_struct *kthrotld_workqueue;

struct throtl_service_queue {
	struct throtl_service_queue *parent_sq;	/* the parent service_queue */

	/*
	 * Bios queued directly to this service_queue or dispatched from
	 * children throtl_grp's.
	 */
	struct bio_list		bio_lists[2];	/* queued bios [READ/WRITE] */
	unsigned int		nr_queued[2];	/* number of queued bios */

	/*
	 * RB tree of active children throtl_grp's, which are sorted by
	 * their ->disptime.
	 */
	struct rb_root		pending_tree;	/* RB tree of active tgs */
	struct rb_node		*first_pending;	/* first node in the tree */
	unsigned int		nr_pending;	/* # queued in the tree */
	unsigned long		first_pending_disptime;	/* disptime of the first tg */
	struct timer_list	pending_timer;	/* fires on first_pending_disptime */
};

enum tg_state_flags {
	THROTL_TG_PENDING	= 1 << 0,	/* on parent's pending tree */
	THROTL_TG_WAS_EMPTY	= 1 << 1,	/* bio_lists[] became non-empty */
};

#define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)

/* Per-cpu group stats */
struct tg_stats_cpu {
	/* total bytes transferred */
	struct blkg_rwstat		service_bytes;
	/* total IOs serviced, post merge */
	struct blkg_rwstat		serviced;
};

struct throtl_grp {
	/* must be the first member */
	struct blkg_policy_data pd;

	/* active throtl group service_queue member */
	struct rb_node rb_node;

	/* throtl_data this group belongs to */
	struct throtl_data *td;

	/* this group's service queue */
	struct throtl_service_queue service_queue;

	/*
	 * Dispatch time in jiffies. This is the estimated time when group
	 * will unthrottle and is ready to dispatch more bio. It is used as
	 * key to sort active groups in service tree.
	 */
	unsigned long disptime;

	unsigned int flags;

	/* bytes per second rate limits */
	uint64_t bps[2];

	/* IOPS limits */
	unsigned int iops[2];

	/* Number of bytes dispatched in current slice */
	uint64_t bytes_disp[2];
	/* Number of bio's dispatched in current slice */
	unsigned int io_disp[2];

	/* When did we start a new slice */
	unsigned long slice_start[2];
	unsigned long slice_end[2];

	/* Per cpu stats pointer */
	struct tg_stats_cpu __percpu *stats_cpu;

	/* List of tgs waiting for per cpu stats memory to be allocated */
	struct list_head stats_alloc_node;
};

struct throtl_data {
	/* service tree for active throtl groups */
	struct throtl_service_queue service_queue;

	struct request_queue *queue;

	/* Total Number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	/* number of total undestroyed groups */
	unsigned int nr_undestroyed_grps;

	/* Work for dispatching throttled bios */
	struct work_struct dispatch_work;
};

/* list and work item to allocate percpu group stats */
static DEFINE_SPINLOCK(tg_stats_alloc_lock);
static LIST_HEAD(tg_stats_alloc_list);

static void tg_stats_alloc_fn(struct work_struct *);
static DECLARE_DELAYED_WORK(tg_stats_alloc_work, tg_stats_alloc_fn);

static void throtl_pending_timer_fn(unsigned long arg);

static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
}

static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
{
	return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
}

static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
{
	return pd_to_blkg(&tg->pd);
}

static inline struct throtl_grp *td_root_tg(struct throtl_data *td)
{
	return blkg_to_tg(td->queue->root_blkg);
}

/**
 * sq_to_tg - return the throtl_grp the specified service queue belongs to
 * @sq: the throtl_service_queue of interest
 *
 * Return the throtl_grp @sq belongs to.  If @sq is the top-level one
 * embedded in throtl_data, %NULL is returned.
 */
static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq)
{
	if (sq && sq->parent_sq)
		return container_of(sq, struct throtl_grp, service_queue);
	else
		return NULL;
}

/**
 * sq_to_td - return throtl_data the specified service queue belongs to
 * @sq: the throtl_service_queue of interest
 *
 * A service_queue can be embedded in either a throtl_grp or throtl_data.
 * Determine the associated throtl_data accordingly and return it.
 */
static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
{
	struct throtl_grp *tg = sq_to_tg(sq);

	if (tg)
		return tg->td;
	else
		return container_of(sq, struct throtl_data, service_queue);
}

/**
 * throtl_log - log debug message via blktrace
 * @sq: the service_queue being reported
 * @fmt: printf format string
 * @args: printf args
 *
 * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
 * throtl_grp; otherwise, just "throtl".
 *
 * TODO: this should be made a function and name formatting should happen
 * after testing whether blktrace is enabled.
 */
#define throtl_log(sq, fmt, args...)	do {				\
	struct throtl_grp *__tg = sq_to_tg((sq));			\
	struct throtl_data *__td = sq_to_td((sq));			\
									\
	(void)__td;							\
	if ((__tg)) {							\
		char __pbuf[128];					\
									\
		blkg_path(tg_to_blkg(__tg), __pbuf, sizeof(__pbuf));	\
		blk_add_trace_msg(__td->queue, "throtl %s " fmt, __pbuf, ##args); \
	} else {							\
		blk_add_trace_msg(__td->queue, "throtl " fmt, ##args);	\
	}								\
} while (0)
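
/*
 * Example (illustrative): a call such as
 *
 *	throtl_log(sq, "bios disp=%u", ret);
 *
 * shows up in blktrace output as "throtl <cgroup path> bios disp=N" when
 * @sq is embedded in a throtl_grp and as "throtl bios disp=N" when it is
 * the top-level service_queue embedded in throtl_data.
 */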

/*
 * Worker for allocating per cpu stat for tgs. This is scheduled on the
 * system_wq once there are some groups on the alloc_list waiting for
 * allocation.
 */
static void tg_stats_alloc_fn(struct work_struct *work)
{
	static struct tg_stats_cpu *stats_cpu;	/* this fn is non-reentrant */
	struct delayed_work *dwork = to_delayed_work(work);
	bool empty = false;

alloc_stats:
	if (!stats_cpu) {
		stats_cpu = alloc_percpu(struct tg_stats_cpu);
		if (!stats_cpu) {
			/* allocation failed, try again after some time */
			schedule_delayed_work(dwork, msecs_to_jiffies(10));
			return;
		}
	}

	spin_lock_irq(&tg_stats_alloc_lock);

	if (!list_empty(&tg_stats_alloc_list)) {
		struct throtl_grp *tg = list_first_entry(&tg_stats_alloc_list,
							 struct throtl_grp,
							 stats_alloc_node);
		swap(tg->stats_cpu, stats_cpu);
		list_del_init(&tg->stats_alloc_node);
	}

	empty = list_empty(&tg_stats_alloc_list);
	spin_unlock_irq(&tg_stats_alloc_lock);
	if (!empty)
		goto alloc_stats;
}

/* init a service_queue, assumes the caller zeroed it */
static void throtl_service_queue_init(struct throtl_service_queue *sq,
				      struct throtl_service_queue *parent_sq)
{
	bio_list_init(&sq->bio_lists[0]);
	bio_list_init(&sq->bio_lists[1]);
	sq->pending_tree = RB_ROOT;
	sq->parent_sq = parent_sq;
	setup_timer(&sq->pending_timer, throtl_pending_timer_fn,
		    (unsigned long)sq);
}

static void throtl_service_queue_exit(struct throtl_service_queue *sq)
{
	del_timer_sync(&sq->pending_timer);
}

static void throtl_pd_init(struct blkcg_gq *blkg)
{
	struct throtl_grp *tg = blkg_to_tg(blkg);
	struct throtl_data *td = blkg->q->td;
	unsigned long flags;

	throtl_service_queue_init(&tg->service_queue, &td->service_queue);
	RB_CLEAR_NODE(&tg->rb_node);
	tg->td = td;

	tg->bps[READ] = -1;
	tg->bps[WRITE] = -1;
	tg->iops[READ] = -1;
	tg->iops[WRITE] = -1;

	/*
	 * Ugh... We need to perform per-cpu allocation for tg->stats_cpu
	 * but percpu allocator can't be called from IO path.  Queue tg on
	 * tg_stats_alloc_list and allocate from work item.
	 */
	spin_lock_irqsave(&tg_stats_alloc_lock, flags);
	list_add(&tg->stats_alloc_node, &tg_stats_alloc_list);
	schedule_delayed_work(&tg_stats_alloc_work, 0);
	spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
}

static void throtl_pd_exit(struct blkcg_gq *blkg)
{
	struct throtl_grp *tg = blkg_to_tg(blkg);
	unsigned long flags;

	spin_lock_irqsave(&tg_stats_alloc_lock, flags);
	list_del_init(&tg->stats_alloc_node);
	spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);

	free_percpu(tg->stats_cpu);

	throtl_service_queue_exit(&tg->service_queue);
}

static void throtl_pd_reset_stats(struct blkcg_gq *blkg)
{
	struct throtl_grp *tg = blkg_to_tg(blkg);
	int cpu;

	if (tg->stats_cpu == NULL)
		return;

	for_each_possible_cpu(cpu) {
		struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);

		blkg_rwstat_reset(&sc->service_bytes);
		blkg_rwstat_reset(&sc->serviced);
	}
}

static struct throtl_grp *throtl_lookup_tg(struct throtl_data *td,
					   struct blkcg *blkcg)
{
	/*
	 * This is the common case when there are no blkcgs.  Avoid lookup
	 * in this case
	 */
	if (blkcg == &blkcg_root)
		return td_root_tg(td);

	return blkg_to_tg(blkg_lookup(blkcg, td->queue));
}

static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
						  struct blkcg *blkcg)
{
	struct request_queue *q = td->queue;
	struct throtl_grp *tg = NULL;

	/*
	 * This is the common case when there are no blkcgs.  Avoid lookup
	 * in this case
	 */
	if (blkcg == &blkcg_root) {
		tg = td_root_tg(td);
	} else {
		struct blkcg_gq *blkg;

		blkg = blkg_lookup_create(blkcg, q);

		/* if %NULL and @q is alive, fall back to root_tg */
		if (!IS_ERR(blkg))
			tg = blkg_to_tg(blkg);
		else if (!blk_queue_dying(q))
			tg = td_root_tg(td);
	}

	return tg;
}

static struct throtl_grp *
throtl_rb_first(struct throtl_service_queue *parent_sq)
{
	/* Service tree is empty */
	if (!parent_sq->nr_pending)
		return NULL;

	if (!parent_sq->first_pending)
		parent_sq->first_pending = rb_first(&parent_sq->pending_tree);

	if (parent_sq->first_pending)
		return rb_entry_tg(parent_sq->first_pending);

	return NULL;
}

static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
	rb_erase(n, root);
	RB_CLEAR_NODE(n);
}

static void throtl_rb_erase(struct rb_node *n,
			    struct throtl_service_queue *parent_sq)
{
	if (parent_sq->first_pending == n)
		parent_sq->first_pending = NULL;
	rb_erase_init(n, &parent_sq->pending_tree);
	--parent_sq->nr_pending;
}

static void update_min_dispatch_time(struct throtl_service_queue *parent_sq)
{
	struct throtl_grp *tg;

	tg = throtl_rb_first(parent_sq);
	if (!tg)
		return;

	parent_sq->first_pending_disptime = tg->disptime;
}

static void tg_service_queue_add(struct throtl_grp *tg)
{
	struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq;
	struct rb_node **node = &parent_sq->pending_tree.rb_node;
	struct rb_node *parent = NULL;
	struct throtl_grp *__tg;
	unsigned long key = tg->disptime;
	int left = 1;

	while (*node != NULL) {
		parent = *node;
		__tg = rb_entry_tg(parent);

		if (time_before(key, __tg->disptime))
			node = &parent->rb_left;
		else {
			node = &parent->rb_right;
			left = 0;
		}
	}

	if (left)
		parent_sq->first_pending = &tg->rb_node;

	rb_link_node(&tg->rb_node, parent, node);
	rb_insert_color(&tg->rb_node, &parent_sq->pending_tree);
}

static void __throtl_enqueue_tg(struct throtl_grp *tg)
{
	tg_service_queue_add(tg);
	tg->flags |= THROTL_TG_PENDING;
	tg->service_queue.parent_sq->nr_pending++;
}

static void throtl_enqueue_tg(struct throtl_grp *tg)
{
	if (!(tg->flags & THROTL_TG_PENDING))
		__throtl_enqueue_tg(tg);
}

static void __throtl_dequeue_tg(struct throtl_grp *tg)
{
	throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq);
	tg->flags &= ~THROTL_TG_PENDING;
}

static void throtl_dequeue_tg(struct throtl_grp *tg)
{
	if (tg->flags & THROTL_TG_PENDING)
		__throtl_dequeue_tg(tg);
}

/* Call with queue lock held */
static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
					  unsigned long expires)
{
	mod_timer(&sq->pending_timer, expires);
	throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu",
		   expires - jiffies, jiffies);
}

/**
 * throtl_schedule_next_dispatch - schedule the next dispatch cycle
 * @sq: the service_queue to schedule dispatch for
 * @force: force scheduling
 *
 * Arm @sq->pending_timer so that the next dispatch cycle starts on the
 * dispatch time of the first pending child.  Returns %true if either timer
 * is armed or there's no pending child left.  %false if the current
 * dispatch window is still open and the caller should continue
 * dispatching.
 *
 * If @force is %true, the dispatch timer is always scheduled and this
 * function is guaranteed to return %true.  This is to be used when the
 * caller can't dispatch itself and needs to invoke pending_timer
 * unconditionally.  Note that forced scheduling is likely to induce short
 * delay before dispatch starts even if @sq->first_pending_disptime is not
 * in the future and thus shouldn't be used in hot paths.
 */
static bool throtl_schedule_next_dispatch(struct throtl_service_queue *sq,
					  bool force)
{
	/* any pending children left? */
	if (!sq->nr_pending)
		return true;

	update_min_dispatch_time(sq);

	/* is the next dispatch time in the future? */
	if (force || time_after(sq->first_pending_disptime, jiffies)) {
		throtl_schedule_pending_timer(sq, sq->first_pending_disptime);
		return true;
	}

	/* tell the caller to continue dispatching */
	return false;
}

static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
{
	tg->bytes_disp[rw] = 0;
	tg->io_disp[rw] = 0;
	tg->slice_start[rw] = jiffies;
	tg->slice_end[rw] = jiffies + throtl_slice;
	throtl_log(&tg->service_queue,
		   "[%c] new slice start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
		   tg->slice_end[rw], jiffies);
}

static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
					unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
}

static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
				       unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
	throtl_log(&tg->service_queue,
		   "[%c] extend slice start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
		   tg->slice_end[rw], jiffies);
}

/* Determine if previously allocated or extended slice is complete or not */
static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
{
	if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
		return false;

	return true;
}

/* Trim the used slices and adjust slice start accordingly */
static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
{
	unsigned long nr_slices, time_elapsed, io_trim;
	u64 bytes_trim, tmp;

	BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));

	/*
	 * If bps are unlimited (-1), then time slices don't get
	 * renewed. Don't try to trim the slice if slice is used. A new
	 * slice will start when appropriate.
	 */
	if (throtl_slice_used(tg, rw))
		return;

	/*
	 * A bio has been dispatched. Also adjust slice_end. It might happen
	 * that initially cgroup limit was very low resulting in high
	 * slice_end, but later limit was bumped up and bio was dispatched
	 * sooner, then we need to reduce slice_end. A high bogus slice_end
	 * is bad because it does not allow new slice to start.
	 */
	throtl_set_slice_end(tg, rw, jiffies + throtl_slice);

	time_elapsed = jiffies - tg->slice_start[rw];

	nr_slices = time_elapsed / throtl_slice;

	if (!nr_slices)
		return;
	tmp = tg->bps[rw] * throtl_slice * nr_slices;
	do_div(tmp, HZ);
	bytes_trim = tmp;

	io_trim = (tg->iops[rw] * throtl_slice * nr_slices)/HZ;

	if (!bytes_trim && !io_trim)
		return;

	if (tg->bytes_disp[rw] >= bytes_trim)
		tg->bytes_disp[rw] -= bytes_trim;
	else
		tg->bytes_disp[rw] = 0;

	if (tg->io_disp[rw] >= io_trim)
		tg->io_disp[rw] -= io_trim;
	else
		tg->io_disp[rw] = 0;

	tg->slice_start[rw] += nr_slices * throtl_slice;

	throtl_log(&tg->service_queue,
		   "[%c] trim slice nr=%lu bytes=%llu io=%lu start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
		   tg->slice_start[rw], tg->slice_end[rw], jiffies);
}
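
/*
 * Worked example (illustrative numbers): with HZ == 1000, throtl_slice ==
 * 100 jiffies and bps == 1048576 (1 MiB/s), a slice that has been active
 * for 250 jiffies gives nr_slices = 2 and bytes_trim = 1048576 * 100 * 2 /
 * 1000 = 209715, i.e. the budget of the two fully elapsed 100ms windows is
 * subtracted from bytes_disp and slice_start is advanced by 200 jiffies.
 */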

static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
				  unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned int io_allowed;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
	u64 tmp;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

	/*
	 * jiffy_elapsed_rnd should not be a big value: the minimum iops is
	 * 1, so at most the elapsed jiffies are equivalent to 1 second, as
	 * we allow dispatch after 1 second and by then the slice should
	 * have been trimmed.
	 */
	tmp = (u64)tg->iops[rw] * jiffy_elapsed_rnd;
	do_div(tmp, HZ);

	if (tmp > UINT_MAX)
		io_allowed = UINT_MAX;
	else
		io_allowed = tmp;

	if (tg->io_disp[rw] + 1 <= io_allowed) {
		if (wait)
			*wait = 0;
		return 1;
	}

	/* Calc approx time to dispatch */
	jiffy_wait = ((tg->io_disp[rw] + 1) * HZ)/tg->iops[rw] + 1;

	if (jiffy_wait > jiffy_elapsed)
		jiffy_wait = jiffy_wait - jiffy_elapsed;
	else
		jiffy_wait = 1;

	if (wait)
		*wait = jiffy_wait;
	return 0;
}
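
/*
 * Worked example (illustrative numbers): with HZ == 1000, iops == 100 and
 * a slice that is 70 jiffies old, jiffy_elapsed_rnd rounds up to 100 and
 * io_allowed = 100 * 100 / 1000 = 10.  If 10 bios were already dispatched,
 * the next one waits roughly (11 * 1000) / 100 + 1 - 70 = 41 jiffies.
 */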

static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
				 unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	u64 bytes_allowed, extra_bytes, tmp;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

	tmp = tg->bps[rw] * jiffy_elapsed_rnd;
	do_div(tmp, HZ);
	bytes_allowed = tmp;

	if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed) {
		if (wait)
			*wait = 0;
		return 1;
	}

	/* Calc approx time to dispatch */
	extra_bytes = tg->bytes_disp[rw] + bio->bi_size - bytes_allowed;
	jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);

	if (!jiffy_wait)
		jiffy_wait = 1;

	/*
	 * This wait time is without taking into consideration the rounding
	 * up we did. Add that time also.
	 */
	jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
	if (wait)
		*wait = jiffy_wait;
	return 0;
}
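
/*
 * Worked example (illustrative numbers): with HZ == 1000, bps == 1048576
 * (1 MiB/s), a slice 100 jiffies old and bytes_disp == 100000,
 * bytes_allowed = 1048576 * 100 / 1000 = 104857.  An 8192-byte bio exceeds
 * that by extra_bytes = 100000 + 8192 - 104857 = 3335 bytes, so it waits
 * roughly 3335 * 1000 / 1048576 = 3 jiffies plus the round-up adjustment.
 */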

static bool tg_no_rule_group(struct throtl_grp *tg, bool rw) {
	if (tg->bps[rw] == -1 && tg->iops[rw] == -1)
		return 1;

	return 0;
}

/*
 * Returns whether one can dispatch a bio or not. Also returns approx number
 * of jiffies to wait before this bio is within the IO rate and can be dispatched
 */
static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
			    unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;

	/*
	 * Currently whole state machine of group depends on first bio
	 * queued in the group bio list. So one should not be calling
	 * this function with a different bio if there are other bios
	 * queued.
	 */
	BUG_ON(tg->service_queue.nr_queued[rw] &&
	       bio != bio_list_peek(&tg->service_queue.bio_lists[rw]));

	/* If tg->bps = -1, then BW is unlimited */
	if (tg->bps[rw] == -1 && tg->iops[rw] == -1) {
		if (wait)
			*wait = 0;
		return 1;
	}

	/*
	 * If previous slice expired, start a new one otherwise renew/extend
	 * existing slice to make sure it is at least throtl_slice interval
	 * long since now.
	 */
	if (throtl_slice_used(tg, rw))
		throtl_start_new_slice(tg, rw);
	else {
		if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
			throtl_extend_slice(tg, rw, jiffies + throtl_slice);
	}

	if (tg_with_in_bps_limit(tg, bio, &bps_wait) &&
	    tg_with_in_iops_limit(tg, bio, &iops_wait)) {
		if (wait)
			*wait = 0;
		return 1;
	}

	max_wait = max(bps_wait, iops_wait);

	if (wait)
		*wait = max_wait;

	if (time_before(tg->slice_end[rw], jiffies + max_wait))
		throtl_extend_slice(tg, rw, jiffies + max_wait);

	return 0;
}

static void throtl_update_dispatch_stats(struct blkcg_gq *blkg, u64 bytes,
					 int rw)
{
	struct throtl_grp *tg = blkg_to_tg(blkg);
	struct tg_stats_cpu *stats_cpu;
	unsigned long flags;

	/* If per cpu stats are not allocated yet, don't do any accounting. */
	if (tg->stats_cpu == NULL)
		return;

	/*
	 * Disabling interrupts to provide mutual exclusion between two
	 * writes on same cpu. It probably is not needed for 64bit. Not
	 * optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(tg->stats_cpu);

	blkg_rwstat_add(&stats_cpu->serviced, rw, 1);
	blkg_rwstat_add(&stats_cpu->service_bytes, rw, bytes);

	local_irq_restore(flags);
}

static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
	bool rw = bio_data_dir(bio);

	/* Charge the bio to the group */
	tg->bytes_disp[rw] += bio->bi_size;
	tg->io_disp[rw]++;

	/*
	 * REQ_THROTTLED is used to prevent the same bio from being
	 * throttled more than once, as a throttled bio will go through
	 * blk-throtl a second time when it eventually gets issued.  Set it
	 * when a bio is being charged to a tg.
	 *
	 * Dispatch stats aren't recursive and each @bio should only be
	 * accounted by the @tg it was originally associated with.  Let's
	 * update the stats when setting REQ_THROTTLED for the first time
	 * which is guaranteed to be for the @bio's original tg.
	 */
	if (!(bio->bi_rw & REQ_THROTTLED)) {
		bio->bi_rw |= REQ_THROTTLED;
		throtl_update_dispatch_stats(tg_to_blkg(tg), bio->bi_size,
					     bio->bi_rw);
	}
}

static void throtl_add_bio_tg(struct bio *bio, struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	bool rw = bio_data_dir(bio);

	/*
	 * If @tg doesn't currently have any bios queued in the same
	 * direction, queueing @bio can change when @tg should be
	 * dispatched.  Mark that @tg was empty.  This is automatically
	 * cleared on the next tg_update_disptime().
	 */
	if (!sq->nr_queued[rw])
		tg->flags |= THROTL_TG_WAS_EMPTY;

	bio_list_add(&sq->bio_lists[rw], bio);
	/* Take a bio reference on tg */
	blkg_get(tg_to_blkg(tg));
	sq->nr_queued[rw]++;
	throtl_enqueue_tg(tg);
}

static void tg_update_disptime(struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
	struct bio *bio;

	if ((bio = bio_list_peek(&sq->bio_lists[READ])))
		tg_may_dispatch(tg, bio, &read_wait);

	if ((bio = bio_list_peek(&sq->bio_lists[WRITE])))
		tg_may_dispatch(tg, bio, &write_wait);

	min_wait = min(read_wait, write_wait);
	disptime = jiffies + min_wait;

	/* Update dispatch time */
	throtl_dequeue_tg(tg);
	tg->disptime = disptime;
	throtl_enqueue_tg(tg);

	/* see throtl_add_bio_tg() */
	tg->flags &= ~THROTL_TG_WAS_EMPTY;
}

static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	struct throtl_service_queue *parent_sq = sq->parent_sq;
	struct throtl_grp *parent_tg = sq_to_tg(parent_sq);
	struct bio *bio;

	bio = bio_list_pop(&sq->bio_lists[rw]);
	sq->nr_queued[rw]--;

	throtl_charge_bio(tg, bio);

	/*
	 * If our parent is another tg, we just need to transfer @bio to
	 * the parent using throtl_add_bio_tg().  If our parent is
	 * @td->service_queue, @bio is ready to be issued.  Put it on its
	 * bio_lists[] and decrease total number queued.  The caller is
	 * responsible for issuing these bios.
	 */
	if (parent_tg) {
		throtl_add_bio_tg(bio, parent_tg);
	} else {
		bio_list_add(&parent_sq->bio_lists[rw], bio);
		BUG_ON(tg->td->nr_queued[rw] <= 0);
		tg->td->nr_queued[rw]--;
	}

	throtl_trim_slice(tg, rw);

	/* @bio is transferred to parent, drop its blkg reference */
	blkg_put(tg_to_blkg(tg));
}

static int throtl_dispatch_tg(struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	unsigned int nr_reads = 0, nr_writes = 0;
	unsigned int max_nr_reads = throtl_grp_quantum*3/4;
	unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
	struct bio *bio;

	/* Try to dispatch 75% READS and 25% WRITES */

	while ((bio = bio_list_peek(&sq->bio_lists[READ])) &&
	       tg_may_dispatch(tg, bio, NULL)) {

		tg_dispatch_one_bio(tg, bio_data_dir(bio));
		nr_reads++;

		if (nr_reads >= max_nr_reads)
			break;
	}

	while ((bio = bio_list_peek(&sq->bio_lists[WRITE])) &&
	       tg_may_dispatch(tg, bio, NULL)) {

		tg_dispatch_one_bio(tg, bio_data_dir(bio));
		nr_writes++;

		if (nr_writes >= max_nr_writes)
			break;
	}

	return nr_reads + nr_writes;
}
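
/*
 * With the default throtl_grp_quantum of 8, max_nr_reads is 6 and
 * max_nr_writes is 2, i.e. a group hands out at most 6 reads and 2 writes
 * per dispatch round before giving other groups a turn.
 */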

static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
{
	unsigned int nr_disp = 0;

	while (1) {
		struct throtl_grp *tg = throtl_rb_first(parent_sq);
		struct throtl_service_queue *sq = &tg->service_queue;

		if (!tg)
			break;

		if (time_before(jiffies, tg->disptime))
			break;

		throtl_dequeue_tg(tg);

		nr_disp += throtl_dispatch_tg(tg);

		if (sq->nr_queued[0] || sq->nr_queued[1])
			tg_update_disptime(tg);

		if (nr_disp >= throtl_quantum)
			break;
	}

	return nr_disp;
}

/**
 * throtl_pending_timer_fn - timer function for service_queue->pending_timer
 * @arg: the throtl_service_queue being serviced
 *
 * This timer is armed when a child throtl_grp with active bio's becomes
 * pending and queued on the service_queue's pending_tree and expires when
 * the first child throtl_grp should be dispatched.  This function
 * dispatches bio's from the children throtl_grps and kicks
 * throtl_data->dispatch_work if there are bio's ready to be issued.
 */
static void throtl_pending_timer_fn(unsigned long arg)
{
	struct throtl_service_queue *sq = (void *)arg;
	struct throtl_data *td = sq_to_td(sq);
	struct request_queue *q = td->queue;
	bool dispatched = false;
	int ret;

	spin_lock_irq(q->queue_lock);

	while (true) {
		throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u",
			   td->nr_queued[READ] + td->nr_queued[WRITE],
			   td->nr_queued[READ], td->nr_queued[WRITE]);

		ret = throtl_select_dispatch(sq);
		if (ret) {
			throtl_log(sq, "bios disp=%u", ret);
			dispatched = true;
		}

		if (throtl_schedule_next_dispatch(sq, false))
			break;

		/* this dispatch window is still open, relax and repeat */
		spin_unlock_irq(q->queue_lock);
		cpu_relax();
		spin_lock_irq(q->queue_lock);
	}

	if (dispatched)
		queue_work(kthrotld_workqueue, &td->dispatch_work);

	spin_unlock_irq(q->queue_lock);
}

/**
 * blk_throtl_dispatch_work_fn - work function for throtl_data->dispatch_work
 * @work: work item being executed
 *
 * This function is queued for execution when bio's reach the bio_lists[]
 * of throtl_data->service_queue.  Those bio's are ready and issued by this
 * function.
 */
void blk_throtl_dispatch_work_fn(struct work_struct *work)
{
	struct throtl_data *td = container_of(work, struct throtl_data,
					      dispatch_work);
	struct throtl_service_queue *td_sq = &td->service_queue;
	struct request_queue *q = td->queue;
	struct bio_list bio_list_on_stack;
	struct bio *bio;
	struct blk_plug plug;
	int rw;

	bio_list_init(&bio_list_on_stack);

	spin_lock_irq(q->queue_lock);
	for (rw = READ; rw <= WRITE; rw++) {
		bio_list_merge(&bio_list_on_stack, &td_sq->bio_lists[rw]);
		bio_list_init(&td_sq->bio_lists[rw]);
	}
	spin_unlock_irq(q->queue_lock);

	if (!bio_list_empty(&bio_list_on_stack)) {
		blk_start_plug(&plug);
		while ((bio = bio_list_pop(&bio_list_on_stack)))
			generic_make_request(bio);
		blk_finish_plug(&plug);
	}
}

static u64 tg_prfill_cpu_rwstat(struct seq_file *sf,
				struct blkg_policy_data *pd, int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	struct blkg_rwstat rwstat = { }, tmp;
	int i, cpu;

	for_each_possible_cpu(cpu) {
		struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);

		tmp = blkg_rwstat_read((void *)sc + off);
		for (i = 0; i < BLKG_RWSTAT_NR; i++)
			rwstat.cnt[i] += tmp.cnt[i];
	}

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}

static int tg_print_cpu_rwstat(struct cgroup *cgrp, struct cftype *cft,
			       struct seq_file *sf)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);

	blkcg_print_blkgs(sf, blkcg, tg_prfill_cpu_rwstat, &blkcg_policy_throtl,
			  cft->private, true);
	return 0;
}

static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
			      int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	u64 v = *(u64 *)((void *)tg + off);

	if (v == -1)
		return 0;
	return __blkg_prfill_u64(sf, pd, v);
}

static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
			       int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	unsigned int v = *(unsigned int *)((void *)tg + off);

	if (v == -1)
		return 0;
	return __blkg_prfill_u64(sf, pd, v);
}

static int tg_print_conf_u64(struct cgroup *cgrp, struct cftype *cft,
			     struct seq_file *sf)
{
	blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_u64,
			  &blkcg_policy_throtl, cft->private, false);
	return 0;
}

static int tg_print_conf_uint(struct cgroup *cgrp, struct cftype *cft,
			      struct seq_file *sf)
{
	blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_uint,
			  &blkcg_policy_throtl, cft->private, false);
	return 0;
}

static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf,
		       bool is_u64)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
	struct blkg_conf_ctx ctx;
	struct throtl_grp *tg;
	struct throtl_service_queue *sq;
	int ret;

	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
	if (ret)
		return ret;

	tg = blkg_to_tg(ctx.blkg);
	sq = &tg->service_queue;

	if (!ctx.v)
		ctx.v = -1;

	if (is_u64)
		*(u64 *)((void *)tg + cft->private) = ctx.v;
	else
		*(unsigned int *)((void *)tg + cft->private) = ctx.v;

	throtl_log(&tg->service_queue,
		   "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
		   tg->bps[READ], tg->bps[WRITE],
		   tg->iops[READ], tg->iops[WRITE]);

	/*
	 * We're already holding queue_lock and know @tg is valid.  Let's
	 * apply the new config directly.
	 *
	 * Restart the slices for both READ and WRITES. It might happen
	 * that a group's limits are dropped suddenly and we don't want to
	 * account recently dispatched IO with the new low rate.
	 */
	throtl_start_new_slice(tg, 0);
	throtl_start_new_slice(tg, 1);

	if (tg->flags & THROTL_TG_PENDING) {
		tg_update_disptime(tg);
		throtl_schedule_next_dispatch(sq->parent_sq, true);
	}

	blkg_conf_finish(&ctx);
	return 0;
}

static int tg_set_conf_u64(struct cgroup *cgrp, struct cftype *cft,
			   const char *buf)
{
	return tg_set_conf(cgrp, cft, buf, true);
}

static int tg_set_conf_uint(struct cgroup *cgrp, struct cftype *cft,
			    const char *buf)
{
	return tg_set_conf(cgrp, cft, buf, false);
}

static struct cftype throtl_files[] = {
	{
		.name = "throttle.read_bps_device",
		.private = offsetof(struct throtl_grp, bps[READ]),
		.read_seq_string = tg_print_conf_u64,
		.write_string = tg_set_conf_u64,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_bps_device",
		.private = offsetof(struct throtl_grp, bps[WRITE]),
		.read_seq_string = tg_print_conf_u64,
		.write_string = tg_set_conf_u64,
		.max_write_len = 256,
	},
	{
		.name = "throttle.read_iops_device",
		.private = offsetof(struct throtl_grp, iops[READ]),
		.read_seq_string = tg_print_conf_uint,
		.write_string = tg_set_conf_uint,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_iops_device",
		.private = offsetof(struct throtl_grp, iops[WRITE]),
		.read_seq_string = tg_print_conf_uint,
		.write_string = tg_set_conf_uint,
		.max_write_len = 256,
	},
	{
		.name = "throttle.io_service_bytes",
		.private = offsetof(struct tg_stats_cpu, service_bytes),
		.read_seq_string = tg_print_cpu_rwstat,
	},
	{
		.name = "throttle.io_serviced",
		.private = offsetof(struct tg_stats_cpu, serviced),
		.read_seq_string = tg_print_cpu_rwstat,
	},
	{ }	/* terminate */
};

static void throtl_shutdown_wq(struct request_queue *q)
{
	struct throtl_data *td = q->td;

	cancel_work_sync(&td->dispatch_work);
}

static struct blkcg_policy blkcg_policy_throtl = {
	.pd_size		= sizeof(struct throtl_grp),
	.cftypes		= throtl_files,

	.pd_init_fn		= throtl_pd_init,
	.pd_exit_fn		= throtl_pd_exit,
	.pd_reset_stats_fn	= throtl_pd_reset_stats,
};

bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
{
	struct throtl_data *td = q->td;
	struct throtl_grp *tg;
	struct throtl_service_queue *sq;
	bool rw = bio_data_dir(bio);
	struct blkcg *blkcg;
	bool throttled = false;

	/* see throtl_charge_bio() */
	if (bio->bi_rw & REQ_THROTTLED)
		goto out;

	/*
	 * A throtl_grp pointer retrieved under rcu can be used to access
	 * basic fields like stats and io rates. If a group has no rules,
	 * just update the dispatch stats in lockless manner and return.
	 */
	rcu_read_lock();
	blkcg = bio_blkcg(bio);
	tg = throtl_lookup_tg(td, blkcg);
	if (tg) {
		if (tg_no_rule_group(tg, rw)) {
			throtl_update_dispatch_stats(tg_to_blkg(tg),
						     bio->bi_size, bio->bi_rw);
			goto out_unlock_rcu;
		}
	}

	/*
	 * Either group has not been allocated yet or it is not an unlimited
	 * IO group
	 */
	spin_lock_irq(q->queue_lock);
	tg = throtl_lookup_create_tg(td, blkcg);
	if (unlikely(!tg))
		goto out_unlock;

	sq = &tg->service_queue;

	while (true) {
		/* throtl is FIFO - if bios are already queued, should queue */
		if (sq->nr_queued[rw])
			break;

		/* if above limits, break to queue */
		if (!tg_may_dispatch(tg, bio, NULL))
			break;

		/* within limits, let's charge and dispatch directly */
		throtl_charge_bio(tg, bio);

		/*
		 * We need to trim slice even when bios are not being queued
		 * otherwise it might happen that a bio is not queued for
		 * a long time and slice keeps on extending and trim is not
		 * called for a long time. Now if limits are reduced suddenly
		 * we take into account all the IO dispatched so far at the new
		 * low rate and newly queued IO gets a really long dispatch
		 * time.
		 *
		 * So keep on trimming slice even if bio is not queued.
		 */
		throtl_trim_slice(tg, rw);

		/*
		 * @bio passed through this layer without being throttled.
		 * Climb up the ladder.  If we're already at the top, it
		 * can be executed directly.
		 */
		sq = sq->parent_sq;
		tg = sq_to_tg(sq);
		if (!tg)
			goto out_unlock;
	}

	/* out-of-limit, queue to @tg */
	throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
		   rw == READ ? 'R' : 'W',
		   tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
		   tg->io_disp[rw], tg->iops[rw],
		   sq->nr_queued[READ], sq->nr_queued[WRITE]);

	bio_associate_current(bio);
	tg->td->nr_queued[rw]++;
	throtl_add_bio_tg(bio, tg);
	throttled = true;

	/*
	 * Update @tg's dispatch time and force schedule dispatch if @tg
	 * was empty before @bio.  The forced scheduling isn't likely to
	 * cause undue delay as @bio is likely to be dispatched directly if
	 * its @tg's disptime is not in the future.
	 */
	if (tg->flags & THROTL_TG_WAS_EMPTY) {
		tg_update_disptime(tg);
		throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true);
	}

out_unlock:
	spin_unlock_irq(q->queue_lock);
out_unlock_rcu:
	rcu_read_unlock();
out:
	/*
	 * As multiple blk-throtls may stack in the same issue path, we
	 * don't want bios to leave with the flag set.  Clear the flag if
	 * being issued.
	 */
	if (!throttled)
		bio->bi_rw &= ~REQ_THROTTLED;
	return throttled;
}

/*
 * Dispatch all bios from all children tg's queued on @parent_sq.  On
 * return, @parent_sq is guaranteed to not have any active children tg's
 * and all bios from previously active tg's are on @parent_sq->bio_lists[].
 */
static void tg_drain_bios(struct throtl_service_queue *parent_sq)
{
	struct throtl_grp *tg;

	while ((tg = throtl_rb_first(parent_sq))) {
		struct throtl_service_queue *sq = &tg->service_queue;
		struct bio *bio;

		throtl_dequeue_tg(tg);

		while ((bio = bio_list_peek(&sq->bio_lists[READ])))
			tg_dispatch_one_bio(tg, bio_data_dir(bio));
		while ((bio = bio_list_peek(&sq->bio_lists[WRITE])))
			tg_dispatch_one_bio(tg, bio_data_dir(bio));
	}
}

/**
 * blk_throtl_drain - drain throttled bios
 * @q: request_queue to drain throttled bios for
 *
 * Dispatch all currently throttled bios on @q through ->make_request_fn().
 */
void blk_throtl_drain(struct request_queue *q)
	__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct throtl_data *td = q->td;
	struct blkcg_gq *blkg;
	struct cgroup *pos_cgrp;
	struct bio *bio;
	int rw;

	queue_lockdep_assert_held(q);
	rcu_read_lock();

	/*
	 * Drain each tg while doing post-order walk on the blkg tree, so
	 * that all bios are propagated to td->service_queue.  It'd be
	 * better to walk service_queue tree directly but blkg walk is
	 * easier.
	 */
	blkg_for_each_descendant_post(blkg, pos_cgrp, td->queue->root_blkg)
		tg_drain_bios(&blkg_to_tg(blkg)->service_queue);

	tg_drain_bios(&td_root_tg(td)->service_queue);

	/* finally, transfer bios from top-level tg's into the td */
	tg_drain_bios(&td->service_queue);

	rcu_read_unlock();
	spin_unlock_irq(q->queue_lock);

	/* all bios now should be in td->service_queue, issue them */
	for (rw = READ; rw <= WRITE; rw++)
		while ((bio = bio_list_pop(&td->service_queue.bio_lists[rw])))
			generic_make_request(bio);

	spin_lock_irq(q->queue_lock);
}

int blk_throtl_init(struct request_queue *q)
{
	struct throtl_data *td;
	int ret;

	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
	if (!td)
		return -ENOMEM;

	INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
	throtl_service_queue_init(&td->service_queue, NULL);

	q->td = td;
	td->queue = q;

	/* activate policy */
	ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
	if (ret)
		kfree(td);
	return ret;
}

void blk_throtl_exit(struct request_queue *q)
{
	BUG_ON(!q->td);
	throtl_shutdown_wq(q);
	blkcg_deactivate_policy(q, &blkcg_policy_throtl);
	kfree(q->td);
}

static int __init throtl_init(void)
{
	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
	if (!kthrotld_workqueue)
		panic("Failed to create kthrotld\n");

	return blkcg_policy_register(&blkcg_policy_throtl);
}

module_init(throtl_init);