/*
 * Interface for controlling IO bandwidth on a request queue
 *
 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blktrace_api.h>
#include "blk-cgroup.h"
/* Max dispatch from a group in 1 round */
static int throtl_grp_quantum = 8;

/* Total max dispatch from all groups in one round */
static int throtl_quantum = 32;
/* Throttling is performed over a 100ms slice and after that the slice is renewed */
static unsigned long throtl_slice = HZ/10;	/* 100 ms */
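/*
 * Editor's illustration (not part of the original source): with a 100ms
 * slice, a group limited to bps[rw] bytes/sec may dispatch roughly
 * bps[rw]/10 bytes per slice; e.g. a 1 MB/s limit allows about 100 KB per
 * slice before tg_may_dispatch() starts returning a wait time.
 */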
/* A workqueue to queue throttle related work */
static struct workqueue_struct *kthrotld_workqueue;
static void throtl_schedule_delayed_work(struct throtl_data *td,
				unsigned long delay);
struct throtl_rb_root {
	struct rb_root rb;
	struct rb_node *left;
	unsigned int count;
	unsigned long min_disptime;
};

#define THROTL_RB_ROOT	(struct throtl_rb_root) { .rb = RB_ROOT, .left = NULL, \
			.count = 0, .min_disptime = 0}

#define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)
struct throtl_grp {
	/* List of throtl groups on the request queue */
	struct hlist_node tg_node;

	/* active throtl group service_tree member */
	struct rb_node rb_node;

	/*
	 * Dispatch time in jiffies. This is the estimated time when group
	 * will unthrottle and is ready to dispatch more bio. It is used as
	 * key to sort active groups in service tree.
	 */
	unsigned long disptime;

	struct blkio_group blkg;
	atomic_t ref;
	unsigned int flags;

	/* Two lists for READ and WRITE */
	struct bio_list bio_lists[2];

	/* Number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	/* bytes per second rate limits */
	uint64_t bps[2];

	/* IOPS limits */
	unsigned int iops[2];

	/* Number of bytes dispatched in current slice */
	uint64_t bytes_disp[2];
	/* Number of bio's dispatched in current slice */
	unsigned int io_disp[2];

	/* When did we start a new slice */
	unsigned long slice_start[2];
	unsigned long slice_end[2];

	/* Some throttle limits got updated for the group */
	bool limits_changed;
};
struct throtl_data {
	/* List of throtl groups */
	struct hlist_head tg_list;

	/* service tree for active throtl groups */
	struct throtl_rb_root tg_service_tree;

	struct throtl_grp root_tg;
	struct request_queue *queue;

	/* Total Number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	/* number of total undestroyed groups */
	unsigned int nr_undestroyed_grps;

	/* Work for dispatching throttled bios */
	struct delayed_work throtl_work;

	/* Some throttle limits got updated, process them on next dispatch */
	bool limits_changed;
};
enum tg_state_flags {
	THROTL_TG_FLAG_on_rr = 0,	/* on round-robin busy list */
};

#define THROTL_TG_FNS(name)						\
static inline void throtl_mark_tg_##name(struct throtl_grp *tg)	\
{									\
	(tg)->flags |= (1 << THROTL_TG_FLAG_##name);			\
}									\
static inline void throtl_clear_tg_##name(struct throtl_grp *tg)	\
{									\
	(tg)->flags &= ~(1 << THROTL_TG_FLAG_##name);			\
}									\
static inline int throtl_tg_##name(const struct throtl_grp *tg)	\
{									\
	return ((tg)->flags & (1 << THROTL_TG_FLAG_##name)) != 0;	\
}

THROTL_TG_FNS(on_rr);
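/*
 * Editor's note (illustration, not in the original source): the single
 * invocation above expands into throtl_mark_tg_on_rr(), throtl_clear_tg_on_rr()
 * and throtl_tg_on_rr(), which set, clear and test THROTL_TG_FLAG_on_rr in
 * tg->flags. They are used by throtl_enqueue_tg()/throtl_dequeue_tg() below
 * to track whether a group is currently on the service tree.
 */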
#define throtl_log_tg(td, tg, fmt, args...)				\
	blk_add_trace_msg((td)->queue, "throtl %s " fmt,		\
				blkg_path(&(tg)->blkg), ##args);

#define throtl_log(td, fmt, args...)	\
	blk_add_trace_msg((td)->queue, "throtl " fmt, ##args)
static inline struct throtl_grp *tg_of_blkg(struct blkio_group *blkg)
{
	return container_of(blkg, struct throtl_grp, blkg);
}
static inline int total_nr_queued(struct throtl_data *td)
{
	return (td->nr_queued[0] + td->nr_queued[1]);
}
static inline struct throtl_grp *throtl_ref_get_tg(struct throtl_grp *tg)
{
	atomic_inc(&tg->ref);
	return tg;
}
static void throtl_put_tg(struct throtl_grp *tg)
{
	BUG_ON(atomic_read(&tg->ref) <= 0);
	if (!atomic_dec_and_test(&tg->ref))
		return;
	kfree(tg);
}
static void throtl_init_group(struct throtl_grp *tg)
{
	INIT_HLIST_NODE(&tg->tg_node);
	RB_CLEAR_NODE(&tg->rb_node);
	bio_list_init(&tg->bio_lists[0]);
	bio_list_init(&tg->bio_lists[1]);
	tg->limits_changed = false;

	/* Practically unlimited BW */
	tg->bps[0] = tg->bps[1] = -1;
	tg->iops[0] = tg->iops[1] = -1;

	/*
	 * Take the initial reference that will be released on destroy.
	 * This can be thought of as a joint reference by cgroup and
	 * request queue which will be dropped by either request queue
	 * exit or cgroup deletion path depending on who is exiting first.
	 */
	atomic_set(&tg->ref, 1);
}
/* Should be called with rcu read lock held (needed for blkcg) */
static void
throtl_add_group_to_td_list(struct throtl_data *td, struct throtl_grp *tg)
{
	hlist_add_head(&tg->tg_node, &td->tg_list);
	td->nr_undestroyed_grps++;
}
static void throtl_init_add_tg_lists(struct throtl_data *td,
			struct throtl_grp *tg, struct blkio_cgroup *blkcg)
{
	struct backing_dev_info *bdi = &td->queue->backing_dev_info;
	unsigned int major, minor;

	/* Add group onto cgroup list */
	sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
	blkiocg_add_blkio_group(blkcg, &tg->blkg, (void *)td,
				MKDEV(major, minor), BLKIO_POLICY_THROTL);

	tg->bps[READ] = blkcg_get_read_bps(blkcg, tg->blkg.dev);
	tg->bps[WRITE] = blkcg_get_write_bps(blkcg, tg->blkg.dev);
	tg->iops[READ] = blkcg_get_read_iops(blkcg, tg->blkg.dev);
	tg->iops[WRITE] = blkcg_get_write_iops(blkcg, tg->blkg.dev);

	throtl_add_group_to_td_list(td, tg);
}
/* Should be called without queue lock and outside of rcu period */
static struct throtl_grp *throtl_alloc_tg(struct throtl_data *td)
{
	struct throtl_grp *tg = NULL;

	tg = kzalloc_node(sizeof(*tg), GFP_ATOMIC, td->queue->node);
	if (!tg)
		return NULL;

	throtl_init_group(tg);
	return tg;
}
static struct throtl_grp *
throtl_find_tg(struct throtl_data *td, struct blkio_cgroup *blkcg)
{
	struct throtl_grp *tg = NULL;
	void *key = td;
	struct backing_dev_info *bdi = &td->queue->backing_dev_info;
	unsigned int major, minor;

	/*
	 * This is the common case when there are no blkio cgroups.
	 * Avoid lookup in this case
	 */
	if (blkcg == &blkio_root_cgroup)
		tg = &td->root_tg;
	else
		tg = tg_of_blkg(blkiocg_lookup_group(blkcg, key));

	/* Fill in device details for root group */
	if (tg && !tg->blkg.dev && bdi->dev && dev_name(bdi->dev)) {
		sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
		tg->blkg.dev = MKDEV(major, minor);
	}

	return tg;
}
/*
 * This function returns with queue lock unlocked in case of error, like
 * request queue is no more
 */
static struct throtl_grp *throtl_get_tg(struct throtl_data *td)
{
	struct throtl_grp *tg = NULL, *__tg = NULL;
	struct blkio_cgroup *blkcg;
	struct request_queue *q = td->queue;

	rcu_read_lock();
	blkcg = task_blkio_cgroup(current);
	tg = throtl_find_tg(td, blkcg);
	if (tg) {
		rcu_read_unlock();
		return tg;
	}

	/*
	 * Need to allocate a group. Allocation of group also needs allocation
	 * of per cpu stats which in-turn takes a mutex() and can block. Hence
	 * we need to drop rcu lock and queue_lock before we call alloc.
	 *
	 * Take the request queue reference to make sure queue does not
	 * go away once we return from allocation.
	 */
	blk_get_queue(q);
	rcu_read_unlock();
	spin_unlock_irq(q->queue_lock);

	tg = throtl_alloc_tg(td);
	/*
	 * We might have slept in group allocation. Make sure queue is not
	 * dead.
	 */
	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
		blk_put_queue(q);
		if (tg)
			kfree(tg);

		return ERR_PTR(-ENODEV);
	}
	blk_put_queue(q);

	/* Group allocated and queue is still alive. take the lock */
	spin_lock_irq(q->queue_lock);

	/*
	 * Initialize the new group. After sleeping, read the blkcg again.
	 */
	rcu_read_lock();
	blkcg = task_blkio_cgroup(current);

	/*
	 * If some other thread already allocated the group while we were
	 * not holding queue lock, free up the group
	 */
	__tg = throtl_find_tg(td, blkcg);
	if (__tg) {
		kfree(tg);
		rcu_read_unlock();
		return __tg;
	}

	/* Group allocation failed. Account the IO to root group */
	if (!tg) {
		tg = &td->root_tg;
		return tg;
	}

	throtl_init_add_tg_lists(td, tg, blkcg);
	rcu_read_unlock();
	return tg;
}
static struct throtl_grp *throtl_rb_first(struct throtl_rb_root *root)
{
	/* Service tree is empty */
	if (!root->count)
		return NULL;

	if (!root->left)
		root->left = rb_first(&root->rb);

	if (root->left)
		return rb_entry_tg(root->left);

	return NULL;
}
static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
	rb_erase(n, root);
	RB_CLEAR_NODE(n);
}

static void throtl_rb_erase(struct rb_node *n, struct throtl_rb_root *root)
{
	if (root->left == n)
		root->left = NULL;
	rb_erase_init(n, &root->rb);
	--root->count;
}
static void update_min_dispatch_time(struct throtl_rb_root *st)
{
	struct throtl_grp *tg;

	tg = throtl_rb_first(st);
	if (!tg)
		return;

	st->min_disptime = tg->disptime;
}
static void
tg_service_tree_add(struct throtl_rb_root *st, struct throtl_grp *tg)
{
	struct rb_node **node = &st->rb.rb_node;
	struct rb_node *parent = NULL;
	struct throtl_grp *__tg;
	unsigned long key = tg->disptime;
	int left = 1;

	while (*node != NULL) {
		parent = *node;
		__tg = rb_entry_tg(parent);

		if (time_before(key, __tg->disptime))
			node = &parent->rb_left;
		else {
			node = &parent->rb_right;
			left = 0;
		}
	}

	if (left)
		st->left = &tg->rb_node;

	rb_link_node(&tg->rb_node, parent, node);
	rb_insert_color(&tg->rb_node, &st->rb);
}
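/*
 * Editor's note (illustration, not in the original source): this is the usual
 * "rbtree with cached leftmost node" pattern. Groups are keyed by disptime,
 * st->left tracks the group that will unthrottle first, and
 * update_min_dispatch_time() only has to look at that cached node instead of
 * walking the tree.
 */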
static void __throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	struct throtl_rb_root *st = &td->tg_service_tree;

	tg_service_tree_add(st, tg);
	throtl_mark_tg_on_rr(tg);
	st->count++;
}
static void throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	if (!throtl_tg_on_rr(tg))
		__throtl_enqueue_tg(td, tg);
}
static void __throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	throtl_rb_erase(&tg->rb_node, &td->tg_service_tree);
	throtl_clear_tg_on_rr(tg);
}
static void throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	if (throtl_tg_on_rr(tg))
		__throtl_dequeue_tg(td, tg);
}
static void throtl_schedule_next_dispatch(struct throtl_data *td)
{
	struct throtl_rb_root *st = &td->tg_service_tree;

	/*
	 * If there are more bios pending, schedule more work.
	 */
	if (!total_nr_queued(td))
		return;

	BUG_ON(!st->count);

	update_min_dispatch_time(st);

	if (time_before_eq(st->min_disptime, jiffies))
		throtl_schedule_delayed_work(td, 0);
	else
		throtl_schedule_delayed_work(td, (st->min_disptime - jiffies));
}
static inline void
throtl_start_new_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
	tg->bytes_disp[rw] = 0;
	tg->io_disp[rw] = 0;
	tg->slice_start[rw] = jiffies;
	tg->slice_end[rw] = jiffies + throtl_slice;
	throtl_log_tg(td, tg, "[%c] new slice start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', tg->slice_start[rw],
			tg->slice_end[rw], jiffies);
}
static inline void throtl_set_slice_end(struct throtl_data *td,
		struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
}
static inline void throtl_extend_slice(struct throtl_data *td,
		struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
	throtl_log_tg(td, tg, "[%c] extend slice start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', tg->slice_start[rw],
			tg->slice_end[rw], jiffies);
}
/* Determine if previously allocated or extended slice is complete or not */
static bool
throtl_slice_used(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
	if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
		return 0;

	return 1;
}
/* Trim the used slices and adjust slice start accordingly */
static inline void
throtl_trim_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
	unsigned long nr_slices, time_elapsed, io_trim;
	u64 bytes_trim, tmp;

	BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));

	/*
	 * If bps are unlimited (-1), then the time slice doesn't get
	 * renewed. Don't try to trim the slice if slice is used. A new
	 * slice will start when appropriate.
	 */
	if (throtl_slice_used(td, tg, rw))
		return;

	/*
	 * A bio has been dispatched. Also adjust slice_end. It might happen
	 * that initially cgroup limit was very low resulting in high
	 * slice_end, but later limit was bumped up and bio was dispatched
	 * sooner, then we need to reduce slice_end. A high bogus slice_end
	 * is bad because it does not allow new slice to start.
	 */
	throtl_set_slice_end(td, tg, rw, jiffies + throtl_slice);

	time_elapsed = jiffies - tg->slice_start[rw];

	nr_slices = time_elapsed / throtl_slice;

	if (!nr_slices)
		return;

	tmp = tg->bps[rw] * throtl_slice * nr_slices;
	do_div(tmp, HZ);
	bytes_trim = tmp;

	io_trim = (tg->iops[rw] * throtl_slice * nr_slices)/HZ;

	if (!bytes_trim && !io_trim)
		return;

	if (tg->bytes_disp[rw] >= bytes_trim)
		tg->bytes_disp[rw] -= bytes_trim;
	else
		tg->bytes_disp[rw] = 0;

	if (tg->io_disp[rw] >= io_trim)
		tg->io_disp[rw] -= io_trim;
	else
		tg->io_disp[rw] = 0;

	tg->slice_start[rw] += nr_slices * throtl_slice;

	throtl_log_tg(td, tg, "[%c] trim slice nr=%lu bytes=%llu io=%lu"
			" start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
			tg->slice_start[rw], tg->slice_end[rw], jiffies);
}
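/*
 * Editor's worked example (illustration only, assumes HZ=1000 and
 * throtl_slice=100 jiffies): with bps[rw] = 1048576 (1 MB/s), if 250 jiffies
 * have elapsed since slice_start then nr_slices = 250/100 = 2, so
 * bytes_trim = 1048576 * 100 * 2 / 1000 = 209715 bytes is subtracted from
 * bytes_disp and slice_start moves forward by 200 jiffies. Only whole, fully
 * used slices are trimmed; the partially used third slice is kept.
 */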
static bool tg_with_in_iops_limit(struct throtl_data *td, struct throtl_grp *tg,
		struct bio *bio, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned int io_allowed;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
	u64 tmp;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

	/*
	 * jiffy_elapsed_rnd should not be a big value as minimum iops can be
	 * 1 then at max jiffy elapsed should be equivalent of 1 second as we
	 * will allow dispatch after 1 second and after that slice should
	 * have been trimmed.
	 */
	tmp = (u64)tg->iops[rw] * jiffy_elapsed_rnd;
	do_div(tmp, HZ);

	if (tmp > UINT_MAX)
		io_allowed = UINT_MAX;
	else
		io_allowed = tmp;

	if (tg->io_disp[rw] + 1 <= io_allowed) {
		if (wait)
			*wait = 0;
		return 1;
	}

	/* Calc approx time to dispatch */
	jiffy_wait = ((tg->io_disp[rw] + 1) * HZ)/tg->iops[rw] + 1;

	if (jiffy_wait > jiffy_elapsed)
		jiffy_wait = jiffy_wait - jiffy_elapsed;
	else
		jiffy_wait = 1;

	if (wait)
		*wait = jiffy_wait;
	return 0;
}
static bool tg_with_in_bps_limit(struct throtl_data *td, struct throtl_grp *tg,
		struct bio *bio, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	u64 bytes_allowed, extra_bytes, tmp;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

	tmp = tg->bps[rw] * jiffy_elapsed_rnd;
	do_div(tmp, HZ);
	bytes_allowed = tmp;

	if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed) {
		if (wait)
			*wait = 0;
		return 1;
	}

	/* Calc approx time to dispatch */
	extra_bytes = tg->bytes_disp[rw] + bio->bi_size - bytes_allowed;
	jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);

	if (!jiffy_wait)
		jiffy_wait = 1;

	/*
	 * This wait time is without taking into consideration the rounding
	 * up we did. Add that time also.
	 */
	jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
	if (wait)
		*wait = jiffy_wait;
	return 0;
}
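/*
 * Editor's worked example (illustration only, assumes HZ=1000 and
 * throtl_slice=100): with bps[rw] = 1048576 and 50 jiffies elapsed,
 * jiffy_elapsed_rnd is rounded up to 100, so bytes_allowed =
 * 1048576 * 100 / 1000 = 104857. A 256 KB bio arriving with bytes_disp = 0
 * exceeds that; extra_bytes = 157287 and jiffy_wait = 157287 * 1000 / 1048576
 * = 150 jiffies, plus the 50 jiffies of round-up slack already granted.
 */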
/*
 * Returns whether one can dispatch a bio or not. Also returns approx number
 * of jiffies to wait before this bio is with-in IO rate and can be dispatched
 */
static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
				struct bio *bio, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;

	/*
	 * Currently whole state machine of group depends on first bio
	 * queued in the group bio list. So one should not be calling
	 * this function with a different bio if there are other bios
	 * queued.
	 */
	BUG_ON(tg->nr_queued[rw] && bio != bio_list_peek(&tg->bio_lists[rw]));

	/* If tg->bps = -1, then BW is unlimited */
	if (tg->bps[rw] == -1 && tg->iops[rw] == -1) {
		if (wait)
			*wait = 0;
		return 1;
	}

	/*
	 * If previous slice expired, start a new one otherwise renew/extend
	 * existing slice to make sure it is at least throtl_slice interval
	 * long since now.
	 */
	if (throtl_slice_used(td, tg, rw))
		throtl_start_new_slice(td, tg, rw);
	else {
		if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
			throtl_extend_slice(td, tg, rw, jiffies + throtl_slice);
	}

	if (tg_with_in_bps_limit(td, tg, bio, &bps_wait)
	    && tg_with_in_iops_limit(td, tg, bio, &iops_wait)) {
		if (wait)
			*wait = 0;
		return 1;
	}

	max_wait = max(bps_wait, iops_wait);

	if (wait)
		*wait = max_wait;

	if (time_before(tg->slice_end[rw], jiffies + max_wait))
		throtl_extend_slice(td, tg, rw, jiffies + max_wait);

	return 0;
}
static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
	bool rw = bio_data_dir(bio);
	bool sync = bio->bi_rw & REQ_SYNC;

	/* Charge the bio to the group */
	tg->bytes_disp[rw] += bio->bi_size;
	tg->io_disp[rw]++;

	/*
	 * TODO: This will take blkg->stats_lock. Figure out a way
	 * to avoid this cost.
	 */
	blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size, rw, sync);
}
static void throtl_add_bio_tg(struct throtl_data *td, struct throtl_grp *tg,
			struct bio *bio)
{
	bool rw = bio_data_dir(bio);

	bio_list_add(&tg->bio_lists[rw], bio);
	/* Take a bio reference on tg */
	throtl_ref_get_tg(tg);
	tg->nr_queued[rw]++;
	td->nr_queued[rw]++;
	throtl_enqueue_tg(td, tg);
}
static void tg_update_disptime(struct throtl_data *td, struct throtl_grp *tg)
{
	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
	struct bio *bio;

	if ((bio = bio_list_peek(&tg->bio_lists[READ])))
		tg_may_dispatch(td, tg, bio, &read_wait);

	if ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
		tg_may_dispatch(td, tg, bio, &write_wait);

	min_wait = min(read_wait, write_wait);
	disptime = jiffies + min_wait;

	/* Update dispatch time */
	throtl_dequeue_tg(td, tg);
	tg->disptime = disptime;
	throtl_enqueue_tg(td, tg);
}
static void tg_dispatch_one_bio(struct throtl_data *td, struct throtl_grp *tg,
				bool rw, struct bio_list *bl)
{
	struct bio *bio;

	bio = bio_list_pop(&tg->bio_lists[rw]);
	tg->nr_queued[rw]--;
	/* Drop bio reference on tg */
	throtl_put_tg(tg);

	BUG_ON(td->nr_queued[rw] <= 0);
	td->nr_queued[rw]--;

	throtl_charge_bio(tg, bio);
	bio_list_add(bl, bio);
	bio->bi_rw |= REQ_THROTTLED;

	throtl_trim_slice(td, tg, rw);
}
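/*
 * Editor's note (illustration, not in the original source): REQ_THROTTLED set
 * here marks the bio as already accounted, so when the dispatch work later
 * resubmits it via generic_make_request() and it re-enters blk_throtl_bio(),
 * the flag is cleared and the bio passes straight through instead of being
 * throttled a second time.
 */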
static int throtl_dispatch_tg(struct throtl_data *td, struct throtl_grp *tg,
				struct bio_list *bl)
{
	unsigned int nr_reads = 0, nr_writes = 0;
	unsigned int max_nr_reads = throtl_grp_quantum*3/4;
	unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
	struct bio *bio;

	/* Try to dispatch 75% READS and 25% WRITES */

	while ((bio = bio_list_peek(&tg->bio_lists[READ]))
		&& tg_may_dispatch(td, tg, bio, NULL)) {

		tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
		nr_reads++;

		if (nr_reads >= max_nr_reads)
			break;
	}

	while ((bio = bio_list_peek(&tg->bio_lists[WRITE]))
		&& tg_may_dispatch(td, tg, bio, NULL)) {

		tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
		nr_writes++;

		if (nr_writes >= max_nr_writes)
			break;
	}

	return nr_reads + nr_writes;
}
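/*
 * Editor's note (illustration, not in the original source): with the default
 * throtl_grp_quantum of 8, max_nr_reads = 8*3/4 = 6 and max_nr_writes =
 * 8 - 6 = 2, so a single group dispatches at most 6 reads and 2 writes per
 * round, and throtl_quantum (32) caps the total across all groups per round.
 */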
static int throtl_select_dispatch(struct throtl_data *td, struct bio_list *bl)
{
	unsigned int nr_disp = 0;
	struct throtl_grp *tg;
	struct throtl_rb_root *st = &td->tg_service_tree;

	while (1) {
		tg = throtl_rb_first(st);

		if (!tg)
			break;

		if (time_before(jiffies, tg->disptime))
			break;

		throtl_dequeue_tg(td, tg);

		nr_disp += throtl_dispatch_tg(td, tg, bl);

		if (tg->nr_queued[0] || tg->nr_queued[1]) {
			tg_update_disptime(td, tg);
			throtl_enqueue_tg(td, tg);
		}

		if (nr_disp >= throtl_quantum)
			break;
	}

	return nr_disp;
}
static void throtl_process_limit_change(struct throtl_data *td)
{
	struct throtl_grp *tg;
	struct hlist_node *pos, *n;

	if (!td->limits_changed)
		return;

	xchg(&td->limits_changed, false);

	throtl_log(td, "limits changed");

	hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
		if (!tg->limits_changed)
			continue;

		if (!xchg(&tg->limits_changed, false))
			continue;

		throtl_log_tg(td, tg, "limit change rbps=%llu wbps=%llu"
			" riops=%u wiops=%u", tg->bps[READ], tg->bps[WRITE],
			tg->iops[READ], tg->iops[WRITE]);

		/*
		 * Restart the slices for both READ and WRITES. It
		 * might happen that a group's limits are dropped
		 * suddenly and we don't want to account recently
		 * dispatched IO with new low rate
		 */
		throtl_start_new_slice(td, tg, 0);
		throtl_start_new_slice(td, tg, 1);

		if (throtl_tg_on_rr(tg))
			tg_update_disptime(td, tg);
	}
}
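/*
 * Editor's note (illustration, not in the original source): the limit-update
 * callbacks below run under blkcg_lock and cannot take the queue lock, so they
 * only set tg->limits_changed and td->limits_changed with xchg() and kick the
 * delayed work. The xchg() above acts as an atomic test-and-clear, so a flag
 * set concurrently by an update is either seen in this pass or left set for
 * the next one, never lost.
 */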
/* Dispatch throttled bios. Should be called without queue lock held. */
static int throtl_dispatch(struct request_queue *q)
{
	struct throtl_data *td = q->td;
	unsigned int nr_disp = 0;
	struct bio_list bio_list_on_stack;
	struct bio *bio;
	struct blk_plug plug;

	spin_lock_irq(q->queue_lock);

	throtl_process_limit_change(td);

	if (!total_nr_queued(td))
		goto out;

	bio_list_init(&bio_list_on_stack);

	throtl_log(td, "dispatch nr_queued=%lu read=%u write=%u",
			total_nr_queued(td), td->nr_queued[READ],
			td->nr_queued[WRITE]);

	nr_disp = throtl_select_dispatch(td, &bio_list_on_stack);

	if (nr_disp)
		throtl_log(td, "bios disp=%u", nr_disp);

	throtl_schedule_next_dispatch(td);
out:
	spin_unlock_irq(q->queue_lock);

	/*
	 * If we dispatched some requests, unplug the queue to make sure
	 * immediate dispatch
	 */
	if (nr_disp) {
		blk_start_plug(&plug);
		while ((bio = bio_list_pop(&bio_list_on_stack)))
			generic_make_request(bio);
		blk_finish_plug(&plug);
	}
	return nr_disp;
}
void blk_throtl_work(struct work_struct *work)
{
	struct throtl_data *td = container_of(work, struct throtl_data,
					throtl_work.work);
	struct request_queue *q = td->queue;

	throtl_dispatch(q);
}
/* Call with queue lock held */
static void
throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
{
	struct delayed_work *dwork = &td->throtl_work;

	/* schedule work if limits changed even if no bio is queued */
	if (total_nr_queued(td) > 0 || td->limits_changed) {
		/*
		 * We might have a work scheduled to be executed in future.
		 * Cancel that and schedule a new one.
		 */
		__cancel_delayed_work(dwork);
		queue_delayed_work(kthrotld_workqueue, dwork, delay);
		throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
				delay, jiffies);
	}
}
static void
throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	/* Something wrong if we are trying to remove same group twice */
	BUG_ON(hlist_unhashed(&tg->tg_node));

	hlist_del_init(&tg->tg_node);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	throtl_put_tg(tg);
	td->nr_undestroyed_grps--;
}
static void throtl_release_tgs(struct throtl_data *td)
{
	struct hlist_node *pos, *n;
	struct throtl_grp *tg;

	hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
		/*
		 * If cgroup removal path got to blk_group first and removed
		 * it from cgroup list, then it will take care of destroying
		 * the group as well.
		 */
		if (!blkiocg_del_blkio_group(&tg->blkg))
			throtl_destroy_tg(td, tg);
	}
}
static void throtl_td_free(struct throtl_data *td)
{
	kfree(td);
}
/*
 * Blk cgroup controller notification saying that blkio_group object is being
 * delinked as associated cgroup object is going away. That also means that
 * no new IO will come in this group. So get rid of this group as soon as
 * any pending IO in the group is finished.
 *
 * This function is called under rcu_read_lock(). key is the rcu protected
 * pointer. That means "key" is a valid throtl_data pointer as long as we are
 * under the rcu read lock.
 *
 * "key" was fetched from blkio_group under blkio_cgroup->lock. That means
 * it should not be NULL as even if queue was going away, cgroup deletion
 * path got to it first.
 */
void throtl_unlink_blkio_group(void *key, struct blkio_group *blkg)
{
	unsigned long flags;
	struct throtl_data *td = key;

	spin_lock_irqsave(td->queue->queue_lock, flags);
	throtl_destroy_tg(td, tg_of_blkg(blkg));
	spin_unlock_irqrestore(td->queue->queue_lock, flags);
}
static void throtl_update_blkio_group_common(struct throtl_data *td,
				struct throtl_grp *tg)
{
	xchg(&tg->limits_changed, true);
	xchg(&td->limits_changed, true);
	/* Schedule a work now to process the limit change */
	throtl_schedule_delayed_work(td, 0);
}
/*
 * For all update functions, key should be a valid pointer because these
 * update functions are called under blkcg_lock, that means, blkg is
 * valid and in turn key is valid. queue exit path can not race because
 * of blkcg_lock.
 *
 * Can not take queue lock in update functions as queue lock under blkcg_lock
 * is not allowed. Under other paths we take blkcg_lock under queue_lock.
 */
static void throtl_update_blkio_group_read_bps(void *key,
				struct blkio_group *blkg, u64 read_bps)
{
	struct throtl_data *td = key;
	struct throtl_grp *tg = tg_of_blkg(blkg);

	tg->bps[READ] = read_bps;
	throtl_update_blkio_group_common(td, tg);
}

static void throtl_update_blkio_group_write_bps(void *key,
				struct blkio_group *blkg, u64 write_bps)
{
	struct throtl_data *td = key;
	struct throtl_grp *tg = tg_of_blkg(blkg);

	tg->bps[WRITE] = write_bps;
	throtl_update_blkio_group_common(td, tg);
}

static void throtl_update_blkio_group_read_iops(void *key,
				struct blkio_group *blkg, unsigned int read_iops)
{
	struct throtl_data *td = key;
	struct throtl_grp *tg = tg_of_blkg(blkg);

	tg->iops[READ] = read_iops;
	throtl_update_blkio_group_common(td, tg);
}

static void throtl_update_blkio_group_write_iops(void *key,
				struct blkio_group *blkg, unsigned int write_iops)
{
	struct throtl_data *td = key;
	struct throtl_grp *tg = tg_of_blkg(blkg);

	tg->iops[WRITE] = write_iops;
	throtl_update_blkio_group_common(td, tg);
}
static void throtl_shutdown_wq(struct request_queue *q)
{
	struct throtl_data *td = q->td;

	cancel_delayed_work_sync(&td->throtl_work);
}
static struct blkio_policy_type blkio_policy_throtl = {
	.ops = {
		.blkio_unlink_group_fn = throtl_unlink_blkio_group,
		.blkio_update_group_read_bps_fn =
					throtl_update_blkio_group_read_bps,
		.blkio_update_group_write_bps_fn =
					throtl_update_blkio_group_write_bps,
		.blkio_update_group_read_iops_fn =
					throtl_update_blkio_group_read_iops,
		.blkio_update_group_write_iops_fn =
					throtl_update_blkio_group_write_iops,
	},
	.plid = BLKIO_POLICY_THROTL,
};
int blk_throtl_bio(struct request_queue *q, struct bio **biop)
{
	struct throtl_data *td = q->td;
	struct throtl_grp *tg;
	struct bio *bio = *biop;
	bool rw = bio_data_dir(bio), update_disptime = true;

	if (bio->bi_rw & REQ_THROTTLED) {
		bio->bi_rw &= ~REQ_THROTTLED;
		return 0;
	}

	spin_lock_irq(q->queue_lock);
	tg = throtl_get_tg(td);

	if (IS_ERR(tg)) {
		if (PTR_ERR(tg) == -ENODEV) {
			/*
			 * Queue is gone. No queue lock held here.
			 */
			return -ENODEV;
		}
	}

	if (tg->nr_queued[rw]) {
		/*
		 * There is already another bio queued in same dir. No
		 * need to update dispatch time.
		 */
		update_disptime = false;
		goto queue_bio;
	}

	/* Bio is within rate limit of group */
	if (tg_may_dispatch(td, tg, bio, NULL)) {
		throtl_charge_bio(tg, bio);

		/*
		 * We need to trim slice even when bios are not being queued
		 * otherwise it might happen that a bio is not queued for
		 * a long time and slice keeps on extending and trim is not
		 * called for a long time. Now if limits are reduced suddenly
		 * we take into account all the IO dispatched so far at the
		 * new low rate and newly queued IO gets a really long dispatch
		 * time.
		 *
		 * So keep on trimming slice even if bio is not queued.
		 */
		throtl_trim_slice(td, tg, rw);
		goto out;
	}

queue_bio:
	throtl_log_tg(td, tg, "[%c] bio. bdisp=%u sz=%u bps=%llu"
			" iodisp=%u iops=%u queued=%d/%d",
			rw == READ ? 'R' : 'W',
			tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
			tg->io_disp[rw], tg->iops[rw],
			tg->nr_queued[READ], tg->nr_queued[WRITE]);

	throtl_add_bio_tg(q->td, tg, bio);
	*biop = NULL;

	if (update_disptime) {
		tg_update_disptime(td, tg);
		throtl_schedule_next_dispatch(td);
	}

out:
	spin_unlock_irq(q->queue_lock);
	return 0;
}
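/*
 * Editor's note (sketch of the caller contract, to the best of my reading of
 * this version): on return the bio has either been charged and left in *biop
 * for the caller to submit normally, or queued on the group, in which case
 * *biop is set to NULL and the bio is resubmitted later by the dispatch work
 * with REQ_THROTTLED set so it is not throttled again.
 */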
int blk_throtl_init(struct request_queue *q)
{
	struct throtl_data *td;
	struct throtl_grp *tg;

	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
	if (!td)
		return -ENOMEM;

	INIT_HLIST_HEAD(&td->tg_list);
	td->tg_service_tree = THROTL_RB_ROOT;
	td->limits_changed = false;
	INIT_DELAYED_WORK(&td->throtl_work, blk_throtl_work);

	/* Init root group */
	tg = &td->root_tg;
	throtl_init_group(tg);

	/*
	 * Set root group reference to 2. One reference will be dropped when
	 * all groups on tg_list are being deleted during queue exit. Other
	 * reference will remain there as we don't want to delete this group
	 * as it is statically allocated and gets destroyed when throtl_data
	 * goes away.
	 */
	atomic_inc(&tg->ref);

	rcu_read_lock();
	blkiocg_add_blkio_group(&blkio_root_cgroup, &tg->blkg, (void *)td,
					0, BLKIO_POLICY_THROTL);
	rcu_read_unlock();

	throtl_add_group_to_td_list(td, tg);

	/* Attach throtl data to request queue */
	td->queue = q;
	q->td = td;
	return 0;
}
void blk_throtl_exit(struct request_queue *q)
{
	struct throtl_data *td = q->td;
	bool wait = false;

	BUG_ON(!td);

	throtl_shutdown_wq(q);

	spin_lock_irq(q->queue_lock);
	throtl_release_tgs(td);

	/* If there are other groups */
	if (td->nr_undestroyed_grps > 0)
		wait = true;

	spin_unlock_irq(q->queue_lock);

	/*
	 * Wait for tg->blkg->key accessors to exit their grace periods.
	 * Do this wait only if there are other undestroyed groups out
	 * there (other than root group). This can happen if cgroup deletion
	 * path claimed the responsibility of cleaning up a group before
	 * queue cleanup code got to the group.
	 *
	 * Do not call synchronize_rcu() unconditionally as there are drivers
	 * which create/delete request queue hundreds of times during scan/boot
	 * and synchronize_rcu() can take significant time and slow down boot.
	 */
	if (wait)
		synchronize_rcu();

	/*
	 * Just being safe to make sure after previous flush if somebody did
	 * update limits through cgroup and another work got queued, cancel
	 * it.
	 */
	throtl_shutdown_wq(q);
	throtl_td_free(td);
}
static int __init throtl_init(void)
{
	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
	if (!kthrotld_workqueue)
		panic("Failed to create kthrotld\n");

	blkio_policy_register(&blkio_policy_throtl);
	return 0;
}

module_init(throtl_init);