/*
 * Interface for controlling IO bandwidth on a request queue
 *
 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blktrace_api.h>
#include "blk-cgroup.h"
/* Max dispatch from a group in 1 round */
static int throtl_grp_quantum = 8;

/* Total max dispatch from all groups in one round */
static int throtl_quantum = 32;

/* Throttling is performed over 100ms slice and after that slice is renewed */
static unsigned long throtl_slice = HZ/10;	/* 100 ms */
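/*
 * For illustration, assuming HZ=1000 the slice is 100 jiffies: a group
 * limited to 1MB/s may pass roughly 100KB worth of bios per slice before
 * it has to wait for the slice to be renewed.
 */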
static struct blkcg_policy blkcg_policy_throtl;

/* A workqueue to queue throttle related work */
static struct workqueue_struct *kthrotld_workqueue;
struct throtl_service_queue {
        struct rb_root          pending_tree;   /* RB tree of active tgs */
        struct rb_node          *first_pending; /* first node in the tree */
        unsigned int            nr_pending;     /* # queued in the tree */
        unsigned long           first_pending_disptime; /* disptime of the first tg */
};

#define THROTL_SERVICE_QUEUE_INITIALIZER				\
	(struct throtl_service_queue){ .pending_tree = RB_ROOT }
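/*
 * Active groups are kept in pending_tree sorted by their disptime.  The
 * leftmost (earliest dispatch time) node is cached in first_pending and
 * its dispatch time in first_pending_disptime, so scheduling the next
 * dispatch does not require walking the tree.
 */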
#define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)
/* Per-cpu group stats */
struct tg_stats_cpu {
        /* total bytes transferred */
        struct blkg_rwstat      service_bytes;
        /* total IOs serviced, post merge */
        struct blkg_rwstat      serviced;
};
struct throtl_grp {
        /* must be the first member */
        struct blkg_policy_data pd;

        /* active throtl group service_queue member */
        struct rb_node rb_node;

        /*
         * Dispatch time in jiffies. This is the estimated time when the
         * group will unthrottle and be ready to dispatch more bios. It is
         * used as the key to sort active groups in the service tree.
         */
        unsigned long disptime;

        unsigned int flags;

        /* Two lists for READ and WRITE */
        struct bio_list bio_lists[2];

        /* Number of queued bios on READ and WRITE lists */
        unsigned int nr_queued[2];

        /* bytes per second rate limits */
        uint64_t bps[2];

        /* IOPS rate limits */
        unsigned int iops[2];

        /* Number of bytes dispatched in current slice */
        uint64_t bytes_disp[2];
        /* Number of bio's dispatched in current slice */
        unsigned int io_disp[2];

        /* When did we start a new slice */
        unsigned long slice_start[2];
        unsigned long slice_end[2];

        /* Per cpu stats pointer */
        struct tg_stats_cpu __percpu *stats_cpu;

        /* List of tgs waiting for per cpu stats memory to be allocated */
        struct list_head stats_alloc_node;
};
struct throtl_data {
        /* service tree for active throtl groups */
        struct throtl_service_queue service_queue;

        struct request_queue *queue;

        /* Total Number of queued bios on READ and WRITE lists */
        unsigned int nr_queued[2];

        /* number of total undestroyed groups */
        unsigned int nr_undestroyed_grps;

        /* Work for dispatching throttled bios */
        struct delayed_work dispatch_work;
};
/* list and work item to allocate percpu group stats */
static DEFINE_SPINLOCK(tg_stats_alloc_lock);
static LIST_HEAD(tg_stats_alloc_list);

static void tg_stats_alloc_fn(struct work_struct *);
static DECLARE_DELAYED_WORK(tg_stats_alloc_work, tg_stats_alloc_fn);
static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
{
        return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
}

static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
{
        return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
}

static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
{
        return pd_to_blkg(&tg->pd);
}

static inline struct throtl_grp *td_root_tg(struct throtl_data *td)
{
        return blkg_to_tg(td->queue->root_blkg);
}
enum tg_state_flags {
        THROTL_TG_FLAG_on_rr = 0,	/* on round-robin busy list */
};

#define THROTL_TG_FNS(name)						\
static inline void throtl_mark_tg_##name(struct throtl_grp *tg)	\
{									\
	(tg)->flags |= (1 << THROTL_TG_FLAG_##name);			\
}									\
static inline void throtl_clear_tg_##name(struct throtl_grp *tg)	\
{									\
	(tg)->flags &= ~(1 << THROTL_TG_FLAG_##name);			\
}									\
static inline int throtl_tg_##name(const struct throtl_grp *tg)	\
{									\
	return ((tg)->flags & (1 << THROTL_TG_FLAG_##name)) != 0;	\
}

THROTL_TG_FNS(on_rr);
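/*
 * THROTL_TG_FNS(on_rr) above generates throtl_mark_tg_on_rr(),
 * throtl_clear_tg_on_rr() and throtl_tg_on_rr(), which set, clear and
 * test the THROTL_TG_FLAG_on_rr bit in tg->flags.
 */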
#define throtl_log_tg(td, tg, fmt, args...)	do {			\
	char __pbuf[128];						\
									\
	blkg_path(tg_to_blkg(tg), __pbuf, sizeof(__pbuf));		\
	blk_add_trace_msg((td)->queue, "throtl %s " fmt, __pbuf, ##args); \
} while (0)

#define throtl_log(td, fmt, args...)	\
	blk_add_trace_msg((td)->queue, "throtl " fmt, ##args)
/*
 * Worker for allocating per cpu stat for tgs. This is scheduled on the
 * system_wq once there are some groups on the alloc_list waiting for
 * allocation.
 */
static void tg_stats_alloc_fn(struct work_struct *work)
{
        static struct tg_stats_cpu *stats_cpu;	/* this fn is non-reentrant */
        struct delayed_work *dwork = to_delayed_work(work);
        bool empty = false;

alloc_stats:
        if (!stats_cpu) {
                stats_cpu = alloc_percpu(struct tg_stats_cpu);
                if (!stats_cpu) {
                        /* allocation failed, try again after some time */
                        schedule_delayed_work(dwork, msecs_to_jiffies(10));
                        return;
                }
        }

        spin_lock_irq(&tg_stats_alloc_lock);

        if (!list_empty(&tg_stats_alloc_list)) {
                struct throtl_grp *tg = list_first_entry(&tg_stats_alloc_list,
                                                         struct throtl_grp,
                                                         stats_alloc_node);
                swap(tg->stats_cpu, stats_cpu);
                list_del_init(&tg->stats_alloc_node);
        }

        empty = list_empty(&tg_stats_alloc_list);
        spin_unlock_irq(&tg_stats_alloc_lock);
        if (!empty)
                goto alloc_stats;
}
static void throtl_pd_init(struct blkcg_gq *blkg)
{
        struct throtl_grp *tg = blkg_to_tg(blkg);
        unsigned long flags;

        RB_CLEAR_NODE(&tg->rb_node);
        bio_list_init(&tg->bio_lists[0]);
        bio_list_init(&tg->bio_lists[1]);

        tg->bps[READ] = -1;
        tg->bps[WRITE] = -1;
        tg->iops[READ] = -1;
        tg->iops[WRITE] = -1;

        /*
         * Ugh... We need to perform per-cpu allocation for tg->stats_cpu
         * but the percpu allocator can't be called from the IO path. Queue
         * tg on tg_stats_alloc_list and allocate from a work item.
         */
        spin_lock_irqsave(&tg_stats_alloc_lock, flags);
        list_add(&tg->stats_alloc_node, &tg_stats_alloc_list);
        schedule_delayed_work(&tg_stats_alloc_work, 0);
        spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
}
static void throtl_pd_exit(struct blkcg_gq *blkg)
{
        struct throtl_grp *tg = blkg_to_tg(blkg);
        unsigned long flags;

        spin_lock_irqsave(&tg_stats_alloc_lock, flags);
        list_del_init(&tg->stats_alloc_node);
        spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);

        free_percpu(tg->stats_cpu);
}
static void throtl_pd_reset_stats(struct blkcg_gq *blkg)
{
        struct throtl_grp *tg = blkg_to_tg(blkg);
        int cpu;

        if (tg->stats_cpu == NULL)
                return;

        for_each_possible_cpu(cpu) {
                struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);

                blkg_rwstat_reset(&sc->service_bytes);
                blkg_rwstat_reset(&sc->serviced);
        }
}
static struct throtl_grp *throtl_lookup_tg(struct throtl_data *td,
                                           struct blkcg *blkcg)
{
        /*
         * This is the common case when there are no blkcgs. Avoid lookup
         * in this case.
         */
        if (blkcg == &blkcg_root)
                return td_root_tg(td);

        return blkg_to_tg(blkg_lookup(blkcg, td->queue));
}
static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
                                                  struct blkcg *blkcg)
{
        struct request_queue *q = td->queue;
        struct throtl_grp *tg = NULL;

        /*
         * This is the common case when there are no blkcgs. Avoid lookup
         * in this case.
         */
        if (blkcg == &blkcg_root) {
                tg = td_root_tg(td);
        } else {
                struct blkcg_gq *blkg;

                blkg = blkg_lookup_create(blkcg, q);

                /* if %NULL and @q is alive, fall back to root_tg */
                if (!IS_ERR(blkg))
                        tg = blkg_to_tg(blkg);
                else if (!blk_queue_dying(q))
                        tg = td_root_tg(td);
        }

        return tg;
}
static struct throtl_grp *throtl_rb_first(struct throtl_service_queue *sq)
{
        /* Service tree is empty */
        if (!sq->nr_pending)
                return NULL;

        if (!sq->first_pending)
                sq->first_pending = rb_first(&sq->pending_tree);

        if (sq->first_pending)
                return rb_entry_tg(sq->first_pending);

        return NULL;
}
static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
        rb_erase(n, root);
        RB_CLEAR_NODE(n);
}
static void throtl_rb_erase(struct rb_node *n, struct throtl_service_queue *sq)
{
        if (sq->first_pending == n)
                sq->first_pending = NULL;
        rb_erase_init(n, &sq->pending_tree);
        --sq->nr_pending;
}
static void update_min_dispatch_time(struct throtl_service_queue *sq)
{
        struct throtl_grp *tg;

        tg = throtl_rb_first(sq);
        if (!tg)
                return;

        sq->first_pending_disptime = tg->disptime;
}
static void tg_service_queue_add(struct throtl_service_queue *sq,
                                 struct throtl_grp *tg)
{
        struct rb_node **node = &sq->pending_tree.rb_node;
        struct rb_node *parent = NULL;
        struct throtl_grp *__tg;
        unsigned long key = tg->disptime;
        int left = 1;

        while (*node != NULL) {
                parent = *node;
                __tg = rb_entry_tg(parent);

                if (time_before(key, __tg->disptime)) {
                        node = &parent->rb_left;
                } else {
                        node = &parent->rb_right;
                        left = 0;
                }
        }

        if (left)
                sq->first_pending = &tg->rb_node;

        rb_link_node(&tg->rb_node, parent, node);
        rb_insert_color(&tg->rb_node, &sq->pending_tree);
}
static void __throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
        struct throtl_service_queue *sq = &td->service_queue;

        tg_service_queue_add(sq, tg);
        throtl_mark_tg_on_rr(tg);
        sq->nr_pending++;
}
static void throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
        if (!throtl_tg_on_rr(tg))
                __throtl_enqueue_tg(td, tg);
}
static void __throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
        throtl_rb_erase(&tg->rb_node, &td->service_queue);
        throtl_clear_tg_on_rr(tg);
}
static void throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
        if (throtl_tg_on_rr(tg))
                __throtl_dequeue_tg(td, tg);
}
/* Call with queue lock held */
static void throtl_schedule_delayed_work(struct throtl_data *td,
                                         unsigned long delay)
{
        struct delayed_work *dwork = &td->dispatch_work;

        mod_delayed_work(kthrotld_workqueue, dwork, delay);
        throtl_log(td, "schedule work. delay=%lu jiffies=%lu", delay, jiffies);
}
static void throtl_schedule_next_dispatch(struct throtl_data *td)
{
        struct throtl_service_queue *sq = &td->service_queue;

        /* any pending children left? */
        if (!sq->nr_pending)
                return;

        update_min_dispatch_time(sq);

        if (time_before_eq(sq->first_pending_disptime, jiffies))
                throtl_schedule_delayed_work(td, 0);
        else
                throtl_schedule_delayed_work(td, sq->first_pending_disptime - jiffies);
}
static inline void
throtl_start_new_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
        tg->bytes_disp[rw] = 0;
        tg->io_disp[rw] = 0;
        tg->slice_start[rw] = jiffies;
        tg->slice_end[rw] = jiffies + throtl_slice;
        throtl_log_tg(td, tg, "[%c] new slice start=%lu end=%lu jiffies=%lu",
                        rw == READ ? 'R' : 'W', tg->slice_start[rw],
                        tg->slice_end[rw], jiffies);
}
static inline void throtl_set_slice_end(struct throtl_data *td,
                struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
{
        tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
}
static inline void throtl_extend_slice(struct throtl_data *td,
                struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
{
        tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
        throtl_log_tg(td, tg, "[%c] extend slice start=%lu end=%lu jiffies=%lu",
                        rw == READ ? 'R' : 'W', tg->slice_start[rw],
                        tg->slice_end[rw], jiffies);
}
/* Determine if a previously allocated or extended slice is complete or not */
static bool
throtl_slice_used(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
        if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
                return false;

        return true;
}
/* Trim the used slices and adjust slice start accordingly */
static inline void
throtl_trim_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
        unsigned long nr_slices, time_elapsed, io_trim;
        u64 bytes_trim, tmp;

        BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));

        /*
         * If bps is unlimited (-1), the time slice doesn't get renewed.
         * Don't try to trim the slice if it has already expired; a new
         * slice will start when appropriate.
         */
        if (throtl_slice_used(td, tg, rw))
                return;

        /*
         * A bio has been dispatched. Also adjust slice_end. It might happen
         * that initially the cgroup limit was very low, resulting in a high
         * slice_end, but later the limit was bumped up and the bio was
         * dispatched sooner; then we need to reduce slice_end. A bogus high
         * slice_end is bad because it does not allow a new slice to start.
         */
        throtl_set_slice_end(td, tg, rw, jiffies + throtl_slice);

        time_elapsed = jiffies - tg->slice_start[rw];

        nr_slices = time_elapsed / throtl_slice;
        if (!nr_slices)
                return;

        tmp = tg->bps[rw] * throtl_slice * nr_slices;
        do_div(tmp, HZ);
        bytes_trim = tmp;

        io_trim = (tg->iops[rw] * throtl_slice * nr_slices)/HZ;

        if (!bytes_trim && !io_trim)
                return;

        if (tg->bytes_disp[rw] >= bytes_trim)
                tg->bytes_disp[rw] -= bytes_trim;
        else
                tg->bytes_disp[rw] = 0;

        if (tg->io_disp[rw] >= io_trim)
                tg->io_disp[rw] -= io_trim;
        else
                tg->io_disp[rw] = 0;

        tg->slice_start[rw] += nr_slices * throtl_slice;

        throtl_log_tg(td, tg, "[%c] trim slice nr=%lu bytes=%llu io=%lu"
                        " start=%lu end=%lu jiffies=%lu",
                        rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
                        tg->slice_start[rw], tg->slice_end[rw], jiffies);
}
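/*
 * Worked example for throtl_trim_slice() above (assuming HZ=1000, i.e.
 * throtl_slice == 100 jiffies): with bps[rw] == 1048576 (1MB/s) and 250
 * jiffies elapsed since slice_start, nr_slices = 2 and
 * bytes_trim = 1048576 * 100 * 2 / HZ ~= 209715, so roughly 200KB is
 * subtracted from bytes_disp and slice_start advances by 200 jiffies.
 */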
static bool tg_with_in_iops_limit(struct throtl_data *td, struct throtl_grp *tg,
                struct bio *bio, unsigned long *wait)
{
        bool rw = bio_data_dir(bio);
        unsigned int io_allowed;
        unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
        u64 tmp;

        jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

        /* Slice has just started. Consider one slice interval */
        if (!jiffy_elapsed)
                jiffy_elapsed_rnd = throtl_slice;

        jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

        /*
         * jiffy_elapsed_rnd should not be a big value: the minimum iops can
         * be 1, so at most the elapsed jiffies should be equivalent to 1
         * second, as we allow dispatch after 1 second and after that the
         * slice should have been trimmed.
         */
        tmp = (u64)tg->iops[rw] * jiffy_elapsed_rnd;
        do_div(tmp, HZ);

        if (tmp > UINT_MAX)
                io_allowed = UINT_MAX;
        else
                io_allowed = tmp;

        if (tg->io_disp[rw] + 1 <= io_allowed) {
                if (wait)
                        *wait = 0;
                return true;
        }

        /* Calc approx time to dispatch */
        jiffy_wait = ((tg->io_disp[rw] + 1) * HZ)/tg->iops[rw] + 1;

        if (jiffy_wait > jiffy_elapsed)
                jiffy_wait = jiffy_wait - jiffy_elapsed;
        else
                jiffy_wait = 0;

        if (wait)
                *wait = jiffy_wait;
        return false;
}
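/*
 * Example for tg_with_in_iops_limit() above (assuming HZ=100): with an
 * iops limit of 10 and io_disp[rw] already at 10 in the current slice,
 * the next bio gets jiffy_wait = ((10 + 1) * 100) / 10 + 1 = 111 jiffies,
 * minus whatever part of the slice has already elapsed.
 */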
static bool tg_with_in_bps_limit(struct throtl_data *td, struct throtl_grp *tg,
                struct bio *bio, unsigned long *wait)
{
        bool rw = bio_data_dir(bio);
        u64 bytes_allowed, extra_bytes, tmp;
        unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;

        jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

        /* Slice has just started. Consider one slice interval */
        if (!jiffy_elapsed)
                jiffy_elapsed_rnd = throtl_slice;

        jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

        tmp = tg->bps[rw] * jiffy_elapsed_rnd;
        do_div(tmp, HZ);
        bytes_allowed = tmp;

        if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed) {
                if (wait)
                        *wait = 0;
                return true;
        }

        /* Calc approx time to dispatch */
        extra_bytes = tg->bytes_disp[rw] + bio->bi_size - bytes_allowed;
        jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);

        if (!jiffy_wait)
                jiffy_wait = 1;

        /*
         * This wait time is without taking into consideration the rounding
         * up we did. Add that time also.
         */
        jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);

        if (wait)
                *wait = jiffy_wait;
        return false;
}
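/*
 * Example for tg_with_in_bps_limit() above (assuming HZ=100, so one slice
 * is 10 jiffies): with bps == 1048576 and 10 jiffies elapsed,
 * bytes_allowed ~= 104857.  A 256KB bio arriving with bytes_disp == 0
 * exceeds the budget by ~157287 bytes and must wait about
 * 157287 * HZ / 1048576 ~= 15 jiffies (150ms).
 */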
static bool tg_no_rule_group(struct throtl_grp *tg, bool rw) {
        if (tg->bps[rw] == -1 && tg->iops[rw] == -1)
                return true;
        return false;
}
/*
 * Returns whether one can dispatch a bio or not. Also returns the approx
 * number of jiffies to wait before this bio is within the IO rate and can
 * be dispatched.
 */
static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
                                struct bio *bio, unsigned long *wait)
{
        bool rw = bio_data_dir(bio);
        unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;

        /*
         * Currently the whole state machine of the group depends on the
         * first bio queued in the group's bio list. So one should not be
         * calling this function with a different bio if there are other
         * bios queued.
         */
        BUG_ON(tg->nr_queued[rw] && bio != bio_list_peek(&tg->bio_lists[rw]));

        /* If tg->bps = -1, then BW is unlimited */
        if (tg->bps[rw] == -1 && tg->iops[rw] == -1) {
                if (wait)
                        *wait = 0;
                return true;
        }

        /*
         * If the previous slice expired, start a new one, otherwise
         * renew/extend the existing slice to make sure it is at least
         * throtl_slice long since now.
         */
        if (throtl_slice_used(td, tg, rw))
                throtl_start_new_slice(td, tg, rw);
        else {
                if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
                        throtl_extend_slice(td, tg, rw, jiffies + throtl_slice);
        }

        if (tg_with_in_bps_limit(td, tg, bio, &bps_wait)
            && tg_with_in_iops_limit(td, tg, bio, &iops_wait)) {
                if (wait)
                        *wait = 0;
                return true;
        }

        max_wait = max(bps_wait, iops_wait);

        if (wait)
                *wait = max_wait;

        if (time_before(tg->slice_end[rw], jiffies + max_wait))
                throtl_extend_slice(td, tg, rw, jiffies + max_wait);

        return false;
}
static void throtl_update_dispatch_stats(struct blkcg_gq *blkg, u64 bytes,
                                         int rw)
{
        struct throtl_grp *tg = blkg_to_tg(blkg);
        struct tg_stats_cpu *stats_cpu;
        unsigned long flags;

        /* If per cpu stats are not allocated yet, don't do any accounting. */
        if (tg->stats_cpu == NULL)
                return;

        /*
         * Disabling interrupts to provide mutual exclusion between two
         * writes on the same cpu. It probably is not needed for 64bit. Not
         * optimizing that case yet.
         */
        local_irq_save(flags);

        stats_cpu = this_cpu_ptr(tg->stats_cpu);

        blkg_rwstat_add(&stats_cpu->serviced, rw, 1);
        blkg_rwstat_add(&stats_cpu->service_bytes, rw, bytes);

        local_irq_restore(flags);
}
static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
        bool rw = bio_data_dir(bio);

        /* Charge the bio to the group */
        tg->bytes_disp[rw] += bio->bi_size;
        tg->io_disp[rw]++;

        throtl_update_dispatch_stats(tg_to_blkg(tg), bio->bi_size, bio->bi_rw);
}
static void throtl_add_bio_tg(struct throtl_data *td, struct throtl_grp *tg,
                              struct bio *bio)
{
        bool rw = bio_data_dir(bio);

        bio_list_add(&tg->bio_lists[rw], bio);
        /* Take a bio reference on tg */
        blkg_get(tg_to_blkg(tg));
        tg->nr_queued[rw]++;
        td->nr_queued[rw]++;
        throtl_enqueue_tg(td, tg);
}
static void tg_update_disptime(struct throtl_data *td, struct throtl_grp *tg)
{
        unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
        struct bio *bio;

        if ((bio = bio_list_peek(&tg->bio_lists[READ])))
                tg_may_dispatch(td, tg, bio, &read_wait);

        if ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
                tg_may_dispatch(td, tg, bio, &write_wait);

        min_wait = min(read_wait, write_wait);
        disptime = jiffies + min_wait;

        /* Update dispatch time */
        throtl_dequeue_tg(td, tg);
        tg->disptime = disptime;
        throtl_enqueue_tg(td, tg);
}
static void tg_dispatch_one_bio(struct throtl_data *td, struct throtl_grp *tg,
                                bool rw, struct bio_list *bl)
{
        struct bio *bio;

        bio = bio_list_pop(&tg->bio_lists[rw]);
        tg->nr_queued[rw]--;
        /* Drop bio reference on blkg */
        blkg_put(tg_to_blkg(tg));

        BUG_ON(td->nr_queued[rw] <= 0);
        td->nr_queued[rw]--;

        throtl_charge_bio(tg, bio);
        bio_list_add(bl, bio);
        bio->bi_rw |= REQ_THROTTLED;

        throtl_trim_slice(td, tg, rw);
}
static int throtl_dispatch_tg(struct throtl_data *td, struct throtl_grp *tg,
                                struct bio_list *bl)
{
        unsigned int nr_reads = 0, nr_writes = 0;
        unsigned int max_nr_reads = throtl_grp_quantum*3/4;
        unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
        struct bio *bio;

        /* Try to dispatch 75% READS and 25% WRITES */

        while ((bio = bio_list_peek(&tg->bio_lists[READ]))
               && tg_may_dispatch(td, tg, bio, NULL)) {

                tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
                nr_reads++;

                if (nr_reads >= max_nr_reads)
                        break;
        }

        while ((bio = bio_list_peek(&tg->bio_lists[WRITE]))
               && tg_may_dispatch(td, tg, bio, NULL)) {

                tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
                nr_writes++;

                if (nr_writes >= max_nr_writes)
                        break;
        }

        return nr_reads + nr_writes;
}
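/*
 * In throtl_dispatch_tg() above, the default throtl_grp_quantum of 8
 * gives max_nr_reads = 6 and max_nr_writes = 2, so a single group
 * dispatches at most 6 reads and 2 writes per round.
 */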
static int throtl_select_dispatch(struct throtl_data *td, struct bio_list *bl)
{
        unsigned int nr_disp = 0;
        struct throtl_grp *tg;
        struct throtl_service_queue *sq = &td->service_queue;

        while (1) {
                tg = throtl_rb_first(sq);
                if (!tg)
                        break;

                if (time_before(jiffies, tg->disptime))
                        break;

                throtl_dequeue_tg(td, tg);

                nr_disp += throtl_dispatch_tg(td, tg, bl);

                if (tg->nr_queued[0] || tg->nr_queued[1])
                        tg_update_disptime(td, tg);

                if (nr_disp >= throtl_quantum)
                        break;
        }

        return nr_disp;
}
/* work function to dispatch throttled bios */
void blk_throtl_dispatch_work_fn(struct work_struct *work)
{
        struct throtl_data *td = container_of(to_delayed_work(work),
                                              struct throtl_data, dispatch_work);
        struct request_queue *q = td->queue;
        unsigned int nr_disp = 0;
        struct bio_list bio_list_on_stack;
        struct bio *bio;
        struct blk_plug plug;

        spin_lock_irq(q->queue_lock);

        bio_list_init(&bio_list_on_stack);

        throtl_log(td, "dispatch nr_queued=%u read=%u write=%u",
                   td->nr_queued[READ] + td->nr_queued[WRITE],
                   td->nr_queued[READ], td->nr_queued[WRITE]);

        nr_disp = throtl_select_dispatch(td, &bio_list_on_stack);

        if (nr_disp)
                throtl_log(td, "bios disp=%u", nr_disp);

        throtl_schedule_next_dispatch(td);

        spin_unlock_irq(q->queue_lock);

        /*
         * If we dispatched some requests, unplug the queue so that they
         * are issued immediately.
         */
        if (nr_disp) {
                blk_start_plug(&plug);
                while ((bio = bio_list_pop(&bio_list_on_stack)))
                        generic_make_request(bio);
                blk_finish_plug(&plug);
        }
}
static u64 tg_prfill_cpu_rwstat(struct seq_file *sf,
                                struct blkg_policy_data *pd, int off)
{
        struct throtl_grp *tg = pd_to_tg(pd);
        struct blkg_rwstat rwstat = { }, tmp;
        int i, cpu;

        for_each_possible_cpu(cpu) {
                struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);

                tmp = blkg_rwstat_read((void *)sc + off);
                for (i = 0; i < BLKG_RWSTAT_NR; i++)
                        rwstat.cnt[i] += tmp.cnt[i];
        }

        return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
static int tg_print_cpu_rwstat(struct cgroup *cgrp, struct cftype *cft,
                               struct seq_file *sf)
{
        struct blkcg *blkcg = cgroup_to_blkcg(cgrp);

        blkcg_print_blkgs(sf, blkcg, tg_prfill_cpu_rwstat, &blkcg_policy_throtl,
                          cft->private, true);
        return 0;
}
static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
                              int off)
{
        struct throtl_grp *tg = pd_to_tg(pd);
        u64 v = *(u64 *)((void *)tg + off);

        if (v == -1)
                return 0;
        return __blkg_prfill_u64(sf, pd, v);
}
static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
                               int off)
{
        struct throtl_grp *tg = pd_to_tg(pd);
        unsigned int v = *(unsigned int *)((void *)tg + off);

        if (v == -1)
                return 0;
        return __blkg_prfill_u64(sf, pd, v);
}
static int tg_print_conf_u64(struct cgroup *cgrp, struct cftype *cft,
                             struct seq_file *sf)
{
        blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_u64,
                          &blkcg_policy_throtl, cft->private, false);
        return 0;
}
static int tg_print_conf_uint(struct cgroup *cgrp, struct cftype *cft,
                              struct seq_file *sf)
{
        blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_uint,
                          &blkcg_policy_throtl, cft->private, false);
        return 0;
}
static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf,
                       bool is_u64)
{
        struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
        struct blkg_conf_ctx ctx;
        struct throtl_grp *tg;
        struct throtl_data *td;
        int ret;

        ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
        if (ret)
                return ret;

        tg = blkg_to_tg(ctx.blkg);
        td = ctx.blkg->q->td;

        if (is_u64)
                *(u64 *)((void *)tg + cft->private) = ctx.v;
        else
                *(unsigned int *)((void *)tg + cft->private) = ctx.v;

        throtl_log_tg(td, tg, "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
                      tg->bps[READ], tg->bps[WRITE],
                      tg->iops[READ], tg->iops[WRITE]);

        /*
         * We're already holding queue_lock and know @tg is valid. Let's
         * apply the new config directly.
         *
         * Restart the slices for both READ and WRITE. It might happen
         * that a group's limits are dropped suddenly and we don't want to
         * account recently dispatched IO against the new low rate.
         */
        throtl_start_new_slice(td, tg, 0);
        throtl_start_new_slice(td, tg, 1);

        if (throtl_tg_on_rr(tg)) {
                tg_update_disptime(td, tg);
                throtl_schedule_next_dispatch(td);
        }

        blkg_conf_finish(&ctx);
        return 0;
}
static int tg_set_conf_u64(struct cgroup *cgrp, struct cftype *cft,
                           const char *buf)
{
        return tg_set_conf(cgrp, cft, buf, true);
}
*cgrp
, struct cftype
*cft
,
985 return tg_set_conf(cgrp
, cft
, buf
, false);
static struct cftype throtl_files[] = {
        {
                .name = "throttle.read_bps_device",
                .private = offsetof(struct throtl_grp, bps[READ]),
                .read_seq_string = tg_print_conf_u64,
                .write_string = tg_set_conf_u64,
                .max_write_len = 256,
        },
        {
                .name = "throttle.write_bps_device",
                .private = offsetof(struct throtl_grp, bps[WRITE]),
                .read_seq_string = tg_print_conf_u64,
                .write_string = tg_set_conf_u64,
                .max_write_len = 256,
        },
        {
                .name = "throttle.read_iops_device",
                .private = offsetof(struct throtl_grp, iops[READ]),
                .read_seq_string = tg_print_conf_uint,
                .write_string = tg_set_conf_uint,
                .max_write_len = 256,
        },
        {
                .name = "throttle.write_iops_device",
                .private = offsetof(struct throtl_grp, iops[WRITE]),
                .read_seq_string = tg_print_conf_uint,
                .write_string = tg_set_conf_uint,
                .max_write_len = 256,
        },
        {
                .name = "throttle.io_service_bytes",
                .private = offsetof(struct tg_stats_cpu, service_bytes),
                .read_seq_string = tg_print_cpu_rwstat,
        },
        {
                .name = "throttle.io_serviced",
                .private = offsetof(struct tg_stats_cpu, serviced),
                .read_seq_string = tg_print_cpu_rwstat,
        },
        { }	/* terminate */
};
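/*
 * These files appear under the blkio cgroup hierarchy. For example (the
 * device numbers and mount point below are only an illustration):
 *
 *   echo "8:16 1048576" > \
 *     /sys/fs/cgroup/blkio/grp1/blkio.throttle.read_bps_device
 *
 * limits reads from device 8:16 for grp1 to 1MB/s.
 */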
static void throtl_shutdown_wq(struct request_queue *q)
{
        struct throtl_data *td = q->td;

        cancel_delayed_work_sync(&td->dispatch_work);
}
static struct blkcg_policy blkcg_policy_throtl = {
        .pd_size		= sizeof(struct throtl_grp),
        .cftypes		= throtl_files,

        .pd_init_fn		= throtl_pd_init,
        .pd_exit_fn		= throtl_pd_exit,
        .pd_reset_stats_fn	= throtl_pd_reset_stats,
};
bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
{
        struct throtl_data *td = q->td;
        struct throtl_grp *tg;
        bool rw = bio_data_dir(bio), update_disptime = true;
        struct blkcg *blkcg;
        bool throttled = false;

        if (bio->bi_rw & REQ_THROTTLED) {
                bio->bi_rw &= ~REQ_THROTTLED;
                goto out;
        }

        /*
         * A throtl_grp pointer retrieved under rcu can be used to access
         * basic fields like stats and io rates. If a group has no rules,
         * just update the dispatch stats in a lockless manner and return.
         */
        rcu_read_lock();
        blkcg = bio_blkcg(bio);
        tg = throtl_lookup_tg(td, blkcg);
        if (tg) {
                if (tg_no_rule_group(tg, rw)) {
                        throtl_update_dispatch_stats(tg_to_blkg(tg),
                                                     bio->bi_size, bio->bi_rw);
                        goto out_unlock_rcu;
                }
        }

        /*
         * Either the group has not been allocated yet or it is not an
         * unlimited IO group.
         */
        spin_lock_irq(q->queue_lock);
        tg = throtl_lookup_create_tg(td, blkcg);
        if (unlikely(!tg))
                goto out_unlock;

        if (tg->nr_queued[rw]) {
                /*
                 * There is already another bio queued in the same
                 * direction. No need to update dispatch time.
                 */
                update_disptime = false;
                goto queue_bio;
        }

        /* Bio is within the rate limit of the group */
        if (tg_may_dispatch(td, tg, bio, NULL)) {
                throtl_charge_bio(tg, bio);

                /*
                 * We need to trim the slice even when bios are not being
                 * queued, otherwise it might happen that a bio is not queued
                 * for a long time, the slice keeps on extending and trim is
                 * not called for a long time. Then, if limits are reduced
                 * suddenly, we take into account all the IO dispatched so
                 * far at the new low rate and newly queued IO gets a really
                 * long dispatch time.
                 *
                 * So keep on trimming the slice even if the bio is not queued.
                 */
                throtl_trim_slice(td, tg, rw);
                goto out_unlock;
        }

queue_bio:
        throtl_log_tg(td, tg, "[%c] bio. bdisp=%llu sz=%u bps=%llu"
                        " iodisp=%u iops=%u queued=%d/%d",
                        rw == READ ? 'R' : 'W',
                        tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
                        tg->io_disp[rw], tg->iops[rw],
                        tg->nr_queued[READ], tg->nr_queued[WRITE]);

        bio_associate_current(bio);
        throtl_add_bio_tg(q->td, tg, bio);
        throttled = true;

        if (update_disptime) {
                tg_update_disptime(td, tg);
                throtl_schedule_next_dispatch(td);
        }

out_unlock:
        spin_unlock_irq(q->queue_lock);
out_unlock_rcu:
        rcu_read_unlock();
out:
        return throttled;
}
/**
 * blk_throtl_drain - drain throttled bios
 * @q: request_queue to drain throttled bios for
 *
 * Dispatch all currently throttled bios on @q through ->make_request_fn().
 */
void blk_throtl_drain(struct request_queue *q)
        __releases(q->queue_lock) __acquires(q->queue_lock)
{
        struct throtl_data *td = q->td;
        struct throtl_service_queue *sq = &td->service_queue;
        struct throtl_grp *tg;
        struct bio_list bl;
        struct bio *bio;

        queue_lockdep_assert_held(q);

        bio_list_init(&bl);

        while ((tg = throtl_rb_first(sq))) {
                throtl_dequeue_tg(td, tg);

                while ((bio = bio_list_peek(&tg->bio_lists[READ])))
                        tg_dispatch_one_bio(td, tg, bio_data_dir(bio), &bl);
                while ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
                        tg_dispatch_one_bio(td, tg, bio_data_dir(bio), &bl);
        }
        spin_unlock_irq(q->queue_lock);

        while ((bio = bio_list_pop(&bl)))
                generic_make_request(bio);

        spin_lock_irq(q->queue_lock);
}
int blk_throtl_init(struct request_queue *q)
{
        struct throtl_data *td;
        int ret;

        td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
        if (!td)
                return -ENOMEM;

        td->service_queue = THROTL_SERVICE_QUEUE_INITIALIZER;
        INIT_DELAYED_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);

        q->td = td;
        td->queue = q;

        /* activate policy */
        ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
        if (ret)
                kfree(td);
        return ret;
}
void blk_throtl_exit(struct request_queue *q)
{
        throtl_shutdown_wq(q);
        blkcg_deactivate_policy(q, &blkcg_policy_throtl);
        kfree(q->td);
}
static int __init throtl_init(void)
{
        kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
        if (!kthrotld_workqueue)
                panic("Failed to create kthrotld\n");

        return blkcg_policy_register(&blkcg_policy_throtl);
}

module_init(throtl_init);