/*
 * Interface for controlling IO bandwidth on a request queue
 *
 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blktrace_api.h>
#include "blk-cgroup.h"
#include "blk.h"

/* Max dispatch from a group in 1 round */
static int throtl_grp_quantum = 8;

/* Total max dispatch from all groups in one round */
static int throtl_quantum = 32;

/* Throttling is performed over a 100ms slice, after which the slice is renewed */
static unsigned long throtl_slice = HZ/10;      /* 100 ms */

static struct blkcg_policy blkcg_policy_throtl;

/* A workqueue to queue throttle related work */
static struct workqueue_struct *kthrotld_workqueue;
static void throtl_schedule_delayed_work(struct throtl_data *td,
                                         unsigned long delay);

struct throtl_rb_root {
        struct rb_root rb;
        struct rb_node *left;
        unsigned int count;
        unsigned long min_disptime;
};

#define THROTL_RB_ROOT  (struct throtl_rb_root) { .rb = RB_ROOT, .left = NULL, \
                        .count = 0, .min_disptime = 0}

#define rb_entry_tg(node)       rb_entry((node), struct throtl_grp, rb_node)

/* Per-cpu group stats */
struct tg_stats_cpu {
        /* total bytes transferred */
        struct blkg_rwstat service_bytes;
        /* total IOs serviced, post merge */
        struct blkg_rwstat serviced;
};

struct throtl_grp {
        /* must be the first member */
        struct blkg_policy_data pd;

        /* active throtl group service_tree member */
        struct rb_node rb_node;

        /*
         * Dispatch time in jiffies. This is the estimated time when the
         * group will unthrottle and be ready to dispatch more bios. It is
         * used as the key to sort active groups in the service tree.
         */
        unsigned long disptime;

        unsigned int flags;

        /* Two lists for READ and WRITE */
        struct bio_list bio_lists[2];

        /* Number of queued bios on READ and WRITE lists */
        unsigned int nr_queued[2];

        /* bytes per second rate limits */
        uint64_t bps[2];

        /* IOPS limits */
        unsigned int iops[2];

        /* Number of bytes dispatched in current slice */
        uint64_t bytes_disp[2];
        /* Number of bios dispatched in current slice */
        unsigned int io_disp[2];

        /* When did we start a new slice */
        unsigned long slice_start[2];
        unsigned long slice_end[2];

        /* Some throttle limits got updated for the group */
        int limits_changed;

        /* Per cpu stats pointer */
        struct tg_stats_cpu __percpu *stats_cpu;

        /* List of tgs waiting for per cpu stats memory to be allocated */
        struct list_head stats_alloc_node;
};

struct throtl_data
{
        /* service tree for active throtl groups */
        struct throtl_rb_root tg_service_tree;

        struct request_queue *queue;

        /* Total number of queued bios on READ and WRITE lists */
        unsigned int nr_queued[2];

        /* total number of undestroyed groups */
        unsigned int nr_undestroyed_grps;

        /* Work for dispatching throttled bios */
        struct delayed_work throtl_work;

        int limits_changed;
};

/* list and work item to allocate percpu group stats */
static DEFINE_SPINLOCK(tg_stats_alloc_lock);
static LIST_HEAD(tg_stats_alloc_list);

static void tg_stats_alloc_fn(struct work_struct *);
static DECLARE_DELAYED_WORK(tg_stats_alloc_work, tg_stats_alloc_fn);

static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
{
        return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
}

static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
{
        return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
}

static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
{
        return pd_to_blkg(&tg->pd);
}

static inline struct throtl_grp *td_root_tg(struct throtl_data *td)
{
        return blkg_to_tg(td->queue->root_blkg);
}

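/*
 * Editorial note (not in the original source): because pd is the first
 * member of struct throtl_grp, container_of() in pd_to_tg() subtracts a
 * zero offset. The conversions round-trip, e.g.:
 *
 *      struct blkcg_gq *blkg = tg_to_blkg(tg);
 *      BUG_ON(blkg_to_tg(blkg) != tg);
 */
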
enum tg_state_flags {
        THROTL_TG_FLAG_on_rr = 0,       /* on round-robin busy list */
};

#define THROTL_TG_FNS(name)                                             \
static inline void throtl_mark_tg_##name(struct throtl_grp *tg)        \
{                                                                       \
        (tg)->flags |= (1 << THROTL_TG_FLAG_##name);                    \
}                                                                       \
static inline void throtl_clear_tg_##name(struct throtl_grp *tg)       \
{                                                                       \
        (tg)->flags &= ~(1 << THROTL_TG_FLAG_##name);                   \
}                                                                       \
static inline int throtl_tg_##name(const struct throtl_grp *tg)        \
{                                                                       \
        return ((tg)->flags & (1 << THROTL_TG_FLAG_##name)) != 0;       \
}

THROTL_TG_FNS(on_rr);

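/*
 * Editorial note (not in the original source): THROTL_TG_FNS(on_rr) above
 * expands to three helpers that set, clear and test the
 * THROTL_TG_FLAG_on_rr bit in tg->flags:
 *
 *      throtl_mark_tg_on_rr(tg);
 *      throtl_clear_tg_on_rr(tg);
 *      if (throtl_tg_on_rr(tg))
 *              ... group is on the service tree ...
 */
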
#define throtl_log_tg(td, tg, fmt, args...)     do {                    \
        char __pbuf[128];                                               \
                                                                        \
        blkg_path(tg_to_blkg(tg), __pbuf, sizeof(__pbuf));              \
        blk_add_trace_msg((td)->queue, "throtl %s " fmt, __pbuf, ##args); \
} while (0)

#define throtl_log(td, fmt, args...)    \
        blk_add_trace_msg((td)->queue, "throtl " fmt, ##args)

static inline unsigned int total_nr_queued(struct throtl_data *td)
{
        return td->nr_queued[0] + td->nr_queued[1];
}

/*
 * Worker for allocating per cpu stat for tgs. This is scheduled on the
 * system_nrt_wq once there are some groups on the alloc_list waiting for
 * allocation.
 */
static void tg_stats_alloc_fn(struct work_struct *work)
{
        static struct tg_stats_cpu *stats_cpu; /* this fn is non-reentrant */
        struct delayed_work *dwork = to_delayed_work(work);
        bool empty = false;

alloc_stats:
        if (!stats_cpu) {
                stats_cpu = alloc_percpu(struct tg_stats_cpu);
                if (!stats_cpu) {
                        /* allocation failed, try again after some time */
                        queue_delayed_work(system_nrt_wq, dwork,
                                           msecs_to_jiffies(10));
                        return;
                }
        }

        spin_lock_irq(&tg_stats_alloc_lock);

        if (!list_empty(&tg_stats_alloc_list)) {
                struct throtl_grp *tg = list_first_entry(&tg_stats_alloc_list,
                                                         struct throtl_grp,
                                                         stats_alloc_node);
                swap(tg->stats_cpu, stats_cpu);
                list_del_init(&tg->stats_alloc_node);
        }

        empty = list_empty(&tg_stats_alloc_list);
        spin_unlock_irq(&tg_stats_alloc_lock);
        if (!empty)
                goto alloc_stats;
}

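/*
 * Editorial note (not in the original source): the swap() above is the
 * hand-off point of the pre-allocation pattern. Groups on the alloc list
 * have tg->stats_cpu == NULL, so after swap() the group owns the freshly
 * allocated buffer while the function-static stats_cpu becomes NULL again,
 * forcing a new alloc_percpu() on the next loop iteration.
 */
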
static void throtl_pd_init(struct blkcg_gq *blkg)
{
        struct throtl_grp *tg = blkg_to_tg(blkg);

        RB_CLEAR_NODE(&tg->rb_node);
        bio_list_init(&tg->bio_lists[0]);
        bio_list_init(&tg->bio_lists[1]);
        tg->limits_changed = false;

        tg->bps[READ] = -1;
        tg->bps[WRITE] = -1;
        tg->iops[READ] = -1;
        tg->iops[WRITE] = -1;

        /*
         * Ugh... We need to perform per-cpu allocation for tg->stats_cpu
         * but the percpu allocator can't be called from the IO path. Queue
         * tg on tg_stats_alloc_list and allocate from a work item.
         */
        spin_lock(&tg_stats_alloc_lock);
        list_add(&tg->stats_alloc_node, &tg_stats_alloc_list);
        queue_delayed_work(system_nrt_wq, &tg_stats_alloc_work, 0);
        spin_unlock(&tg_stats_alloc_lock);
}

static void throtl_pd_exit(struct blkcg_gq *blkg)
{
        struct throtl_grp *tg = blkg_to_tg(blkg);

        spin_lock(&tg_stats_alloc_lock);
        list_del_init(&tg->stats_alloc_node);
        spin_unlock(&tg_stats_alloc_lock);

        free_percpu(tg->stats_cpu);
}

static void throtl_pd_reset_stats(struct blkcg_gq *blkg)
{
        struct throtl_grp *tg = blkg_to_tg(blkg);
        int cpu;

        if (tg->stats_cpu == NULL)
                return;

        for_each_possible_cpu(cpu) {
                struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);

                blkg_rwstat_reset(&sc->service_bytes);
                blkg_rwstat_reset(&sc->serviced);
        }
}

static struct throtl_grp *throtl_lookup_tg(struct throtl_data *td,
                                           struct blkcg *blkcg)
{
        /*
         * This is the common case when there are no blkcgs. Avoid lookup
         * in this case.
         */
        if (blkcg == &blkcg_root)
                return td_root_tg(td);

        return blkg_to_tg(blkg_lookup(blkcg, td->queue));
}

static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
                                                  struct blkcg *blkcg)
{
        struct request_queue *q = td->queue;
        struct throtl_grp *tg = NULL;

        /*
         * This is the common case when there are no blkcgs. Avoid lookup
         * in this case.
         */
        if (blkcg == &blkcg_root) {
                tg = td_root_tg(td);
        } else {
                struct blkcg_gq *blkg;

                blkg = blkg_lookup_create(blkcg, q);

                /* if %NULL and @q is alive, fall back to root_tg */
                if (!IS_ERR(blkg))
                        tg = blkg_to_tg(blkg);
                else if (!blk_queue_dead(q))
                        tg = td_root_tg(td);
        }

        return tg;
}

static struct throtl_grp *throtl_rb_first(struct throtl_rb_root *root)
{
        /* Service tree is empty */
        if (!root->count)
                return NULL;

        if (!root->left)
                root->left = rb_first(&root->rb);

        if (root->left)
                return rb_entry_tg(root->left);

        return NULL;
}

static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
        rb_erase(n, root);
        RB_CLEAR_NODE(n);
}

static void throtl_rb_erase(struct rb_node *n, struct throtl_rb_root *root)
{
        if (root->left == n)
                root->left = NULL;
        rb_erase_init(n, &root->rb);
        --root->count;
}

static void update_min_dispatch_time(struct throtl_rb_root *st)
{
        struct throtl_grp *tg;

        tg = throtl_rb_first(st);
        if (!tg)
                return;

        st->min_disptime = tg->disptime;
}

static void
tg_service_tree_add(struct throtl_rb_root *st, struct throtl_grp *tg)
{
        struct rb_node **node = &st->rb.rb_node;
        struct rb_node *parent = NULL;
        struct throtl_grp *__tg;
        unsigned long key = tg->disptime;
        int left = 1;

        while (*node != NULL) {
                parent = *node;
                __tg = rb_entry_tg(parent);

                if (time_before(key, __tg->disptime))
                        node = &parent->rb_left;
                else {
                        node = &parent->rb_right;
                        left = 0;
                }
        }

        if (left)
                st->left = &tg->rb_node;

        rb_link_node(&tg->rb_node, parent, node);
        rb_insert_color(&tg->rb_node, &st->rb);
}
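
/*
 * Editorial note (not in the original source): the tree is keyed by
 * disptime, and st->left caches the leftmost (soonest-to-dispatch) node so
 * throtl_rb_first() avoids an O(log n) walk in the common case. The "left"
 * flag above stays true only when the new node was inserted on the far
 * left, i.e. it becomes the new minimum.
 */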

static void __throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
        struct throtl_rb_root *st = &td->tg_service_tree;

        tg_service_tree_add(st, tg);
        throtl_mark_tg_on_rr(tg);
        st->count++;
}

static void throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
        if (!throtl_tg_on_rr(tg))
                __throtl_enqueue_tg(td, tg);
}

static void __throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
        throtl_rb_erase(&tg->rb_node, &td->tg_service_tree);
        throtl_clear_tg_on_rr(tg);
}

static void throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
        if (throtl_tg_on_rr(tg))
                __throtl_dequeue_tg(td, tg);
}

static void throtl_schedule_next_dispatch(struct throtl_data *td)
{
        struct throtl_rb_root *st = &td->tg_service_tree;

        /*
         * If there are more bios pending, schedule more work.
         */
        if (!total_nr_queued(td))
                return;

        BUG_ON(!st->count);

        update_min_dispatch_time(st);

        if (time_before_eq(st->min_disptime, jiffies))
                throtl_schedule_delayed_work(td, 0);
        else
                throtl_schedule_delayed_work(td, (st->min_disptime - jiffies));
}

static inline void
throtl_start_new_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
        tg->bytes_disp[rw] = 0;
        tg->io_disp[rw] = 0;
        tg->slice_start[rw] = jiffies;
        tg->slice_end[rw] = jiffies + throtl_slice;
        throtl_log_tg(td, tg, "[%c] new slice start=%lu end=%lu jiffies=%lu",
                      rw == READ ? 'R' : 'W', tg->slice_start[rw],
                      tg->slice_end[rw], jiffies);
}

static inline void throtl_set_slice_end(struct throtl_data *td,
                struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
{
        tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
}

static inline void throtl_extend_slice(struct throtl_data *td,
                struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
{
        tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
        throtl_log_tg(td, tg, "[%c] extend slice start=%lu end=%lu jiffies=%lu",
                      rw == READ ? 'R' : 'W', tg->slice_start[rw],
                      tg->slice_end[rw], jiffies);
}

/* Determine if previously allocated or extended slice is complete or not */
static bool
throtl_slice_used(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
        if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
                return 0;

        return 1;
}

/* Trim the used slices and adjust slice start accordingly */
static inline void
throtl_trim_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
        unsigned long nr_slices, time_elapsed, io_trim;
        u64 bytes_trim, tmp;

        BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));

        /*
         * If bps are unlimited (-1), then the time slice doesn't get
         * renewed. Don't try to trim the slice if the slice is used. A new
         * slice will start when appropriate.
         */
        if (throtl_slice_used(td, tg, rw))
                return;

        /*
         * A bio has been dispatched. Also adjust slice_end. It might happen
         * that initially the cgroup limit was very low, resulting in a high
         * slice_end, but later the limit was bumped up and the bio was
         * dispatched sooner; then we need to reduce slice_end. A high bogus
         * slice_end is bad because it does not allow a new slice to start.
         */

        throtl_set_slice_end(td, tg, rw, jiffies + throtl_slice);

        time_elapsed = jiffies - tg->slice_start[rw];

        nr_slices = time_elapsed / throtl_slice;

        if (!nr_slices)
                return;
        tmp = tg->bps[rw] * throtl_slice * nr_slices;
        do_div(tmp, HZ);
        bytes_trim = tmp;

        io_trim = (tg->iops[rw] * throtl_slice * nr_slices)/HZ;

        if (!bytes_trim && !io_trim)
                return;

        if (tg->bytes_disp[rw] >= bytes_trim)
                tg->bytes_disp[rw] -= bytes_trim;
        else
                tg->bytes_disp[rw] = 0;

        if (tg->io_disp[rw] >= io_trim)
                tg->io_disp[rw] -= io_trim;
        else
                tg->io_disp[rw] = 0;

        tg->slice_start[rw] += nr_slices * throtl_slice;

        throtl_log_tg(td, tg, "[%c] trim slice nr=%lu bytes=%llu io=%lu"
                        " start=%lu end=%lu jiffies=%lu",
                        rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
                        tg->slice_start[rw], tg->slice_end[rw], jiffies);
}
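
/*
 * Editorial worked example (not in the original source, assuming HZ=1000
 * and throtl_slice = HZ/10 = 100 jiffies): with bps[rw] = 1048576 (1MB/s)
 * and 250 jiffies elapsed, nr_slices = 250/100 = 2, so
 * bytes_trim = 1048576 * 100 * 2 / 1000 = 209715. Roughly 200KB of
 * already-charged budget is forgiven and slice_start advances by exactly
 * two slices (200 jiffies).
 */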

static bool tg_with_in_iops_limit(struct throtl_data *td, struct throtl_grp *tg,
                struct bio *bio, unsigned long *wait)
{
        bool rw = bio_data_dir(bio);
        unsigned int io_allowed;
        unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
        u64 tmp;

        jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

        /* Slice has just started. Consider one slice interval */
        if (!jiffy_elapsed)
                jiffy_elapsed_rnd = throtl_slice;

        jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

        /*
         * jiffy_elapsed_rnd should not be a big value: the minimum iops can
         * be 1, in which case at most the equivalent of 1 second can elapse
         * before a dispatch is allowed, and by then the slice should have
         * been trimmed.
         */

        tmp = (u64)tg->iops[rw] * jiffy_elapsed_rnd;
        do_div(tmp, HZ);

        if (tmp > UINT_MAX)
                io_allowed = UINT_MAX;
        else
                io_allowed = tmp;

        if (tg->io_disp[rw] + 1 <= io_allowed) {
                if (wait)
                        *wait = 0;
                return 1;
        }

        /* Calc approx time to dispatch */
        jiffy_wait = ((tg->io_disp[rw] + 1) * HZ)/tg->iops[rw] + 1;

        if (jiffy_wait > jiffy_elapsed)
                jiffy_wait = jiffy_wait - jiffy_elapsed;
        else
                jiffy_wait = 1;

        if (wait)
                *wait = jiffy_wait;
        return 0;
}
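
/*
 * Editorial worked example (not in the original source, assuming HZ=1000):
 * with iops[rw] = 100 and jiffy_elapsed_rnd = 100 (one 100ms slice),
 * io_allowed = 100 * 100 / 1000 = 10. The 11th bio in the slice is over
 * the limit; jiffy_wait = (11 * 1000)/100 + 1 = 111 jiffies, minus the
 * ~100 jiffies already elapsed, so the caller waits about 11ms.
 */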

static bool tg_with_in_bps_limit(struct throtl_data *td, struct throtl_grp *tg,
                struct bio *bio, unsigned long *wait)
{
        bool rw = bio_data_dir(bio);
        u64 bytes_allowed, extra_bytes, tmp;
        unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;

        jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

        /* Slice has just started. Consider one slice interval */
        if (!jiffy_elapsed)
                jiffy_elapsed_rnd = throtl_slice;

        jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

        tmp = tg->bps[rw] * jiffy_elapsed_rnd;
        do_div(tmp, HZ);
        bytes_allowed = tmp;

        if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed) {
                if (wait)
                        *wait = 0;
                return 1;
        }

        /* Calc approx time to dispatch */
        extra_bytes = tg->bytes_disp[rw] + bio->bi_size - bytes_allowed;
        jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);

        if (!jiffy_wait)
                jiffy_wait = 1;

        /*
         * This wait time is without taking into consideration the rounding
         * up we did. Add that time also.
         */
        jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
        if (wait)
                *wait = jiffy_wait;
        return 0;
}
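
/*
 * Editorial worked example (not in the original source, assuming HZ=1000):
 * with bps[rw] = 1048576 (1MB/s) and jiffy_elapsed_rnd = 100 jiffies,
 * bytes_allowed = 1048576 * 100 / 1000 = 104857 (~100KB). A 256KB bio
 * arriving on a fresh slice has extra_bytes = 262144 - 104857 = 157287,
 * so jiffy_wait = 157287 * 1000 / 1048576 = 150 jiffies (~150ms).
 */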

static bool tg_no_rule_group(struct throtl_grp *tg, bool rw) {
        if (tg->bps[rw] == -1 && tg->iops[rw] == -1)
                return 1;
        return 0;
}

/*
 * Returns whether one can dispatch a bio or not. Also returns the approx
 * number of jiffies to wait before this bio is within the IO rate and can
 * be dispatched.
 */
static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
                            struct bio *bio, unsigned long *wait)
{
        bool rw = bio_data_dir(bio);
        unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;

        /*
         * Currently the whole state machine of the group depends on the
         * first bio queued in the group's bio list. So one should not be
         * calling this function with a different bio if there are other
         * bios queued.
         */
        BUG_ON(tg->nr_queued[rw] && bio != bio_list_peek(&tg->bio_lists[rw]));

        /* If tg->bps = -1, then BW is unlimited */
        if (tg->bps[rw] == -1 && tg->iops[rw] == -1) {
                if (wait)
                        *wait = 0;
                return 1;
        }

        /*
         * If the previous slice expired, start a new one, otherwise
         * renew/extend the existing slice to make sure it is at least
         * throtl_slice interval long since now.
         */
        if (throtl_slice_used(td, tg, rw))
                throtl_start_new_slice(td, tg, rw);
        else {
                if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
                        throtl_extend_slice(td, tg, rw, jiffies + throtl_slice);
        }

        if (tg_with_in_bps_limit(td, tg, bio, &bps_wait)
            && tg_with_in_iops_limit(td, tg, bio, &iops_wait)) {
                if (wait)
                        *wait = 0;
                return 1;
        }

        max_wait = max(bps_wait, iops_wait);

        if (wait)
                *wait = max_wait;

        if (time_before(tg->slice_end[rw], jiffies + max_wait))
                throtl_extend_slice(td, tg, rw, jiffies + max_wait);

        return 0;
}

static void throtl_update_dispatch_stats(struct blkcg_gq *blkg, u64 bytes,
                                         int rw)
{
        struct throtl_grp *tg = blkg_to_tg(blkg);
        struct tg_stats_cpu *stats_cpu;
        unsigned long flags;

        /* If per cpu stats are not allocated yet, don't do any accounting. */
        if (tg->stats_cpu == NULL)
                return;

        /*
         * Disabling interrupts to provide mutual exclusion between two
         * writes on same cpu. It probably is not needed for 64bit. Not
         * optimizing that case yet.
         */
        local_irq_save(flags);

        stats_cpu = this_cpu_ptr(tg->stats_cpu);

        blkg_rwstat_add(&stats_cpu->serviced, rw, 1);
        blkg_rwstat_add(&stats_cpu->service_bytes, rw, bytes);

        local_irq_restore(flags);
}

static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
        bool rw = bio_data_dir(bio);

        /* Charge the bio to the group */
        tg->bytes_disp[rw] += bio->bi_size;
        tg->io_disp[rw]++;

        throtl_update_dispatch_stats(tg_to_blkg(tg), bio->bi_size, bio->bi_rw);
}

static void throtl_add_bio_tg(struct throtl_data *td, struct throtl_grp *tg,
                              struct bio *bio)
{
        bool rw = bio_data_dir(bio);

        bio_list_add(&tg->bio_lists[rw], bio);
        /* Take a bio reference on tg */
        blkg_get(tg_to_blkg(tg));
        tg->nr_queued[rw]++;
        td->nr_queued[rw]++;
        throtl_enqueue_tg(td, tg);
}

static void tg_update_disptime(struct throtl_data *td, struct throtl_grp *tg)
{
        unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
        struct bio *bio;

        if ((bio = bio_list_peek(&tg->bio_lists[READ])))
                tg_may_dispatch(td, tg, bio, &read_wait);

        if ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
                tg_may_dispatch(td, tg, bio, &write_wait);

        min_wait = min(read_wait, write_wait);
        disptime = jiffies + min_wait;

        /* Update dispatch time */
        throtl_dequeue_tg(td, tg);
        tg->disptime = disptime;
        throtl_enqueue_tg(td, tg);
}

static void tg_dispatch_one_bio(struct throtl_data *td, struct throtl_grp *tg,
                                bool rw, struct bio_list *bl)
{
        struct bio *bio;

        bio = bio_list_pop(&tg->bio_lists[rw]);
        tg->nr_queued[rw]--;
        /* Drop bio reference on blkg */
        blkg_put(tg_to_blkg(tg));

        BUG_ON(td->nr_queued[rw] <= 0);
        td->nr_queued[rw]--;

        throtl_charge_bio(tg, bio);
        bio_list_add(bl, bio);
        bio->bi_rw |= REQ_THROTTLED;

        throtl_trim_slice(td, tg, rw);
}

static int throtl_dispatch_tg(struct throtl_data *td, struct throtl_grp *tg,
                              struct bio_list *bl)
{
        unsigned int nr_reads = 0, nr_writes = 0;
        unsigned int max_nr_reads = throtl_grp_quantum*3/4;
        unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
        struct bio *bio;

        /* Try to dispatch 75% READS and 25% WRITES */

        while ((bio = bio_list_peek(&tg->bio_lists[READ]))
               && tg_may_dispatch(td, tg, bio, NULL)) {

                tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
                nr_reads++;

                if (nr_reads >= max_nr_reads)
                        break;
        }

        while ((bio = bio_list_peek(&tg->bio_lists[WRITE]))
               && tg_may_dispatch(td, tg, bio, NULL)) {

                tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
                nr_writes++;

                if (nr_writes >= max_nr_writes)
                        break;
        }

        return nr_reads + nr_writes;
}
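
/*
 * Editorial note (not in the original source): with the default
 * throtl_grp_quantum = 8, max_nr_reads = 8*3/4 = 6 and
 * max_nr_writes = 8 - 6 = 2, i.e. at most 6 reads and 2 writes per group
 * per round, which is where the 75%/25% split in the comment above comes
 * from.
 */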

static int throtl_select_dispatch(struct throtl_data *td, struct bio_list *bl)
{
        unsigned int nr_disp = 0;
        struct throtl_grp *tg;
        struct throtl_rb_root *st = &td->tg_service_tree;

        while (1) {
                tg = throtl_rb_first(st);

                if (!tg)
                        break;

                if (time_before(jiffies, tg->disptime))
                        break;

                throtl_dequeue_tg(td, tg);

                nr_disp += throtl_dispatch_tg(td, tg, bl);

                if (tg->nr_queued[0] || tg->nr_queued[1]) {
                        tg_update_disptime(td, tg);
                        throtl_enqueue_tg(td, tg);
                }

                if (nr_disp >= throtl_quantum)
                        break;
        }

        return nr_disp;
}

static void throtl_process_limit_change(struct throtl_data *td)
{
        struct request_queue *q = td->queue;
        struct blkcg_gq *blkg, *n;

        if (!td->limits_changed)
                return;

        xchg(&td->limits_changed, false);

        throtl_log(td, "limits changed");

        list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
                struct throtl_grp *tg = blkg_to_tg(blkg);

                if (!tg->limits_changed)
                        continue;

                if (!xchg(&tg->limits_changed, false))
                        continue;

                throtl_log_tg(td, tg, "limit change rbps=%llu wbps=%llu"
                        " riops=%u wiops=%u", tg->bps[READ], tg->bps[WRITE],
                        tg->iops[READ], tg->iops[WRITE]);

                /*
                 * Restart the slices for both READ and WRITES. It might
                 * happen that a group's limits are dropped suddenly and we
                 * don't want to account recently dispatched IO with the new
                 * low rate.
                 */
                throtl_start_new_slice(td, tg, 0);
                throtl_start_new_slice(td, tg, 1);

                if (throtl_tg_on_rr(tg))
                        tg_update_disptime(td, tg);
        }
}

/* Dispatch throttled bios. Should be called without queue lock held. */
static int throtl_dispatch(struct request_queue *q)
{
        struct throtl_data *td = q->td;
        unsigned int nr_disp = 0;
        struct bio_list bio_list_on_stack;
        struct bio *bio;
        struct blk_plug plug;

        spin_lock_irq(q->queue_lock);

        throtl_process_limit_change(td);

        if (!total_nr_queued(td))
                goto out;

        bio_list_init(&bio_list_on_stack);

        throtl_log(td, "dispatch nr_queued=%u read=%u write=%u",
                   total_nr_queued(td), td->nr_queued[READ],
                   td->nr_queued[WRITE]);

        nr_disp = throtl_select_dispatch(td, &bio_list_on_stack);

        if (nr_disp)
                throtl_log(td, "bios disp=%u", nr_disp);

        throtl_schedule_next_dispatch(td);
out:
        spin_unlock_irq(q->queue_lock);

        /*
         * If we dispatched some requests, unplug the queue to ensure
         * immediate dispatch.
         */
        if (nr_disp) {
                blk_start_plug(&plug);
                while ((bio = bio_list_pop(&bio_list_on_stack)))
                        generic_make_request(bio);
                blk_finish_plug(&plug);
        }
        return nr_disp;
}

void blk_throtl_work(struct work_struct *work)
{
        struct throtl_data *td = container_of(work, struct throtl_data,
                                              throtl_work.work);
        struct request_queue *q = td->queue;

        throtl_dispatch(q);
}

/* Call with queue lock held */
static void
throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
{
        struct delayed_work *dwork = &td->throtl_work;

        /* schedule work if limits changed even if no bio is queued */
        if (total_nr_queued(td) || td->limits_changed) {
                /*
                 * We might have a work scheduled to be executed in future.
                 * Cancel that and schedule a new one.
                 */
                __cancel_delayed_work(dwork);
                queue_delayed_work(kthrotld_workqueue, dwork, delay);
                throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
                           delay, jiffies);
        }
}

static u64 tg_prfill_cpu_rwstat(struct seq_file *sf,
                                struct blkg_policy_data *pd, int off)
{
        struct throtl_grp *tg = pd_to_tg(pd);
        struct blkg_rwstat rwstat = { }, tmp;
        int i, cpu;

        for_each_possible_cpu(cpu) {
                struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);

                tmp = blkg_rwstat_read((void *)sc + off);
                for (i = 0; i < BLKG_RWSTAT_NR; i++)
                        rwstat.cnt[i] += tmp.cnt[i];
        }

        return __blkg_prfill_rwstat(sf, pd, &rwstat);
}

static int tg_print_cpu_rwstat(struct cgroup *cgrp, struct cftype *cft,
                               struct seq_file *sf)
{
        struct blkcg *blkcg = cgroup_to_blkcg(cgrp);

        blkcg_print_blkgs(sf, blkcg, tg_prfill_cpu_rwstat, &blkcg_policy_throtl,
                          cft->private, true);
        return 0;
}

static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
                              int off)
{
        struct throtl_grp *tg = pd_to_tg(pd);
        u64 v = *(u64 *)((void *)tg + off);

        if (v == -1)
                return 0;
        return __blkg_prfill_u64(sf, pd, v);
}

static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
                               int off)
{
        struct throtl_grp *tg = pd_to_tg(pd);
        unsigned int v = *(unsigned int *)((void *)tg + off);

        if (v == -1)
                return 0;
        return __blkg_prfill_u64(sf, pd, v);
}
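
/*
 * Editorial note (not in the original source): @off is the byte offset of
 * a field inside struct throtl_grp, stashed in cft->private by the
 * throtl_files[] table below (e.g. offsetof(struct throtl_grp, bps[READ])),
 * which lets one prfill helper print any of the four limit fields. A value
 * of -1 means "no limit" and prints nothing.
 */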

static int tg_print_conf_u64(struct cgroup *cgrp, struct cftype *cft,
                             struct seq_file *sf)
{
        blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_u64,
                          &blkcg_policy_throtl, cft->private, false);
        return 0;
}

static int tg_print_conf_uint(struct cgroup *cgrp, struct cftype *cft,
                              struct seq_file *sf)
{
        blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_uint,
                          &blkcg_policy_throtl, cft->private, false);
        return 0;
}

static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf,
                       bool is_u64)
{
        struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
        struct blkg_conf_ctx ctx;
        struct throtl_grp *tg;
        struct throtl_data *td;
        int ret;

        ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
        if (ret)
                return ret;

        tg = blkg_to_tg(ctx.blkg);
        td = ctx.blkg->q->td;

        if (!ctx.v)
                ctx.v = -1;

        if (is_u64)
                *(u64 *)((void *)tg + cft->private) = ctx.v;
        else
                *(unsigned int *)((void *)tg + cft->private) = ctx.v;

        /* XXX: we don't need the following deferred processing */
        xchg(&tg->limits_changed, true);
        xchg(&td->limits_changed, true);
        throtl_schedule_delayed_work(td, 0);

        blkg_conf_finish(&ctx);
        return 0;
}

static int tg_set_conf_u64(struct cgroup *cgrp, struct cftype *cft,
                           const char *buf)
{
        return tg_set_conf(cgrp, cft, buf, true);
}

static int tg_set_conf_uint(struct cgroup *cgrp, struct cftype *cft,
                            const char *buf)
{
        return tg_set_conf(cgrp, cft, buf, false);
}

static struct cftype throtl_files[] = {
        {
                .name = "throttle.read_bps_device",
                .private = offsetof(struct throtl_grp, bps[READ]),
                .read_seq_string = tg_print_conf_u64,
                .write_string = tg_set_conf_u64,
                .max_write_len = 256,
        },
        {
                .name = "throttle.write_bps_device",
                .private = offsetof(struct throtl_grp, bps[WRITE]),
                .read_seq_string = tg_print_conf_u64,
                .write_string = tg_set_conf_u64,
                .max_write_len = 256,
        },
        {
                .name = "throttle.read_iops_device",
                .private = offsetof(struct throtl_grp, iops[READ]),
                .read_seq_string = tg_print_conf_uint,
                .write_string = tg_set_conf_uint,
                .max_write_len = 256,
        },
        {
                .name = "throttle.write_iops_device",
                .private = offsetof(struct throtl_grp, iops[WRITE]),
                .read_seq_string = tg_print_conf_uint,
                .write_string = tg_set_conf_uint,
                .max_write_len = 256,
        },
        {
                .name = "throttle.io_service_bytes",
                .private = offsetof(struct tg_stats_cpu, service_bytes),
                .read_seq_string = tg_print_cpu_rwstat,
        },
        {
                .name = "throttle.io_serviced",
                .private = offsetof(struct tg_stats_cpu, serviced),
                .read_seq_string = tg_print_cpu_rwstat,
        },
        { }     /* terminate */
};
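
/*
 * Editorial usage sketch (not in the original source; mount point and
 * device numbers are hypothetical). These files appear under the blkio
 * controller with a "blkio." prefix, so with the cgroup mounted at
 * /sys/fs/cgroup/blkio, reads on device 8:16 can be capped at 1MB/s with:
 *
 *      echo "8:16 1048576" > /sys/fs/cgroup/blkio/blkio.throttle.read_bps_device
 *
 * Writing a value of 0 stores -1 via tg_set_conf(), i.e. it removes the
 * limit.
 */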

static void throtl_shutdown_wq(struct request_queue *q)
{
        struct throtl_data *td = q->td;

        cancel_delayed_work_sync(&td->throtl_work);
}

static struct blkcg_policy blkcg_policy_throtl = {
        .ops = {
                .pd_init_fn = throtl_pd_init,
                .pd_exit_fn = throtl_pd_exit,
                .pd_reset_stats_fn = throtl_pd_reset_stats,
        },
        .pd_size = sizeof(struct throtl_grp),
        .cftypes = throtl_files,
};

bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
{
        struct throtl_data *td = q->td;
        struct throtl_grp *tg;
        bool rw = bio_data_dir(bio), update_disptime = true;
        struct blkcg *blkcg;
        bool throttled = false;

        if (bio->bi_rw & REQ_THROTTLED) {
                bio->bi_rw &= ~REQ_THROTTLED;
                goto out;
        }

        /* bio_associate_current() needs ioc, try creating */
        create_io_context(GFP_ATOMIC, q->node);

        /*
         * A throtl_grp pointer retrieved under rcu can be used to access
         * basic fields like stats and io rates. If a group has no rules,
         * just update the dispatch stats in lockless manner and return.
         */
        rcu_read_lock();
        blkcg = bio_blkcg(bio);
        tg = throtl_lookup_tg(td, blkcg);
        if (tg) {
                if (tg_no_rule_group(tg, rw)) {
                        throtl_update_dispatch_stats(tg_to_blkg(tg),
                                                     bio->bi_size, bio->bi_rw);
                        goto out_unlock_rcu;
                }
        }

        /*
         * Either the group has not been allocated yet or it is not an
         * unlimited IO group.
         */
        spin_lock_irq(q->queue_lock);
        tg = throtl_lookup_create_tg(td, blkcg);
        if (unlikely(!tg))
                goto out_unlock;

        if (tg->nr_queued[rw]) {
                /*
                 * There is already another bio queued in the same dir. No
                 * need to update dispatch time.
                 */
                update_disptime = false;
                goto queue_bio;
        }

        /* Bio is within the rate limit of the group */
        if (tg_may_dispatch(td, tg, bio, NULL)) {
                throtl_charge_bio(tg, bio);

                /*
                 * We need to trim the slice even when bios are not being
                 * queued, otherwise it might happen that a bio is not queued
                 * for a long time and the slice keeps on extending and trim
                 * is not called for a long time. Now if limits are reduced
                 * suddenly we take into account all the IO dispatched so far
                 * at the new low rate and newly queued IO gets a really long
                 * dispatch time.
                 *
                 * So keep on trimming the slice even if the bio is not
                 * queued.
                 */
                throtl_trim_slice(td, tg, rw);
                goto out_unlock;
        }

queue_bio:
        throtl_log_tg(td, tg, "[%c] bio. bdisp=%llu sz=%u bps=%llu"
                        " iodisp=%u iops=%u queued=%d/%d",
                        rw == READ ? 'R' : 'W',
                        tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
                        tg->io_disp[rw], tg->iops[rw],
                        tg->nr_queued[READ], tg->nr_queued[WRITE]);

        bio_associate_current(bio);
        throtl_add_bio_tg(q->td, tg, bio);
        throttled = true;

        if (update_disptime) {
                tg_update_disptime(td, tg);
                throtl_schedule_next_dispatch(td);
        }

out_unlock:
        spin_unlock_irq(q->queue_lock);
out_unlock_rcu:
        rcu_read_unlock();
out:
        return throttled;
}
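
/*
 * Editorial note (not in the original source): the REQ_THROTTLED check at
 * the top of blk_throtl_bio() closes the loop with tg_dispatch_one_bio(),
 * which sets the flag before the delayed-work path resubmits the bio via
 * generic_make_request(). A resubmitted bio therefore passes straight
 * through (the flag is cleared and false is returned) instead of being
 * throttled a second time.
 */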

/**
 * blk_throtl_drain - drain throttled bios
 * @q: request_queue to drain throttled bios for
 *
 * Dispatch all currently throttled bios on @q through ->make_request_fn().
 */
void blk_throtl_drain(struct request_queue *q)
        __releases(q->queue_lock) __acquires(q->queue_lock)
{
        struct throtl_data *td = q->td;
        struct throtl_rb_root *st = &td->tg_service_tree;
        struct throtl_grp *tg;
        struct bio_list bl;
        struct bio *bio;

        WARN_ON_ONCE(!queue_is_locked(q));

        bio_list_init(&bl);

        while ((tg = throtl_rb_first(st))) {
                throtl_dequeue_tg(td, tg);

                while ((bio = bio_list_peek(&tg->bio_lists[READ])))
                        tg_dispatch_one_bio(td, tg, bio_data_dir(bio), &bl);
                while ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
                        tg_dispatch_one_bio(td, tg, bio_data_dir(bio), &bl);
        }
        spin_unlock_irq(q->queue_lock);

        while ((bio = bio_list_pop(&bl)))
                generic_make_request(bio);

        spin_lock_irq(q->queue_lock);
}

int blk_throtl_init(struct request_queue *q)
{
        struct throtl_data *td;
        int ret;

        td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
        if (!td)
                return -ENOMEM;

        td->tg_service_tree = THROTL_RB_ROOT;
        td->limits_changed = false;
        INIT_DELAYED_WORK(&td->throtl_work, blk_throtl_work);

        q->td = td;
        td->queue = q;

        /* activate policy */
        ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
        if (ret)
                kfree(td);
        return ret;
}

void blk_throtl_exit(struct request_queue *q)
{
        BUG_ON(!q->td);
        throtl_shutdown_wq(q);
        blkcg_deactivate_policy(q, &blkcg_policy_throtl);
        kfree(q->td);
}

static int __init throtl_init(void)
{
        kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
        if (!kthrotld_workqueue)
                panic("Failed to create kthrotld\n");

        return blkcg_policy_register(&blkcg_policy_throtl);
}

module_init(throtl_init);