/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include "blk-cgroup.h"
#include "blk.h"

#define MAX_KEY_LEN 100

static DEFINE_MUTEX(blkcg_pol_mutex);
static DEFINE_MUTEX(all_q_mutex);
static LIST_HEAD(all_q_list);

struct blkio_cgroup blkio_root_cgroup = { .cfq_weight = 2 * CFQ_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct blkio_policy_type *blkio_policy[BLKCG_MAX_POLS];

struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
        return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
                            struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

static struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
{
        return container_of(task_subsys_state(tsk, blkio_subsys_id),
                            struct blkio_cgroup, css);
}

struct blkio_cgroup *bio_blkio_cgroup(struct bio *bio)
{
        if (bio && bio->bi_css)
                return container_of(bio->bi_css, struct blkio_cgroup, css);
        return task_blkio_cgroup(current);
}
EXPORT_SYMBOL_GPL(bio_blkio_cgroup);

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkio_group *blkg)
{
        int i;

        if (!blkg)
                return;

        for (i = 0; i < BLKCG_MAX_POLS; i++) {
                struct blkio_policy_type *pol = blkio_policy[i];
                struct blkg_policy_data *pd = blkg->pd[i];

                if (!pd)
                        continue;

                if (pol && pol->ops.blkio_exit_group_fn)
                        pol->ops.blkio_exit_group_fn(blkg);

                kfree(pd);
        }

        kfree(blkg);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkio_group *blkg_alloc(struct blkio_cgroup *blkcg,
                                      struct request_queue *q)
{
        struct blkio_group *blkg;
        int i;

        /* alloc and init base part */
        blkg = kzalloc_node(sizeof(*blkg), GFP_ATOMIC, q->node);
        if (!blkg)
                return NULL;

        blkg->q = q;
        INIT_LIST_HEAD(&blkg->q_node);
        blkg->blkcg = blkcg;
        blkg->refcnt = 1;
        cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));

        for (i = 0; i < BLKCG_MAX_POLS; i++) {
                struct blkio_policy_type *pol = blkio_policy[i];
                struct blkg_policy_data *pd;

                if (!pol)
                        continue;

                /* alloc per-policy data and attach it to blkg */
                pd = kzalloc_node(sizeof(*pd) + pol->pdata_size, GFP_ATOMIC,
                                  q->node);
                if (!pd) {
                        blkg_free(blkg);
                        return NULL;
                }

                blkg->pd[i] = pd;
                pd->blkg = blkg;
        }

        /* invoke per-policy init */
        for (i = 0; i < BLKCG_MAX_POLS; i++) {
                struct blkio_policy_type *pol = blkio_policy[i];

                if (pol)
                        pol->ops.blkio_init_group_fn(blkg);
        }

        return blkg;
}

static struct blkio_group *__blkg_lookup(struct blkio_cgroup *blkcg,
                                         struct request_queue *q)
{
        struct blkio_group *blkg;
        struct hlist_node *n;

        hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node)
                if (blkg->q == q)
                        return blkg;
        return NULL;
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
 * - see blk_queue_bypass_start() for details.
 */
struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
                                struct request_queue *q)
{
        WARN_ON_ONCE(!rcu_read_lock_held());

        if (unlikely(blk_queue_bypass(q)))
                return NULL;
        return __blkg_lookup(blkcg, q);
}
EXPORT_SYMBOL_GPL(blkg_lookup);
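
/*
 * Example usage sketch: a caller that only needs to read per-group state
 * looks the blkg up under the RCU read lock and tolerates a %NULL result
 * while @q is bypassing.  update_stats() is a hypothetical per-policy
 * helper, not part of this API.
 *
 *      rcu_read_lock();
 *      blkg = blkg_lookup(bio_blkio_cgroup(bio), q);
 *      if (blkg)
 *              update_stats(blkg);
 *      rcu_read_unlock();
 */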

struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
                                       struct request_queue *q,
                                       bool for_root)
        __releases(q->queue_lock) __acquires(q->queue_lock)
{
        struct blkio_group *blkg;

        WARN_ON_ONCE(!rcu_read_lock_held());
        lockdep_assert_held(q->queue_lock);

        /*
         * This could be the first entry point of blkcg implementation and
         * we shouldn't allow anything to go through for a bypassing queue.
         */
        if (unlikely(blk_queue_bypass(q)) && !for_root)
                return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);

        blkg = __blkg_lookup(blkcg, q);
        if (blkg)
                return blkg;

        /* blkg holds a reference to blkcg */
        if (!css_tryget(&blkcg->css))
                return ERR_PTR(-EINVAL);

        /*
         * Allocate and initialize.
         */
        blkg = blkg_alloc(blkcg, q);

        /* did alloc fail? */
        if (unlikely(!blkg)) {
                blkg = ERR_PTR(-ENOMEM);
                goto out;
        }

        /* insert */
        spin_lock(&blkcg->lock);
        hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
        list_add(&blkg->q_node, &q->blkg_list);
        spin_unlock(&blkcg->lock);
out:
        return blkg;
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);
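
/*
 * Example usage sketch: blkg_lookup_create() expects both the RCU read
 * lock and @q->queue_lock to be held and may return an ERR_PTR() (e.g.
 * -EBUSY while the queue is bypassing), so callers check with IS_ERR().
 * This mirrors how blkg_conf_prep() below drives it.
 *
 *      rcu_read_lock();
 *      spin_lock_irq(q->queue_lock);
 *      blkg = blkg_lookup_create(blkcg, q, false);
 *      if (!IS_ERR(blkg)) {
 *              ... use or configure blkg ...
 *      }
 *      spin_unlock_irq(q->queue_lock);
 *      rcu_read_unlock();
 */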

static void blkg_destroy(struct blkio_group *blkg)
{
        struct request_queue *q = blkg->q;
        struct blkio_cgroup *blkcg = blkg->blkcg;

        lockdep_assert_held(q->queue_lock);
        lockdep_assert_held(&blkcg->lock);

        /* Something wrong if we are trying to remove same group twice */
        WARN_ON_ONCE(list_empty(&blkg->q_node));
        WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
        list_del_init(&blkg->q_node);
        hlist_del_init_rcu(&blkg->blkcg_node);

        /*
         * Put the reference taken at the time of creation so that when all
         * queues are gone, group can be destroyed.
         */
        blkg_put(blkg);
}

/*
 * XXX: This updates blkg policy data in-place for root blkg, which is
 * necessary across elevator switch and policy registration as root blkgs
 * aren't shot down.  This broken and racy implementation is temporary.
 * Eventually, blkg shoot down will be replaced by proper in-place update.
 */
void update_root_blkg_pd(struct request_queue *q,
                         const struct blkio_policy_type *pol)
{
        struct blkio_group *blkg = blkg_lookup(&blkio_root_cgroup, q);
        struct blkg_policy_data *pd;

        if (!blkg)
                return;

        kfree(blkg->pd[pol->plid]);
        blkg->pd[pol->plid] = NULL;

        if (!pol)
                return;

        pd = kzalloc(sizeof(*pd) + pol->pdata_size, GFP_KERNEL);
        WARN_ON_ONCE(!pd);

        blkg->pd[pol->plid] = pd;
        pd->blkg = blkg;
        pol->ops.blkio_init_group_fn(blkg);
}
EXPORT_SYMBOL_GPL(update_root_blkg_pd);

/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 * @destroy_root: whether to destroy root blkg or not
 *
 * Destroy blkgs associated with @q.  If @destroy_root is %true, all are
 * destroyed; otherwise, root blkg is left alone.
 */
void blkg_destroy_all(struct request_queue *q, bool destroy_root)
{
        struct blkio_group *blkg, *n;

        spin_lock_irq(q->queue_lock);

        list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
                struct blkio_cgroup *blkcg = blkg->blkcg;

                /* skip root? */
                if (!destroy_root && blkg->blkcg == &blkio_root_cgroup)
                        continue;

                spin_lock(&blkcg->lock);
                blkg_destroy(blkg);
                spin_unlock(&blkcg->lock);
        }

        spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blkg_destroy_all);

static void blkg_rcu_free(struct rcu_head *rcu_head)
{
        blkg_free(container_of(rcu_head, struct blkio_group, rcu_head));
}

void __blkg_release(struct blkio_group *blkg)
{
        /* release the extra blkcg reference this blkg has been holding */
        css_put(&blkg->blkcg->css);

        /*
         * A group is freed in rcu manner.  But having an rcu lock does not
         * mean that one can access all the fields of blkg and assume these
         * are valid.  For example, don't try to follow throtl_data and
         * request queue links.
         *
         * Having a reference to blkg under an rcu allows access only to
         * values local to groups like group stats and group rate limits.
         */
        call_rcu(&blkg->rcu_head, blkg_rcu_free);
}
EXPORT_SYMBOL_GPL(__blkg_release);
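
/*
 * Example usage sketch: a holder that stashes a blkg pointer beyond the
 * current RCU read section takes its own reference and drops it later;
 * __blkg_release() runs once the last reference is gone.  blkg_get() and
 * blkg_put() are assumed here to be the refcount helpers from
 * blk-cgroup.h.
 *
 *      blkg_get(blkg);         pin while we keep the pointer around
 *      ...
 *      blkg_put(blkg);         final put ends up in __blkg_release()
 */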

static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
        struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
        struct blkio_group *blkg;
        struct hlist_node *n;
        int i;

        mutex_lock(&blkcg_pol_mutex);
        spin_lock_irq(&blkcg->lock);

        /*
         * Note that stat reset is racy - it doesn't synchronize against
         * stat updates.  This is a debug feature which shouldn't exist
         * anyway.  If you get hit by a race, retry.
         */
        hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
                for (i = 0; i < BLKCG_MAX_POLS; i++) {
                        struct blkio_policy_type *pol = blkio_policy[i];

                        if (pol && pol->ops.blkio_reset_group_stats_fn)
                                pol->ops.blkio_reset_group_stats_fn(blkg);
                }
        }

        spin_unlock_irq(&blkcg->lock);
        mutex_unlock(&blkcg_pol_mutex);
        return 0;
}

static const char *blkg_dev_name(struct blkio_group *blkg)
{
        /* some drivers (floppy) instantiate a queue w/o disk registered */
        if (blkg->q->backing_dev_info.dev)
                return dev_name(blkg->q->backing_dev_info.dev);
        return NULL;
}

/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: to print out sum of prfill return values or not
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 * policy data and @data.  If @show_total is %true, the sum of the return
 * values from @prfill is printed with "Total" label at the end.
 *
 * This is to be used to construct print functions for
 * cftype->read_seq_string method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkio_cgroup *blkcg,
                       u64 (*prfill)(struct seq_file *, void *, int),
                       const struct blkio_policy_type *pol, int data,
                       bool show_total)
{
        struct blkio_group *blkg;
        struct hlist_node *n;
        u64 total = 0;

        spin_lock_irq(&blkcg->lock);
        hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
                if (blkg->pd[pol->plid])
                        total += prfill(sf, blkg->pd[pol->plid]->pdata, data);
        spin_unlock_irq(&blkcg->lock);

        if (show_total)
                seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);

/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pdata: policy private data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pdata.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, void *pdata, u64 v)
{
        const char *dname = blkg_dev_name(pdata_to_blkg(pdata));

        if (!dname)
                return 0;

        seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
        return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);

/**
 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 * @sf: seq_file to print to
 * @pdata: policy private data of interest
 * @rwstat: rwstat to print
 *
 * Print @rwstat to @sf for the device associated with @pdata.
 */
u64 __blkg_prfill_rwstat(struct seq_file *sf, void *pdata,
                         const struct blkg_rwstat *rwstat)
{
        static const char *rwstr[] = {
                [BLKG_RWSTAT_READ]      = "Read",
                [BLKG_RWSTAT_WRITE]     = "Write",
                [BLKG_RWSTAT_SYNC]      = "Sync",
                [BLKG_RWSTAT_ASYNC]     = "Async",
        };
        const char *dname = blkg_dev_name(pdata_to_blkg(pdata));
        u64 v;
        int i;

        if (!dname)
                return 0;

        for (i = 0; i < BLKG_RWSTAT_NR; i++)
                seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
                           (unsigned long long)rwstat->cnt[i]);

        v = rwstat->cnt[BLKG_RWSTAT_READ] + rwstat->cnt[BLKG_RWSTAT_WRITE];
        seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
        return v;
}

/**
 * blkg_prfill_stat - prfill callback for blkg_stat
 * @sf: seq_file to print to
 * @pdata: policy private data of interest
 * @off: offset to the blkg_stat in @pdata
 *
 * prfill callback for printing a blkg_stat.
 */
u64 blkg_prfill_stat(struct seq_file *sf, void *pdata, int off)
{
        return __blkg_prfill_u64(sf, pdata, blkg_stat_read(pdata + off));
}
EXPORT_SYMBOL_GPL(blkg_prfill_stat);

/**
 * blkg_prfill_rwstat - prfill callback for blkg_rwstat
 * @sf: seq_file to print to
 * @pdata: policy private data of interest
 * @off: offset to the blkg_rwstat in @pdata
 *
 * prfill callback for printing a blkg_rwstat.
 */
u64 blkg_prfill_rwstat(struct seq_file *sf, void *pdata, int off)
{
        struct blkg_rwstat rwstat = blkg_rwstat_read(pdata + off);

        return __blkg_prfill_rwstat(sf, pdata, &rwstat);
}
EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);
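
/*
 * Example usage sketch: a policy typically wires these helpers into a
 * cftype->read_seq_string handler.  "example_pol" and the stat offset
 * stored in cft->private are hypothetical; a real policy passes its own
 * blkio_policy_type and the offset of a blkg_stat inside its pdata.
 *
 *      static int example_print_stat(struct cgroup *cgrp, struct cftype *cft,
 *                                    struct seq_file *sf)
 *      {
 *              struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
 *
 *              blkcg_print_blkgs(sf, blkcg, blkg_prfill_stat, &example_pol,
 *                                cft->private, false);
 *              return 0;
 *      }
 */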

/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @pol: target policy
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result.  @ctx->blkg points to the blkg to be updated and @ctx->v the new
 * value.  This function returns with RCU read lock and queue lock held and
 * must be paired with blkg_conf_finish().
 */
int blkg_conf_prep(struct blkio_cgroup *blkcg,
                   const struct blkio_policy_type *pol, const char *input,
                   struct blkg_conf_ctx *ctx)
        __acquires(rcu) __acquires(disk->queue->queue_lock)
{
        struct gendisk *disk;
        struct blkio_group *blkg;
        unsigned int major, minor;
        unsigned long long v;
        int part, ret;

        if (sscanf(input, "%u:%u %llu", &major, &minor, &v) != 3)
                return -EINVAL;

        disk = get_gendisk(MKDEV(major, minor), &part);
        if (!disk || part)
                return -EINVAL;

        rcu_read_lock();
        spin_lock_irq(disk->queue->queue_lock);

        blkg = blkg_lookup_create(blkcg, disk->queue, false);

        if (IS_ERR(blkg)) {
                ret = PTR_ERR(blkg);
                rcu_read_unlock();
                spin_unlock_irq(disk->queue->queue_lock);
                put_disk(disk);
                /*
                 * If queue was bypassing, we should retry.  Do so after a
                 * short msleep().  It isn't strictly necessary but queue
                 * can be bypassing for some time and it's always nice to
                 * avoid busy looping.
                 */
                if (ret == -EBUSY) {
                        msleep(10);
                        ret = restart_syscall();
                }
                return ret;
        }

        ctx->disk = disk;
        ctx->blkg = blkg;
        ctx->v = v;
        return 0;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);

/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after per-blkg config update.  This function must be paired
 * with blkg_conf_prep().
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
        __releases(ctx->disk->queue->queue_lock) __releases(rcu)
{
        spin_unlock_irq(ctx->disk->queue->queue_lock);
        rcu_read_unlock();
        put_disk(ctx->disk);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);
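
/*
 * Example usage sketch: a policy's per-device write handler (wired to
 * cftype->write_string) pairs the two helpers around the actual config
 * update.  "example_pol", example_pd() and the "limit" field are
 * hypothetical stand-ins for a real policy and its pdata accessor.
 *
 *      static int example_set_limit(struct cgroup *cgrp, struct cftype *cft,
 *                                   const char *buf)
 *      {
 *              struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
 *              struct blkg_conf_ctx ctx;
 *              int ret;
 *
 *              ret = blkg_conf_prep(blkcg, &example_pol, buf, &ctx);
 *              if (ret)
 *                      return ret;
 *
 *              example_pd(ctx.blkg)->limit = ctx.v;
 *              blkg_conf_finish(&ctx);
 *              return 0;
 *      }
 */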

struct cftype blkio_files[] = {
        {
                .name = "reset_stats",
                .write_u64 = blkiocg_reset_stats,
        },
        { }     /* terminate */
};

/**
 * blkiocg_pre_destroy - cgroup pre_destroy callback
 * @cgroup: cgroup of interest
 *
 * This function is called when @cgroup is about to go away and is
 * responsible for shooting down all blkgs associated with @cgroup.
 * blkgs should be removed while holding both q and blkcg locks.  As
 * blkcg lock is nested inside q lock, this function performs reverse
 * double lock dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static int blkiocg_pre_destroy(struct cgroup *cgroup)
{
        struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);

        spin_lock_irq(&blkcg->lock);

        while (!hlist_empty(&blkcg->blkg_list)) {
                struct blkio_group *blkg = hlist_entry(blkcg->blkg_list.first,
                                                struct blkio_group, blkcg_node);
                struct request_queue *q = blkg->q;

                if (spin_trylock(q->queue_lock)) {
                        blkg_destroy(blkg);
                        spin_unlock(q->queue_lock);
                } else {
                        spin_unlock_irq(&blkcg->lock);
                        cpu_relax();
                        spin_lock_irq(&blkcg->lock);
                }
        }

        spin_unlock_irq(&blkcg->lock);
        return 0;
}

static void blkiocg_destroy(struct cgroup *cgroup)
{
        struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);

        if (blkcg != &blkio_root_cgroup)
                kfree(blkcg);
}

static struct cgroup_subsys_state *blkiocg_create(struct cgroup *cgroup)
{
        static atomic64_t id_seq = ATOMIC64_INIT(0);
        struct blkio_cgroup *blkcg;
        struct cgroup *parent = cgroup->parent;

        if (!parent) {
                blkcg = &blkio_root_cgroup;
                goto done;
        }

        blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
        if (!blkcg)
                return ERR_PTR(-ENOMEM);

        blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT;
        blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
done:
        spin_lock_init(&blkcg->lock);
        INIT_HLIST_HEAD(&blkcg->blkg_list);

        return &blkcg->css;
}

/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node().  Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
        int ret;

        might_sleep();

        ret = blk_throtl_init(q);
        if (ret)
                return ret;

        mutex_lock(&all_q_mutex);
        INIT_LIST_HEAD(&q->all_q_node);
        list_add_tail(&q->all_q_node, &all_q_list);
        mutex_unlock(&all_q_mutex);

        return 0;
}

/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue().  Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
        lockdep_assert_held(q->queue_lock);

        blk_throtl_drain(q);
}

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue().  Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
        mutex_lock(&all_q_mutex);
        list_del_init(&q->all_q_node);
        mutex_unlock(&all_q_mutex);

        blkg_destroy_all(q, true);

        blk_throtl_exit(q);
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures.  For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
        struct task_struct *task;
        struct io_context *ioc;
        int ret = 0;

        /* task_lock() is needed to avoid races with exit_io_context() */
        cgroup_taskset_for_each(task, cgrp, tset) {
                task_lock(task);
                ioc = task->io_context;
                if (ioc && atomic_read(&ioc->nr_tasks) > 1)
                        ret = -EINVAL;
                task_unlock(task);
                if (ret)
                        break;
        }
        return ret;
}

static void blkcg_bypass_start(void)
        __acquires(&all_q_mutex)
{
        struct request_queue *q;

        mutex_lock(&all_q_mutex);

        list_for_each_entry(q, &all_q_list, all_q_node) {
                blk_queue_bypass_start(q);
                blkg_destroy_all(q, false);
        }
}

static void blkcg_bypass_end(void)
        __releases(&all_q_mutex)
{
        struct request_queue *q;

        list_for_each_entry(q, &all_q_list, all_q_node)
                blk_queue_bypass_end(q);

        mutex_unlock(&all_q_mutex);
}

struct cgroup_subsys blkio_subsys = {
        .name = "blkio",
        .create = blkiocg_create,
        .can_attach = blkiocg_can_attach,
        .pre_destroy = blkiocg_pre_destroy,
        .destroy = blkiocg_destroy,
        .subsys_id = blkio_subsys_id,
        .base_cftypes = blkio_files,
        .module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

/**
 * blkio_policy_register - register a blkcg policy
 * @blkiop: blkcg policy to register
 *
 * Register @blkiop with blkcg core.  Might sleep and @blkiop may be
 * modified on successful registration.  Returns 0 on success and -errno on
 * failure.
 */
int blkio_policy_register(struct blkio_policy_type *blkiop)
{
        struct request_queue *q;
        int i, ret;

        mutex_lock(&blkcg_pol_mutex);

        /* find an empty slot */
        ret = -ENOSPC;
        for (i = 0; i < BLKCG_MAX_POLS; i++)
                if (!blkio_policy[i])
                        break;
        if (i >= BLKCG_MAX_POLS)
                goto out_unlock;

        /* register and update blkgs */
        blkiop->plid = i;
        blkio_policy[i] = blkiop;

        blkcg_bypass_start();
        list_for_each_entry(q, &all_q_list, all_q_node)
                update_root_blkg_pd(q, blkiop);
        blkcg_bypass_end();

        /* everything is in place, add intf files for the new policy */
        if (blkiop->cftypes)
                WARN_ON(cgroup_add_cftypes(&blkio_subsys, blkiop->cftypes));
        ret = 0;
out_unlock:
        mutex_unlock(&blkcg_pol_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

/**
 * blkio_policy_unregister - unregister a blkcg policy
 * @blkiop: blkcg policy to unregister
 *
 * Undo blkio_policy_register(@blkiop).  Might sleep.
 */
void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
        struct request_queue *q;

        mutex_lock(&blkcg_pol_mutex);

        if (WARN_ON(blkio_policy[blkiop->plid] != blkiop))
                goto out_unlock;

        /* kill the intf files first */
        if (blkiop->cftypes)
                cgroup_rm_cftypes(&blkio_subsys, blkiop->cftypes);

        /* unregister and update blkgs */
        blkio_policy[blkiop->plid] = NULL;

        blkcg_bypass_start();
        list_for_each_entry(q, &all_q_list, all_q_node)
                update_root_blkg_pd(q, blkiop);
        blkcg_bypass_end();
out_unlock:
        mutex_unlock(&blkcg_pol_mutex);
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);
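
/*
 * Example usage sketch: a policy module registers its blkio_policy_type on
 * load and unregisters it on unload.  "example_policy" (with its ops,
 * pdata_size and cftypes already filled in) is hypothetical.
 *
 *      static int __init example_init(void)
 *      {
 *              return blkio_policy_register(&example_policy);
 *      }
 *
 *      static void __exit example_exit(void)
 *      {
 *              blkio_policy_unregister(&example_policy);
 *      }
 *
 *      module_init(example_init);
 *      module_exit(example_exit);
 */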