block/blk-cgroup.c
31e4c28d
VG
1/*
2 * Common Block IO controller cgroup interface
3 *
4 * Based on ideas and code from CFQ, CFS and BFQ:
5 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
6 *
7 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
8 * Paolo Valente <paolo.valente@unimore.it>
9 *
10 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
11 * Nauman Rafique <nauman@google.com>
12 */
13#include <linux/ioprio.h>
22084190 14#include <linux/kdev_t.h>
9d6a986c 15#include <linux/module.h>
accee785 16#include <linux/err.h>
9195291e 17#include <linux/blkdev.h>
5a0e3ad6 18#include <linux/slab.h>
34d0f179 19#include <linux/genhd.h>
72e06c25 20#include <linux/delay.h>
9a9e8a26 21#include <linux/atomic.h>
72e06c25 22#include "blk-cgroup.h"
5efd6113 23#include "blk.h"
3e252066 24
84c124da
DS
25#define MAX_KEY_LEN 100
26
bc0d6501 27static DEFINE_MUTEX(blkcg_pol_mutex);
923adde1 28
e71357e1
TH
29struct blkcg blkcg_root = { .cfq_weight = 2 * CFQ_WEIGHT_DEFAULT,
30 .cfq_leaf_weight = 2 * CFQ_WEIGHT_DEFAULT, };
3c798398 31EXPORT_SYMBOL_GPL(blkcg_root);
9d6a986c 32
3c798398 33static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];
035d10b2 34
16b3de66
TH
35static struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
36 struct request_queue *q, bool update_hint);
37
38/**
39 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
40 * @d_blkg: loop cursor pointing to the current descendant
41 * @pos_cgrp: used for iteration
42 * @p_blkg: target blkg to walk descendants of
43 *
44 * Walk @d_blkg through the descendants of @p_blkg. Must be used with RCU
45 * read locked. If called under either blkcg or queue lock, the iteration
46 * is guaranteed to include all and only online blkgs. The caller may
47 * update @pos_cgrp by calling cgroup_rightmost_descendant() to skip
48 * a subtree.
49 */
50#define blkg_for_each_descendant_pre(d_blkg, pos_cgrp, p_blkg) \
51 cgroup_for_each_descendant_pre((pos_cgrp), (p_blkg)->blkcg->css.cgroup) \
52 if (((d_blkg) = __blkg_lookup(cgroup_to_blkcg(pos_cgrp), \
53 (p_blkg)->q, false)))
54
a2b1693b 55static bool blkcg_policy_enabled(struct request_queue *q,
3c798398 56 const struct blkcg_policy *pol)
a2b1693b
TH
57{
58 return pol && test_bit(pol->plid, q->blkcg_pols);
59}
60
0381411e
TH
61/**
62 * blkg_free - free a blkg
63 * @blkg: blkg to free
64 *
65 * Free @blkg which may be partially allocated.
66 */
3c798398 67static void blkg_free(struct blkcg_gq *blkg)
0381411e 68{
e8989fae 69 int i;
549d3aa8
TH
70
71 if (!blkg)
72 return;
73
8bd435b3 74 for (i = 0; i < BLKCG_MAX_POLS; i++) {
3c798398 75 struct blkcg_policy *pol = blkcg_policy[i];
e8989fae
TH
76 struct blkg_policy_data *pd = blkg->pd[i];
77
9ade5ea4
TH
78 if (!pd)
79 continue;
80
f9fcc2d3
TH
81 if (pol && pol->pd_exit_fn)
82 pol->pd_exit_fn(blkg);
9ade5ea4 83
9ade5ea4 84 kfree(pd);
0381411e 85 }
e8989fae 86
a051661c 87 blk_exit_rl(&blkg->rl);
549d3aa8 88 kfree(blkg);
0381411e
TH
89}
90
91/**
92 * blkg_alloc - allocate a blkg
93 * @blkcg: block cgroup the new blkg is associated with
94 * @q: request_queue the new blkg is associated with
15974993 95 * @gfp_mask: allocation mask to use
0381411e 96 *
e8989fae 97 * Allocate a new blkg assocating @blkcg and @q.
0381411e 98 */
15974993
TH
99static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
100 gfp_t gfp_mask)
0381411e 101{
3c798398 102 struct blkcg_gq *blkg;
e8989fae 103 int i;
0381411e
TH
104
105 /* alloc and init base part */
15974993 106 blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
0381411e
TH
107 if (!blkg)
108 return NULL;
109
c875f4d0 110 blkg->q = q;
e8989fae 111 INIT_LIST_HEAD(&blkg->q_node);
0381411e 112 blkg->blkcg = blkcg;
1adaf3dd 113 blkg->refcnt = 1;
0381411e 114
a051661c
TH
115 /* root blkg uses @q->root_rl, init rl only for !root blkgs */
116 if (blkcg != &blkcg_root) {
117 if (blk_init_rl(&blkg->rl, q, gfp_mask))
118 goto err_free;
119 blkg->rl.blkg = blkg;
120 }
121
8bd435b3 122 for (i = 0; i < BLKCG_MAX_POLS; i++) {
3c798398 123 struct blkcg_policy *pol = blkcg_policy[i];
e8989fae 124 struct blkg_policy_data *pd;
0381411e 125
a2b1693b 126 if (!blkcg_policy_enabled(q, pol))
e8989fae
TH
127 continue;
128
129 /* alloc per-policy data and attach it to blkg */
15974993 130 pd = kzalloc_node(pol->pd_size, gfp_mask, q->node);
a051661c
TH
131 if (!pd)
132 goto err_free;
549d3aa8 133
e8989fae
TH
134 blkg->pd[i] = pd;
135 pd->blkg = blkg;
b276a876 136 pd->plid = i;
e8989fae 137
9b2ea86b 138 /* invoke per-policy init */
356d2e58 139 if (pol->pd_init_fn)
f9fcc2d3 140 pol->pd_init_fn(blkg);
e8989fae
TH
141 }
142
0381411e 143 return blkg;
a051661c
TH
144
145err_free:
146 blkg_free(blkg);
147 return NULL;
0381411e
TH
148}
149
16b3de66
TH
150/**
151 * __blkg_lookup - internal version of blkg_lookup()
152 * @blkcg: blkcg of interest
153 * @q: request_queue of interest
154 * @update_hint: whether to update lookup hint with the result or not
155 *
156 * This is the internal version and shouldn't be used by policy
157 * implementations. Looks up blkgs for the @blkcg - @q pair regardless of
158 * @q's bypass state. If @update_hint is %true, the caller should be
159 * holding @q->queue_lock and the lookup hint is updated on success.
160 */
3c798398 161static struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
86cde6b6 162 struct request_queue *q, bool update_hint)
80fd9979 163{
3c798398 164 struct blkcg_gq *blkg;
80fd9979 165
a637120e
TH
166 blkg = rcu_dereference(blkcg->blkg_hint);
167 if (blkg && blkg->q == q)
168 return blkg;
169
170 /*
86cde6b6
TH
171 * Hint didn't match. Look up from the radix tree. Note that the
172 * hint can only be updated under queue_lock as otherwise @blkg
173 * could have already been removed from blkg_tree. The caller is
174 * responsible for grabbing queue_lock if @update_hint.
a637120e
TH
175 */
176 blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
86cde6b6
TH
177 if (blkg && blkg->q == q) {
178 if (update_hint) {
179 lockdep_assert_held(q->queue_lock);
180 rcu_assign_pointer(blkcg->blkg_hint, blkg);
181 }
a637120e 182 return blkg;
86cde6b6 183 }
a637120e 184
80fd9979
TH
185 return NULL;
186}
187
188/**
189 * blkg_lookup - lookup blkg for the specified blkcg - q pair
190 * @blkcg: blkcg of interest
191 * @q: request_queue of interest
192 *
193 * Lookup blkg for the @blkcg - @q pair. This function should be called
194 * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
195 * - see blk_queue_bypass_start() for details.
196 */
3c798398 197struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q)
80fd9979
TH
198{
199 WARN_ON_ONCE(!rcu_read_lock_held());
200
201 if (unlikely(blk_queue_bypass(q)))
202 return NULL;
86cde6b6 203 return __blkg_lookup(blkcg, q, false);
80fd9979
TH
204}
205EXPORT_SYMBOL_GPL(blkg_lookup);
206
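/*
 * Illustrative sketch (not part of this file): how a policy might resolve
 * the blkg for a bio's blkcg in the IO path.  bio_blkcg() and blkg_get()
 * are assumed from blk-cgroup.h; the function name and the pr_debug()
 * call are made up for the example.
 */
static void example_note_bio_blkg(struct bio *bio, struct request_queue *q)
{
	struct blkcg_gq *blkg;

	rcu_read_lock();

	/* returns NULL while @q is bypassing, see the comment above */
	blkg = blkg_lookup(bio_blkcg(bio), q);
	if (blkg)
		pr_debug("bio belongs to blkcg id %llu\n",
			 (unsigned long long)blkg->blkcg->id);

	/*
	 * @blkg is only guaranteed to stay around inside this RCU read
	 * section; take a reference with blkg_get() before dropping it
	 * if the pointer needs to outlive the section.
	 */
	rcu_read_unlock();
}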
15974993
TH
207/*
208 * If @new_blkg is %NULL, this function tries to allocate a new one as
209 * necessary using %GFP_ATOMIC. @new_blkg is always consumed on return.
210 */
86cde6b6
TH
211static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
212 struct request_queue *q,
213 struct blkcg_gq *new_blkg)
5624a4e4 214{
3c798398 215 struct blkcg_gq *blkg;
f427d909 216 int i, ret;
5624a4e4 217
cd1604fa
TH
218 WARN_ON_ONCE(!rcu_read_lock_held());
219 lockdep_assert_held(q->queue_lock);
220
7ee9c562 221 /* blkg holds a reference to blkcg */
15974993 222 if (!css_tryget(&blkcg->css)) {
93e6d5d8
TH
223 ret = -EINVAL;
224 goto err_free_blkg;
15974993 225 }
cd1604fa 226
496fb780 227 /* allocate */
15974993
TH
228 if (!new_blkg) {
229 new_blkg = blkg_alloc(blkcg, q, GFP_ATOMIC);
230 if (unlikely(!new_blkg)) {
93e6d5d8
TH
231 ret = -ENOMEM;
232 goto err_put_css;
15974993
TH
233 }
234 }
235 blkg = new_blkg;
cd1604fa 236
3c547865
TH
237 /* link parent and insert */
238 if (blkcg_parent(blkcg)) {
239 blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
240 if (WARN_ON_ONCE(!blkg->parent)) {
241 blkg = ERR_PTR(-EINVAL);
242 goto err_put_css;
243 }
244 blkg_get(blkg->parent);
245 }
246
cd1604fa 247 spin_lock(&blkcg->lock);
a637120e
TH
248 ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
249 if (likely(!ret)) {
250 hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
251 list_add(&blkg->q_node, &q->blkg_list);
f427d909
TH
252
253 for (i = 0; i < BLKCG_MAX_POLS; i++) {
254 struct blkcg_policy *pol = blkcg_policy[i];
255
256 if (blkg->pd[i] && pol->pd_online_fn)
257 pol->pd_online_fn(blkg);
258 }
a637120e 259 }
f427d909 260 blkg->online = true;
cd1604fa 261 spin_unlock(&blkcg->lock);
496fb780 262
a637120e
TH
263 if (!ret)
264 return blkg;
15974993 265
3c547865
TH
266 /* @blkg failed to be fully initialized, use the usual release path */
267 blkg_put(blkg);
268 return ERR_PTR(ret);
269
93e6d5d8 270err_put_css:
496fb780 271 css_put(&blkcg->css);
93e6d5d8 272err_free_blkg:
15974993 273 blkg_free(new_blkg);
93e6d5d8 274 return ERR_PTR(ret);
31e4c28d 275}
3c96cb32 276
86cde6b6
TH
277/**
278 * blkg_lookup_create - lookup blkg, try to create one if not there
279 * @blkcg: blkcg of interest
280 * @q: request_queue of interest
281 *
282 * Lookup blkg for the @blkcg - @q pair. If it doesn't exist, try to
3c547865
TH
283 * create one. blkg creation is performed recursively from blkcg_root such
284 * that all non-root blkg's have access to the parent blkg. This function
285 * should be called under RCU read lock and @q->queue_lock.
86cde6b6
TH
286 *
287 * Returns pointer to the looked up or created blkg on success, ERR_PTR()
288 * value on error. If @q is dead, returns ERR_PTR(-EINVAL). If @q is not
289 * dead and bypassing, returns ERR_PTR(-EBUSY).
290 */
3c798398
TH
291struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
292 struct request_queue *q)
3c96cb32 293{
86cde6b6
TH
294 struct blkcg_gq *blkg;
295
296 WARN_ON_ONCE(!rcu_read_lock_held());
297 lockdep_assert_held(q->queue_lock);
298
3c96cb32
TH
299 /*
300 * This could be the first entry point of blkcg implementation and
301 * we shouldn't allow anything to go through for a bypassing queue.
302 */
303 if (unlikely(blk_queue_bypass(q)))
3f3299d5 304 return ERR_PTR(blk_queue_dying(q) ? -EINVAL : -EBUSY);
86cde6b6
TH
305
306 blkg = __blkg_lookup(blkcg, q, true);
307 if (blkg)
308 return blkg;
309
3c547865
TH
310 /*
311 * Create blkgs walking down from blkcg_root to @blkcg, so that all
312 * non-root blkgs have access to their parents.
313 */
314 while (true) {
315 struct blkcg *pos = blkcg;
316 struct blkcg *parent = blkcg_parent(blkcg);
317
318 while (parent && !__blkg_lookup(parent, q, false)) {
319 pos = parent;
320 parent = blkcg_parent(parent);
321 }
322
323 blkg = blkg_create(pos, q, NULL);
324 if (pos == blkcg || IS_ERR(blkg))
325 return blkg;
326 }
3c96cb32 327}
cd1604fa 328EXPORT_SYMBOL_GPL(blkg_lookup_create);
31e4c28d 329
3c798398 330static void blkg_destroy(struct blkcg_gq *blkg)
03aa264a 331{
3c798398 332 struct blkcg *blkcg = blkg->blkcg;
f427d909 333 int i;
03aa264a 334
27e1f9d1 335 lockdep_assert_held(blkg->q->queue_lock);
9f13ef67 336 lockdep_assert_held(&blkcg->lock);
03aa264a
TH
337
338 /* Something is wrong if we are trying to remove the same group twice */
e8989fae 339 WARN_ON_ONCE(list_empty(&blkg->q_node));
9f13ef67 340 WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
a637120e 341
f427d909
TH
342 for (i = 0; i < BLKCG_MAX_POLS; i++) {
343 struct blkcg_policy *pol = blkcg_policy[i];
344
345 if (blkg->pd[i] && pol->pd_offline_fn)
346 pol->pd_offline_fn(blkg);
347 }
348 blkg->online = false;
349
a637120e 350 radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
e8989fae 351 list_del_init(&blkg->q_node);
9f13ef67 352 hlist_del_init_rcu(&blkg->blkcg_node);
03aa264a 353
a637120e
TH
354 /*
355 * Both setting lookup hint to and clearing it from @blkg are done
356 * under queue_lock. If it's not pointing to @blkg now, it never
357 * will. Hint assignment itself can race safely.
358 */
359 if (rcu_dereference_raw(blkcg->blkg_hint) == blkg)
360 rcu_assign_pointer(blkcg->blkg_hint, NULL);
361
03aa264a
TH
362 /*
363 * Put the reference taken at the time of creation so that when all
364 * queues are gone, group can be destroyed.
365 */
366 blkg_put(blkg);
367}
368
9f13ef67
TH
369/**
370 * blkg_destroy_all - destroy all blkgs associated with a request_queue
371 * @q: request_queue of interest
9f13ef67 372 *
3c96cb32 373 * Destroy all blkgs associated with @q.
9f13ef67 374 */
3c96cb32 375static void blkg_destroy_all(struct request_queue *q)
72e06c25 376{
3c798398 377 struct blkcg_gq *blkg, *n;
72e06c25 378
6d18b008 379 lockdep_assert_held(q->queue_lock);
72e06c25 380
9f13ef67 381 list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
3c798398 382 struct blkcg *blkcg = blkg->blkcg;
72e06c25 383
9f13ef67
TH
384 spin_lock(&blkcg->lock);
385 blkg_destroy(blkg);
386 spin_unlock(&blkcg->lock);
72e06c25 387 }
65635cbc
JN
388
389 /*
390 * The root blkg is destroyed. Just clear the pointer since
391 * root_rl does not take a reference on the root blkg.
392 */
393 q->root_blkg = NULL;
394 q->root_rl.blkg = NULL;
72e06c25
TH
395}
396
1adaf3dd
TH
397static void blkg_rcu_free(struct rcu_head *rcu_head)
398{
3c798398 399 blkg_free(container_of(rcu_head, struct blkcg_gq, rcu_head));
1adaf3dd
TH
400}
401
3c798398 402void __blkg_release(struct blkcg_gq *blkg)
1adaf3dd 403{
3c547865 404 /* release the blkcg and parent blkg refs this blkg has been holding */
1adaf3dd 405 css_put(&blkg->blkcg->css);
3c547865
TH
406 if (blkg->parent)
407 blkg_put(blkg->parent);
1adaf3dd
TH
408
409 /*
410 * A group is freed in an RCU manner. But holding an RCU lock does not
411 * mean that one can access all the fields of blkg and assume they
412 * are valid. For example, don't try to follow throtl_data and
413 * request queue links.
414 *
415 * Having a reference to a blkg under RCU allows access only to
416 * values local to the group, like group stats and group rate limits.
417 */
418 call_rcu(&blkg->rcu_head, blkg_rcu_free);
419}
420EXPORT_SYMBOL_GPL(__blkg_release);
421
a051661c
TH
422/*
423 * Helper used by blk_queue_for_each_rl() to step to the next rl. It's a
424 * bit tricky because the root blkg uses @q->root_rl instead of its own rl.
425 */
426struct request_list *__blk_queue_next_rl(struct request_list *rl,
427 struct request_queue *q)
428{
429 struct list_head *ent;
430 struct blkcg_gq *blkg;
431
432 /*
433 * Determine the current blkg list_head. The first entry is
434 * root_rl which is off @q->blkg_list and mapped to the head.
435 */
436 if (rl == &q->root_rl) {
437 ent = &q->blkg_list;
65c77fd9
JN
438 /* There are no more block groups, hence no request lists */
439 if (list_empty(ent))
440 return NULL;
a051661c
TH
441 } else {
442 blkg = container_of(rl, struct blkcg_gq, rl);
443 ent = &blkg->q_node;
444 }
445
446 /* walk to the next list_head, skip the root blkg */
447 ent = ent->next;
448 if (ent == &q->root_blkg->q_node)
449 ent = ent->next;
450 if (ent == &q->blkg_list)
451 return NULL;
452
453 blkg = container_of(ent, struct blkcg_gq, q_node);
454 return &blkg->rl;
455}
456
3c798398
TH
457static int blkcg_reset_stats(struct cgroup *cgroup, struct cftype *cftype,
458 u64 val)
303a3acb 459{
3c798398
TH
460 struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
461 struct blkcg_gq *blkg;
303a3acb 462 struct hlist_node *n;
bc0d6501 463 int i;
303a3acb 464
bc0d6501 465 mutex_lock(&blkcg_pol_mutex);
303a3acb 466 spin_lock_irq(&blkcg->lock);
997a026c
TH
467
468 /*
469 * Note that stat reset is racy - it doesn't synchronize against
470 * stat updates. This is a debug feature which shouldn't exist
471 * anyway. If you get hit by a race, retry.
472 */
303a3acb 473 hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
8bd435b3 474 for (i = 0; i < BLKCG_MAX_POLS; i++) {
3c798398 475 struct blkcg_policy *pol = blkcg_policy[i];
549d3aa8 476
a2b1693b 477 if (blkcg_policy_enabled(blkg->q, pol) &&
f9fcc2d3
TH
478 pol->pd_reset_stats_fn)
479 pol->pd_reset_stats_fn(blkg);
bc0d6501 480 }
303a3acb 481 }
f0bdc8cd 482
303a3acb 483 spin_unlock_irq(&blkcg->lock);
bc0d6501 484 mutex_unlock(&blkcg_pol_mutex);
303a3acb
DS
485 return 0;
486}
487
3c798398 488static const char *blkg_dev_name(struct blkcg_gq *blkg)
303a3acb 489{
d3d32e69
TH
490 /* some drivers (floppy) instantiate a queue w/o disk registered */
491 if (blkg->q->backing_dev_info.dev)
492 return dev_name(blkg->q->backing_dev_info.dev);
493 return NULL;
303a3acb
DS
494}
495
d3d32e69
TH
496/**
497 * blkcg_print_blkgs - helper for printing per-blkg data
498 * @sf: seq_file to print to
499 * @blkcg: blkcg of interest
500 * @prfill: fill function to print out a blkg
501 * @pol: policy in question
502 * @data: data to be passed to @prfill
503 * @show_total: to print out sum of prfill return values or not
504 *
505 * This function invokes @prfill on each blkg of @blkcg if pd for the
506 * policy specified by @pol exists. @prfill is invoked with @sf, the
507 * policy data and @data. If @show_total is %true, the sum of the return
508 * values from @prfill is printed with "Total" label at the end.
509 *
510 * This is to be used to construct print functions for the
511 * cftype->read_seq_string method.
512 */
3c798398 513void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
f95a04af
TH
514 u64 (*prfill)(struct seq_file *,
515 struct blkg_policy_data *, int),
3c798398 516 const struct blkcg_policy *pol, int data,
ec399347 517 bool show_total)
5624a4e4 518{
3c798398 519 struct blkcg_gq *blkg;
d3d32e69
TH
520 struct hlist_node *n;
521 u64 total = 0;
5624a4e4 522
d3d32e69
TH
523 spin_lock_irq(&blkcg->lock);
524 hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
a2b1693b 525 if (blkcg_policy_enabled(blkg->q, pol))
f95a04af 526 total += prfill(sf, blkg->pd[pol->plid], data);
d3d32e69
TH
527 spin_unlock_irq(&blkcg->lock);
528
529 if (show_total)
530 seq_printf(sf, "Total %llu\n", (unsigned long long)total);
531}
829fdb50 532EXPORT_SYMBOL_GPL(blkcg_print_blkgs);
d3d32e69
TH
533
534/**
535 * __blkg_prfill_u64 - prfill helper for a single u64 value
536 * @sf: seq_file to print to
f95a04af 537 * @pd: policy private data of interest
d3d32e69
TH
538 * @v: value to print
539 *
f95a04af 540 * Print @v to @sf for the device assocaited with @pd.
d3d32e69 541 */
f95a04af 542u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
d3d32e69 543{
f95a04af 544 const char *dname = blkg_dev_name(pd->blkg);
d3d32e69
TH
545
546 if (!dname)
547 return 0;
548
549 seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
550 return v;
551}
829fdb50 552EXPORT_SYMBOL_GPL(__blkg_prfill_u64);
d3d32e69
TH
553
554/**
555 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
556 * @sf: seq_file to print to
f95a04af 557 * @pd: policy private data of interest
d3d32e69
TH
558 * @rwstat: rwstat to print
559 *
f95a04af 560 * Print @rwstat to @sf for the device assocaited with @pd.
d3d32e69 561 */
f95a04af 562u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
829fdb50 563 const struct blkg_rwstat *rwstat)
d3d32e69
TH
564{
565 static const char *rwstr[] = {
566 [BLKG_RWSTAT_READ] = "Read",
567 [BLKG_RWSTAT_WRITE] = "Write",
568 [BLKG_RWSTAT_SYNC] = "Sync",
569 [BLKG_RWSTAT_ASYNC] = "Async",
570 };
f95a04af 571 const char *dname = blkg_dev_name(pd->blkg);
d3d32e69
TH
572 u64 v;
573 int i;
574
575 if (!dname)
576 return 0;
577
578 for (i = 0; i < BLKG_RWSTAT_NR; i++)
579 seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
580 (unsigned long long)rwstat->cnt[i]);
581
582 v = rwstat->cnt[BLKG_RWSTAT_READ] + rwstat->cnt[BLKG_RWSTAT_WRITE];
583 seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
584 return v;
585}
b50da39f 586EXPORT_SYMBOL_GPL(__blkg_prfill_rwstat);
d3d32e69 587
5bc4afb1
TH
588/**
589 * blkg_prfill_stat - prfill callback for blkg_stat
590 * @sf: seq_file to print to
f95a04af
TH
591 * @pd: policy private data of interest
592 * @off: offset to the blkg_stat in @pd
5bc4afb1
TH
593 *
594 * prfill callback for printing a blkg_stat.
595 */
f95a04af 596u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off)
d3d32e69 597{
f95a04af 598 return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off));
d3d32e69 599}
5bc4afb1 600EXPORT_SYMBOL_GPL(blkg_prfill_stat);
d3d32e69 601
5bc4afb1
TH
602/**
603 * blkg_prfill_rwstat - prfill callback for blkg_rwstat
604 * @sf: seq_file to print to
f95a04af
TH
605 * @pd: policy private data of interest
606 * @off: offset to the blkg_rwstat in @pd
5bc4afb1
TH
607 *
608 * prfill callback for printing a blkg_rwstat.
609 */
f95a04af
TH
610u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
611 int off)
d3d32e69 612{
f95a04af 613 struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off);
d3d32e69 614
f95a04af 615 return __blkg_prfill_rwstat(sf, pd, &rwstat);
d3d32e69 616}
5bc4afb1 617EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);
d3d32e69 618
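/*
 * Illustrative sketch (not part of this file): a policy's cftype read
 * handler typically pairs blkcg_print_blkgs() with one of the prfill
 * helpers above.  The read_seq_string signature is assumed from the
 * cgroup interface of this era, and example_policy and the handler
 * name are assumptions; cft->private is used to carry the stat offset,
 * much as the existing policies do.
 */
static int example_print_rwstat(struct cgroup *cgrp, struct cftype *cft,
				struct seq_file *sf)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);

	blkcg_print_blkgs(sf, blkcg, blkg_prfill_rwstat, &example_policy,
			  cft->private, true);
	return 0;
}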
16b3de66
TH
619/**
620 * blkg_stat_recursive_sum - collect hierarchical blkg_stat
621 * @pd: policy private data of interest
622 * @off: offset to the blkg_stat in @pd
623 *
624 * Collect the blkg_stat specified by @off from @pd and all its online
625 * descendants and return the sum. The caller must be holding the queue
626 * lock for online tests.
627 */
628u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off)
629{
630 struct blkcg_policy *pol = blkcg_policy[pd->plid];
631 struct blkcg_gq *pos_blkg;
632 struct cgroup *pos_cgrp;
633 u64 sum;
634
635 lockdep_assert_held(pd->blkg->q->queue_lock);
636
637 sum = blkg_stat_read((void *)pd + off);
638
639 rcu_read_lock();
640 blkg_for_each_descendant_pre(pos_blkg, pos_cgrp, pd_to_blkg(pd)) {
641 struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol);
642 struct blkg_stat *stat = (void *)pos_pd + off;
643
644 if (pos_blkg->online)
645 sum += blkg_stat_read(stat);
646 }
647 rcu_read_unlock();
648
649 return sum;
650}
651EXPORT_SYMBOL_GPL(blkg_stat_recursive_sum);
652
653/**
654 * blkg_rwstat_recursive_sum - collect hierarchical blkg_rwstat
655 * @pd: policy private data of interest
656 * @off: offset to the blkg_rwstat in @pd
657 *
658 * Collect the blkg_rwstat specified by @off from @pd and all its online
659 * descendants and return the sum. The caller must be holding the queue
660 * lock for online tests.
661 */
662struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
663 int off)
664{
665 struct blkcg_policy *pol = blkcg_policy[pd->plid];
666 struct blkcg_gq *pos_blkg;
667 struct cgroup *pos_cgrp;
668 struct blkg_rwstat sum;
669 int i;
670
671 lockdep_assert_held(pd->blkg->q->queue_lock);
672
673 sum = blkg_rwstat_read((void *)pd + off);
674
675 rcu_read_lock();
676 blkg_for_each_descendant_pre(pos_blkg, pos_cgrp, pd_to_blkg(pd)) {
677 struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol);
678 struct blkg_rwstat *rwstat = (void *)pos_pd + off;
679 struct blkg_rwstat tmp;
680
681 if (!pos_blkg->online)
682 continue;
683
684 tmp = blkg_rwstat_read(rwstat);
685
686 for (i = 0; i < BLKG_RWSTAT_NR; i++)
687 sum.cnt[i] += tmp.cnt[i];
688 }
689 rcu_read_unlock();
690
691 return sum;
692}
693EXPORT_SYMBOL_GPL(blkg_rwstat_recursive_sum);
694
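/*
 * Illustrative sketch (not part of this file): printing hierarchical
 * totals is just a prfill callback that feeds the recursive sum into
 * __blkg_prfill_rwstat().  The function name is made up; the offset
 * convention matches blkg_prfill_rwstat() above.
 */
static u64 example_prfill_rwstat_recursive(struct seq_file *sf,
					   struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat sum = blkg_rwstat_recursive_sum(pd, off);

	return __blkg_prfill_rwstat(sf, pd, &sum);
}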
3a8b31d3
TH
695/**
696 * blkg_conf_prep - parse and prepare for per-blkg config update
697 * @blkcg: target block cgroup
da8b0662 698 * @pol: target policy
3a8b31d3
TH
699 * @input: input string
700 * @ctx: blkg_conf_ctx to be filled
701 *
702 * Parse per-blkg config update from @input and initialize @ctx with the
703 * result. @ctx->blkg points to the blkg to be updated and @ctx->v the new
da8b0662
TH
704 * value. This function returns with RCU read lock and queue lock held and
705 * must be paired with blkg_conf_finish().
3a8b31d3 706 */
3c798398
TH
707int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
708 const char *input, struct blkg_conf_ctx *ctx)
da8b0662 709 __acquires(rcu) __acquires(disk->queue->queue_lock)
34d0f179 710{
3a8b31d3 711 struct gendisk *disk;
3c798398 712 struct blkcg_gq *blkg;
726fa694
TH
713 unsigned int major, minor;
714 unsigned long long v;
715 int part, ret;
34d0f179 716
726fa694
TH
717 if (sscanf(input, "%u:%u %llu", &major, &minor, &v) != 3)
718 return -EINVAL;
3a8b31d3 719
726fa694 720 disk = get_gendisk(MKDEV(major, minor), &part);
4bfd482e 721 if (!disk || part)
726fa694 722 return -EINVAL;
e56da7e2
TH
723
724 rcu_read_lock();
4bfd482e 725 spin_lock_irq(disk->queue->queue_lock);
da8b0662 726
a2b1693b 727 if (blkcg_policy_enabled(disk->queue, pol))
3c96cb32 728 blkg = blkg_lookup_create(blkcg, disk->queue);
a2b1693b
TH
729 else
730 blkg = ERR_PTR(-EINVAL);
e56da7e2 731
4bfd482e
TH
732 if (IS_ERR(blkg)) {
733 ret = PTR_ERR(blkg);
3a8b31d3 734 rcu_read_unlock();
da8b0662 735 spin_unlock_irq(disk->queue->queue_lock);
3a8b31d3
TH
736 put_disk(disk);
737 /*
738 * If queue was bypassing, we should retry. Do so after a
739 * short msleep(). It isn't strictly necessary but queue
740 * can be bypassing for some time and it's always nice to
741 * avoid busy looping.
742 */
743 if (ret == -EBUSY) {
744 msleep(10);
745 ret = restart_syscall();
7702e8f4 746 }
726fa694 747 return ret;
062a644d 748 }
3a8b31d3
TH
749
750 ctx->disk = disk;
751 ctx->blkg = blkg;
726fa694
TH
752 ctx->v = v;
753 return 0;
34d0f179 754}
829fdb50 755EXPORT_SYMBOL_GPL(blkg_conf_prep);
34d0f179 756
3a8b31d3
TH
757/**
758 * blkg_conf_finish - finish up per-blkg config update
759 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
760 *
761 * Finish up after per-blkg config update. This function must be paired
762 * with blkg_conf_prep().
763 */
829fdb50 764void blkg_conf_finish(struct blkg_conf_ctx *ctx)
da8b0662 765 __releases(ctx->disk->queue->queue_lock) __releases(rcu)
34d0f179 766{
da8b0662 767 spin_unlock_irq(ctx->disk->queue->queue_lock);
3a8b31d3
TH
768 rcu_read_unlock();
769 put_disk(ctx->disk);
34d0f179 770}
829fdb50 771EXPORT_SYMBOL_GPL(blkg_conf_finish);
34d0f179 772
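/*
 * Illustrative sketch (not part of this file): a per-device config write
 * handler built on blkg_conf_prep()/blkg_conf_finish().  The write_string
 * signature is assumed from the cgroup interface of this era, and the
 * handler name and example_policy are assumptions; the pattern mirrors
 * how per-device weight/limit files are implemented by the policies.
 */
static int example_set_limit(struct cgroup *cgrp, struct cftype *cft,
			     const char *buf)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
	struct blkg_conf_ctx ctx;
	int ret;

	ret = blkg_conf_prep(blkcg, &example_policy, buf, &ctx);
	if (ret)
		return ret;

	/* rcu read lock and queue lock are held here, see above */
	if (ctx.v) {
		/* apply ctx.v to blkg_to_pd(ctx.blkg, &example_policy) here */
		ret = 0;
	} else {
		ret = -EINVAL;
	}

	blkg_conf_finish(&ctx);
	return ret;
}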
3c798398 773struct cftype blkcg_files[] = {
84c124da
DS
774 {
775 .name = "reset_stats",
3c798398 776 .write_u64 = blkcg_reset_stats,
22084190 777 },
4baf6e33 778 { } /* terminate */
31e4c28d
VG
779};
780
9f13ef67 781/**
92fb9748 782 * blkcg_css_offline - cgroup css_offline callback
9f13ef67
TH
783 * @cgroup: cgroup of interest
784 *
785 * This function is called when @cgroup is about to go away and is responsible
786 * for shooting down all blkgs associated with @cgroup. blkgs should be
787 * removed while holding both q and blkcg locks. As blkcg lock is nested
788 * inside q lock, this function performs reverse double lock dancing.
789 *
790 * This is the blkcg counterpart of ioc_release_fn().
791 */
92fb9748 792static void blkcg_css_offline(struct cgroup *cgroup)
31e4c28d 793{
3c798398 794 struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
b1c35769 795
9f13ef67 796 spin_lock_irq(&blkcg->lock);
7ee9c562 797
9f13ef67 798 while (!hlist_empty(&blkcg->blkg_list)) {
3c798398
TH
799 struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
800 struct blkcg_gq, blkcg_node);
c875f4d0 801 struct request_queue *q = blkg->q;
b1c35769 802
9f13ef67
TH
803 if (spin_trylock(q->queue_lock)) {
804 blkg_destroy(blkg);
805 spin_unlock(q->queue_lock);
806 } else {
807 spin_unlock_irq(&blkcg->lock);
9f13ef67 808 cpu_relax();
a5567932 809 spin_lock_irq(&blkcg->lock);
0f3942a3 810 }
9f13ef67 811 }
b1c35769 812
9f13ef67 813 spin_unlock_irq(&blkcg->lock);
7ee9c562
TH
814}
815
92fb9748 816static void blkcg_css_free(struct cgroup *cgroup)
7ee9c562 817{
3c798398 818 struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
7ee9c562 819
3c798398 820 if (blkcg != &blkcg_root)
67523c48 821 kfree(blkcg);
31e4c28d
VG
822}
823
92fb9748 824static struct cgroup_subsys_state *blkcg_css_alloc(struct cgroup *cgroup)
31e4c28d 825{
9a9e8a26 826 static atomic64_t id_seq = ATOMIC64_INIT(0);
3c798398 827 struct blkcg *blkcg;
0341509f 828 struct cgroup *parent = cgroup->parent;
31e4c28d 829
0341509f 830 if (!parent) {
3c798398 831 blkcg = &blkcg_root;
31e4c28d
VG
832 goto done;
833 }
834
31e4c28d
VG
835 blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
836 if (!blkcg)
837 return ERR_PTR(-ENOMEM);
838
3381cb8d 839 blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT;
e71357e1 840 blkcg->cfq_leaf_weight = CFQ_WEIGHT_DEFAULT;
9a9e8a26 841 blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
31e4c28d
VG
842done:
843 spin_lock_init(&blkcg->lock);
a637120e 844 INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC);
31e4c28d
VG
845 INIT_HLIST_HEAD(&blkcg->blkg_list);
846
847 return &blkcg->css;
848}
849
5efd6113
TH
850/**
851 * blkcg_init_queue - initialize blkcg part of request queue
852 * @q: request_queue to initialize
853 *
854 * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
855 * part of new request_queue @q.
856 *
857 * RETURNS:
858 * 0 on success, -errno on failure.
859 */
860int blkcg_init_queue(struct request_queue *q)
861{
862 might_sleep();
863
3c96cb32 864 return blk_throtl_init(q);
5efd6113
TH
865}
866
867/**
868 * blkcg_drain_queue - drain blkcg part of request_queue
869 * @q: request_queue to drain
870 *
871 * Called from blk_drain_queue(). Responsible for draining blkcg part.
872 */
873void blkcg_drain_queue(struct request_queue *q)
874{
875 lockdep_assert_held(q->queue_lock);
876
877 blk_throtl_drain(q);
878}
879
880/**
881 * blkcg_exit_queue - exit and release blkcg part of request_queue
882 * @q: request_queue being released
883 *
884 * Called from blk_release_queue(). Responsible for exiting blkcg part.
885 */
886void blkcg_exit_queue(struct request_queue *q)
887{
6d18b008 888 spin_lock_irq(q->queue_lock);
3c96cb32 889 blkg_destroy_all(q);
6d18b008
TH
890 spin_unlock_irq(q->queue_lock);
891
5efd6113
TH
892 blk_throtl_exit(q);
893}
894
31e4c28d
VG
895/*
896 * We cannot support shared io contexts, as we have no means to support
897 * two tasks with the same ioc in two different groups without major rework
898 * of the main cic data structures. For now we allow a task to change
899 * its cgroup only if it's the only owner of its ioc.
900 */
3c798398 901static int blkcg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
31e4c28d 902{
bb9d97b6 903 struct task_struct *task;
31e4c28d
VG
904 struct io_context *ioc;
905 int ret = 0;
906
907 /* task_lock() is needed to avoid races with exit_io_context() */
bb9d97b6
TH
908 cgroup_taskset_for_each(task, cgrp, tset) {
909 task_lock(task);
910 ioc = task->io_context;
911 if (ioc && atomic_read(&ioc->nr_tasks) > 1)
912 ret = -EINVAL;
913 task_unlock(task);
914 if (ret)
915 break;
916 }
31e4c28d
VG
917 return ret;
918}
919
676f7c8f
TH
920struct cgroup_subsys blkio_subsys = {
921 .name = "blkio",
92fb9748
TH
922 .css_alloc = blkcg_css_alloc,
923 .css_offline = blkcg_css_offline,
924 .css_free = blkcg_css_free,
3c798398 925 .can_attach = blkcg_can_attach,
676f7c8f 926 .subsys_id = blkio_subsys_id,
3c798398 927 .base_cftypes = blkcg_files,
676f7c8f 928 .module = THIS_MODULE,
8c7f6edb
TH
929
930 /*
931 * blkio subsystem is utterly broken in terms of hierarchy support.
932 * It treats all cgroups equally regardless of where they're
933 * located in the hierarchy - all cgroups are treated as if they're
934 * right below the root. Fix it and remove the following.
935 */
936 .broken_hierarchy = true,
676f7c8f
TH
937};
938EXPORT_SYMBOL_GPL(blkio_subsys);
939
a2b1693b
TH
940/**
941 * blkcg_activate_policy - activate a blkcg policy on a request_queue
942 * @q: request_queue of interest
943 * @pol: blkcg policy to activate
944 *
945 * Activate @pol on @q. Requires %GFP_KERNEL context. @q goes through
946 * bypass mode to populate its blkgs with policy_data for @pol.
947 *
948 * Activation happens with @q bypassed, so nobody would be accessing blkgs
949 * from IO path. Update of each blkg is protected by both queue and blkcg
950 * locks so that holding either lock and testing blkcg_policy_enabled() is
951 * always enough for dereferencing policy data.
952 *
953 * The caller is responsible for synchronizing [de]activations and policy
954 * [un]registrations. Returns 0 on success, -errno on failure.
955 */
956int blkcg_activate_policy(struct request_queue *q,
3c798398 957 const struct blkcg_policy *pol)
a2b1693b
TH
958{
959 LIST_HEAD(pds);
86cde6b6 960 struct blkcg_gq *blkg, *new_blkg;
a2b1693b
TH
961 struct blkg_policy_data *pd, *n;
962 int cnt = 0, ret;
15974993 963 bool preloaded;
a2b1693b
TH
964
965 if (blkcg_policy_enabled(q, pol))
966 return 0;
967
15974993 968 /* preallocations for root blkg */
86cde6b6
TH
969 new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
970 if (!new_blkg)
15974993
TH
971 return -ENOMEM;
972
973 preloaded = !radix_tree_preload(GFP_KERNEL);
974
a2b1693b
TH
975 blk_queue_bypass_start(q);
976
86cde6b6
TH
977 /*
978 * Make sure the root blkg exists and count the existing blkgs. As
979 * @q is bypassing at this point, blkg_lookup_create() can't be
980 * used. Open code it.
981 */
a2b1693b
TH
982 spin_lock_irq(q->queue_lock);
983
984 rcu_read_lock();
86cde6b6
TH
985 blkg = __blkg_lookup(&blkcg_root, q, false);
986 if (blkg)
987 blkg_free(new_blkg);
988 else
989 blkg = blkg_create(&blkcg_root, q, new_blkg);
a2b1693b
TH
990 rcu_read_unlock();
991
15974993
TH
992 if (preloaded)
993 radix_tree_preload_end();
994
a2b1693b
TH
995 if (IS_ERR(blkg)) {
996 ret = PTR_ERR(blkg);
997 goto out_unlock;
998 }
999 q->root_blkg = blkg;
a051661c 1000 q->root_rl.blkg = blkg;
a2b1693b
TH
1001
1002 list_for_each_entry(blkg, &q->blkg_list, q_node)
1003 cnt++;
1004
1005 spin_unlock_irq(q->queue_lock);
1006
1007 /* allocate policy_data for all existing blkgs */
1008 while (cnt--) {
f95a04af 1009 pd = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node);
a2b1693b
TH
1010 if (!pd) {
1011 ret = -ENOMEM;
1012 goto out_free;
1013 }
1014 list_add_tail(&pd->alloc_node, &pds);
1015 }
1016
1017 /*
1018 * Install the allocated pds. With @q bypassing, no new blkg
1019 * should have been created while the queue lock was dropped.
1020 */
1021 spin_lock_irq(q->queue_lock);
1022
1023 list_for_each_entry(blkg, &q->blkg_list, q_node) {
1024 if (WARN_ON(list_empty(&pds))) {
1025 /* umm... this shouldn't happen, just abort */
1026 ret = -ENOMEM;
1027 goto out_unlock;
1028 }
1029 pd = list_first_entry(&pds, struct blkg_policy_data, alloc_node);
1030 list_del_init(&pd->alloc_node);
1031
1032 /* grab blkcg lock too while installing @pd on @blkg */
1033 spin_lock(&blkg->blkcg->lock);
1034
1035 blkg->pd[pol->plid] = pd;
1036 pd->blkg = blkg;
b276a876 1037 pd->plid = pol->plid;
f9fcc2d3 1038 pol->pd_init_fn(blkg);
a2b1693b
TH
1039
1040 spin_unlock(&blkg->blkcg->lock);
1041 }
1042
1043 __set_bit(pol->plid, q->blkcg_pols);
1044 ret = 0;
1045out_unlock:
1046 spin_unlock_irq(q->queue_lock);
1047out_free:
1048 blk_queue_bypass_end(q);
1049 list_for_each_entry_safe(pd, n, &pds, alloc_node)
1050 kfree(pd);
1051 return ret;
1052}
1053EXPORT_SYMBOL_GPL(blkcg_activate_policy);
1054
1055/**
1056 * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
1057 * @q: request_queue of interest
1058 * @pol: blkcg policy to deactivate
1059 *
1060 * Deactivate @pol on @q. Follows the same synchronization rules as
1061 * blkcg_activate_policy().
1062 */
1063void blkcg_deactivate_policy(struct request_queue *q,
3c798398 1064 const struct blkcg_policy *pol)
a2b1693b 1065{
3c798398 1066 struct blkcg_gq *blkg;
a2b1693b
TH
1067
1068 if (!blkcg_policy_enabled(q, pol))
1069 return;
1070
1071 blk_queue_bypass_start(q);
1072 spin_lock_irq(q->queue_lock);
1073
1074 __clear_bit(pol->plid, q->blkcg_pols);
1075
6d18b008
TH
1076 /* if no policy is left, no need for blkgs - shoot them down */
1077 if (bitmap_empty(q->blkcg_pols, BLKCG_MAX_POLS))
1078 blkg_destroy_all(q);
1079
a2b1693b
TH
1080 list_for_each_entry(blkg, &q->blkg_list, q_node) {
1081 /* grab blkcg lock too while removing @pd from @blkg */
1082 spin_lock(&blkg->blkcg->lock);
1083
f427d909
TH
1084 if (pol->pd_offline_fn)
1085 pol->pd_offline_fn(blkg);
f9fcc2d3
TH
1086 if (pol->pd_exit_fn)
1087 pol->pd_exit_fn(blkg);
a2b1693b
TH
1088
1089 kfree(blkg->pd[pol->plid]);
1090 blkg->pd[pol->plid] = NULL;
1091
1092 spin_unlock(&blkg->blkcg->lock);
1093 }
1094
1095 spin_unlock_irq(q->queue_lock);
1096 blk_queue_bypass_end(q);
1097}
1098EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
1099
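/*
 * Illustrative sketch (not part of this file): a policy enables itself on
 * a queue from its per-queue init path and tears down on exit.  The
 * function names and example_policy are assumptions; the existing
 * policies follow this shape.
 */
static int example_init_queue(struct request_queue *q)
{
	/* populates every existing blkg on @q with our policy data */
	return blkcg_activate_policy(q, &example_policy);
}

static void example_exit_queue(struct request_queue *q)
{
	/* frees our policy data and clears our bit in q->blkcg_pols */
	blkcg_deactivate_policy(q, &example_policy);
}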
8bd435b3 1100/**
3c798398
TH
1101 * blkcg_policy_register - register a blkcg policy
1102 * @pol: blkcg policy to register
8bd435b3 1103 *
3c798398
TH
1104 * Register @pol with blkcg core. Might sleep and @pol may be modified on
1105 * successful registration. Returns 0 on success and -errno on failure.
8bd435b3 1106 */
3c798398 1107int blkcg_policy_register(struct blkcg_policy *pol)
3e252066 1108{
8bd435b3 1109 int i, ret;
e8989fae 1110
f95a04af
TH
1111 if (WARN_ON(pol->pd_size < sizeof(struct blkg_policy_data)))
1112 return -EINVAL;
1113
bc0d6501
TH
1114 mutex_lock(&blkcg_pol_mutex);
1115
8bd435b3
TH
1116 /* find an empty slot */
1117 ret = -ENOSPC;
1118 for (i = 0; i < BLKCG_MAX_POLS; i++)
3c798398 1119 if (!blkcg_policy[i])
8bd435b3
TH
1120 break;
1121 if (i >= BLKCG_MAX_POLS)
1122 goto out_unlock;
035d10b2 1123
8bd435b3 1124 /* register and update blkgs */
3c798398
TH
1125 pol->plid = i;
1126 blkcg_policy[i] = pol;
8bd435b3 1127
8bd435b3 1128 /* everything is in place, add intf files for the new policy */
3c798398
TH
1129 if (pol->cftypes)
1130 WARN_ON(cgroup_add_cftypes(&blkio_subsys, pol->cftypes));
8bd435b3
TH
1131 ret = 0;
1132out_unlock:
bc0d6501 1133 mutex_unlock(&blkcg_pol_mutex);
8bd435b3 1134 return ret;
3e252066 1135}
3c798398 1136EXPORT_SYMBOL_GPL(blkcg_policy_register);
3e252066 1137
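/*
 * Illustrative sketch (not part of this file): the policy descriptor a
 * blkcg policy registers from its module init.  All names here
 * (example_pd, example_pd_init, example_policy, example_init/exit) are
 * assumptions for the example.  pd_size must cover blkg_policy_data,
 * which the policy embeds as the first member of its own per-blkg data.
 */
struct example_pd {
	struct blkg_policy_data pd;	/* must come first */
	u64 limit;
};

static struct blkcg_policy example_policy;

static void example_pd_init(struct blkcg_gq *blkg)
{
	struct example_pd *epd = container_of(blkg_to_pd(blkg, &example_policy),
					      struct example_pd, pd);

	epd->limit = 0;		/* per-blkg default: no limit */
}

static struct blkcg_policy example_policy = {
	.pd_size	= sizeof(struct example_pd),
	.pd_init_fn	= example_pd_init,
};

static int __init example_init(void)
{
	return blkcg_policy_register(&example_policy);
}
module_init(example_init);

static void __exit example_exit(void)
{
	blkcg_policy_unregister(&example_policy);
}
module_exit(example_exit);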
8bd435b3 1138/**
3c798398
TH
1139 * blkcg_policy_unregister - unregister a blkcg policy
1140 * @pol: blkcg policy to unregister
8bd435b3 1141 *
3c798398 1142 * Undo blkcg_policy_register(@pol). Might sleep.
8bd435b3 1143 */
3c798398 1144void blkcg_policy_unregister(struct blkcg_policy *pol)
3e252066 1145{
bc0d6501
TH
1146 mutex_lock(&blkcg_pol_mutex);
1147
3c798398 1148 if (WARN_ON(blkcg_policy[pol->plid] != pol))
8bd435b3
TH
1149 goto out_unlock;
1150
1151 /* kill the intf files first */
3c798398
TH
1152 if (pol->cftypes)
1153 cgroup_rm_cftypes(&blkio_subsys, pol->cftypes);
44ea53de 1154
8bd435b3 1155 /* unregister and update blkgs */
3c798398 1156 blkcg_policy[pol->plid] = NULL;
8bd435b3 1157out_unlock:
bc0d6501 1158 mutex_unlock(&blkcg_pol_mutex);
3e252066 1159}
3c798398 1160EXPORT_SYMBOL_GPL(blkcg_policy_unregister);