/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include "blk-cgroup.h"
#include "blk.h"

#define MAX_KEY_LEN 100

static DEFINE_MUTEX(blkcg_pol_mutex);

struct blkcg blkcg_root = { .cfq_weight = 2 * CFQ_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkcg_root);

static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];

static bool blkcg_policy_enabled(struct request_queue *q,
                                 const struct blkcg_policy *pol)
{
        return pol && test_bit(pol->plid, q->blkcg_pols);
}

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkcg_gq *blkg)
{
        int i;

        if (!blkg)
                return;

        for (i = 0; i < BLKCG_MAX_POLS; i++) {
                struct blkcg_policy *pol = blkcg_policy[i];
                struct blkg_policy_data *pd = blkg->pd[i];

                if (!pd)
                        continue;

                if (pol && pol->pd_exit_fn)
                        pol->pd_exit_fn(blkg);

                kfree(pd);
        }

        blk_exit_rl(&blkg->rl);
        kfree(blkg);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 * @gfp_mask: allocation mask to use
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
                                   gfp_t gfp_mask)
{
        struct blkcg_gq *blkg;
        int i;

        /* alloc and init base part */
        blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
        if (!blkg)
                return NULL;

        blkg->q = q;
        INIT_LIST_HEAD(&blkg->q_node);
        blkg->blkcg = blkcg;
        blkg->refcnt = 1;

        /* root blkg uses @q->root_rl, init rl only for !root blkgs */
        if (blkcg != &blkcg_root) {
                if (blk_init_rl(&blkg->rl, q, gfp_mask))
                        goto err_free;
                blkg->rl.blkg = blkg;
        }

        for (i = 0; i < BLKCG_MAX_POLS; i++) {
                struct blkcg_policy *pol = blkcg_policy[i];
                struct blkg_policy_data *pd;

                if (!blkcg_policy_enabled(q, pol))
                        continue;

                /* alloc per-policy data and attach it to blkg */
                pd = kzalloc_node(pol->pd_size, gfp_mask, q->node);
                if (!pd)
                        goto err_free;

                blkg->pd[i] = pd;
                pd->blkg = blkg;

                /* invoke per-policy init */
                if (pol->pd_init_fn)
                        pol->pd_init_fn(blkg);
        }

        return blkg;

err_free:
        blkg_free(blkg);
        return NULL;
}

static struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
                                      struct request_queue *q, bool update_hint)
{
        struct blkcg_gq *blkg;

        blkg = rcu_dereference(blkcg->blkg_hint);
        if (blkg && blkg->q == q)
                return blkg;

        /*
         * Hint didn't match.  Look up from the radix tree.  Note that the
         * hint can only be updated under queue_lock as otherwise @blkg
         * could have already been removed from blkg_tree.  The caller is
         * responsible for grabbing queue_lock if @update_hint.
         */
        blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
        if (blkg && blkg->q == q) {
                if (update_hint) {
                        lockdep_assert_held(q->queue_lock);
                        rcu_assign_pointer(blkcg->blkg_hint, blkg);
                }
                return blkg;
        }

        return NULL;
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
 * - see blk_queue_bypass_start() for details.
 */
struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q)
{
        WARN_ON_ONCE(!rcu_read_lock_held());

        if (unlikely(blk_queue_bypass(q)))
                return NULL;
        return __blkg_lookup(blkcg, q, false);
}
EXPORT_SYMBOL_GPL(blkg_lookup);

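/*
 * Usage sketch (illustrative only, not part of the kernel tree): a hot
 * path looks the group up under the RCU read lock and must tolerate a
 * %NULL return while @q is bypassing.  The hypothetical helper below
 * only inspects group-local state, which is all an RCU-side lookup
 * permits.
 */
#if 0	/* example only */
static bool example_blkg_exists(struct blkcg *blkcg, struct request_queue *q)
{
        struct blkcg_gq *blkg;
        bool found;

        rcu_read_lock();
        blkg = blkg_lookup(blkcg, q);	/* may be NULL, never ERR_PTR */
        found = blkg != NULL;
        rcu_read_unlock();
        return found;
}
#endif
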
/*
 * If @new_blkg is %NULL, this function tries to allocate a new one as
 * necessary using %GFP_ATOMIC.  @new_blkg is always consumed on return.
 */
static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
                                    struct request_queue *q,
                                    struct blkcg_gq *new_blkg)
{
        struct blkcg_gq *blkg;
        int ret;

        WARN_ON_ONCE(!rcu_read_lock_held());
        lockdep_assert_held(q->queue_lock);

        /* blkg holds a reference to blkcg */
        if (!css_tryget(&blkcg->css)) {
                ret = -EINVAL;
                goto err_free_blkg;
        }

        /* allocate */
        if (!new_blkg) {
                new_blkg = blkg_alloc(blkcg, q, GFP_ATOMIC);
                if (unlikely(!new_blkg)) {
                        ret = -ENOMEM;
                        goto err_put_css;
                }
        }
        blkg = new_blkg;

        /* insert */
        spin_lock(&blkcg->lock);
        ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
        if (likely(!ret)) {
                hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
                list_add(&blkg->q_node, &q->blkg_list);
        }
        spin_unlock(&blkcg->lock);

        if (!ret)
                return blkg;

err_put_css:
        css_put(&blkcg->css);
err_free_blkg:
        blkg_free(new_blkg);
        return ERR_PTR(ret);
}

/**
 * blkg_lookup_create - lookup blkg, try to create one if not there
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  If it doesn't exist, try to
 * create one.  This function should be called under RCU read lock and
 * @q->queue_lock.
 *
 * Returns pointer to the looked up or created blkg on success, ERR_PTR()
 * value on error.  If @q is dead, returns ERR_PTR(-EINVAL).  If @q is not
 * dead and bypassing, returns ERR_PTR(-EBUSY).
 */
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
                                    struct request_queue *q)
{
        struct blkcg_gq *blkg;

        WARN_ON_ONCE(!rcu_read_lock_held());
        lockdep_assert_held(q->queue_lock);

        /*
         * This could be the first entry point of blkcg implementation and
         * we shouldn't allow anything to go through for a bypassing queue.
         */
        if (unlikely(blk_queue_bypass(q)))
                return ERR_PTR(blk_queue_dying(q) ? -EINVAL : -EBUSY);

        blkg = __blkg_lookup(blkcg, q, true);
        if (blkg)
                return blkg;

        return blkg_create(blkcg, q, NULL);
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);

static void blkg_destroy(struct blkcg_gq *blkg)
{
        struct blkcg *blkcg = blkg->blkcg;

        lockdep_assert_held(blkg->q->queue_lock);
        lockdep_assert_held(&blkcg->lock);

        /* Something is wrong if we are trying to remove the same group twice */
        WARN_ON_ONCE(list_empty(&blkg->q_node));
        WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));

        radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
        list_del_init(&blkg->q_node);
        hlist_del_init_rcu(&blkg->blkcg_node);

        /*
         * Both setting the lookup hint to and clearing it from @blkg are
         * done under queue_lock.  If it's not pointing to @blkg now, it
         * never will.  Hint assignment itself can race safely.
         */
        if (rcu_dereference_raw(blkcg->blkg_hint) == blkg)
                rcu_assign_pointer(blkcg->blkg_hint, NULL);

        /*
         * Put the reference taken at the time of creation so that when all
         * queues are gone, the group can be destroyed.
         */
        blkg_put(blkg);
}

/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 *
 * Destroy all blkgs associated with @q.
 */
static void blkg_destroy_all(struct request_queue *q)
{
        struct blkcg_gq *blkg, *n;

        lockdep_assert_held(q->queue_lock);

        list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
                struct blkcg *blkcg = blkg->blkcg;

                spin_lock(&blkcg->lock);
                blkg_destroy(blkg);
                spin_unlock(&blkcg->lock);
        }

        /*
         * The root blkg is destroyed.  Just clear the pointers since
         * root_rl does not take a reference on the root blkg.
         */
        q->root_blkg = NULL;
        q->root_rl.blkg = NULL;
}

static void blkg_rcu_free(struct rcu_head *rcu_head)
{
        blkg_free(container_of(rcu_head, struct blkcg_gq, rcu_head));
}

void __blkg_release(struct blkcg_gq *blkg)
{
        /* release the extra blkcg reference this blkg has been holding */
        css_put(&blkg->blkcg->css);

        /*
         * A group is freed in rcu manner.  But having an rcu lock does not
         * mean that one can access all the fields of blkg and assume these
         * are valid.  For example, don't try to follow throtl_data and
         * request queue links.
         *
         * Having a reference to blkg under an rcu allows access to only
         * values local to groups like group stats and group rate limits.
         */
        call_rcu(&blkg->rcu_head, blkg_rcu_free);
}
EXPORT_SYMBOL_GPL(__blkg_release);

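/*
 * For context: the get/put side lives in blk-cgroup.h.  The sketch below
 * paraphrases the header inlines of this era (illustrative, not verbatim
 * kernel code) to show how __blkg_release() ends up being called.
 */
#if 0	/* illustrative paraphrase only */
static inline void example_blkg_put(struct blkcg_gq *blkg)
{
        lockdep_assert_held(blkg->q->queue_lock);
        WARN_ON_ONCE(blkg->refcnt <= 0);
        if (!--blkg->refcnt)
                __blkg_release(blkg);	/* css_put() + RCU-deferred free */
}
#endif
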
/*
 * The next-entry helper used by blk_queue_for_each_rl().  It's a bit
 * tricky because the root blkg uses @q->root_rl instead of its own rl.
 */
struct request_list *__blk_queue_next_rl(struct request_list *rl,
                                         struct request_queue *q)
{
        struct list_head *ent;
        struct blkcg_gq *blkg;

        /*
         * Determine the current blkg list_head.  The first entry is
         * root_rl which is off @q->blkg_list and mapped to the head.
         */
        if (rl == &q->root_rl) {
                ent = &q->blkg_list;
                /* There are no more block groups, hence no request lists */
                if (list_empty(ent))
                        return NULL;
        } else {
                blkg = container_of(rl, struct blkcg_gq, rl);
                ent = &blkg->q_node;
        }

        /* walk to the next list_head, skip root blkcg */
        ent = ent->next;
        if (ent == &q->root_blkg->q_node)
                ent = ent->next;
        if (ent == &q->blkg_list)
                return NULL;

        blkg = container_of(ent, struct blkcg_gq, q_node);
        return &blkg->rl;
}

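/*
 * The consuming macro lives in blkdev.h; at this point in time it reads
 * roughly as follows (shown for context; exact formatting is an
 * assumption):
 *
 *	#define blk_queue_for_each_rl(rl, q)	\
 *		for ((rl) = &(q)->root_rl; (rl); \
 *		     (rl) = __blk_queue_next_rl((rl), (q)))
 *
 * Iteration starts from @q->root_rl and the helper above supplies each
 * subsequent per-blkg request_list until it returns %NULL.
 */
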
static int blkcg_reset_stats(struct cgroup *cgroup, struct cftype *cftype,
                             u64 val)
{
        struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
        struct blkcg_gq *blkg;
        struct hlist_node *n;
        int i;

        mutex_lock(&blkcg_pol_mutex);
        spin_lock_irq(&blkcg->lock);

        /*
         * Note that stat reset is racy - it doesn't synchronize against
         * stat updates.  This is a debug feature which shouldn't exist
         * anyway.  If you get hit by a race, retry.
         */
        hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
                for (i = 0; i < BLKCG_MAX_POLS; i++) {
                        struct blkcg_policy *pol = blkcg_policy[i];

                        if (blkcg_policy_enabled(blkg->q, pol) &&
                            pol->pd_reset_stats_fn)
                                pol->pd_reset_stats_fn(blkg);
                }
        }

        spin_unlock_irq(&blkcg->lock);
        mutex_unlock(&blkcg_pol_mutex);
        return 0;
}

static const char *blkg_dev_name(struct blkcg_gq *blkg)
{
        /* some drivers (floppy) instantiate a queue w/o disk registered */
        if (blkg->q->backing_dev_info.dev)
                return dev_name(blkg->q->backing_dev_info.dev);
        return NULL;
}

/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: to print out sum of prfill return values or not
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 * policy data and @data.  If @show_total is %true, the sum of the return
 * values from @prfill is printed with "Total" label at the end.
 *
 * This is to be used to construct print functions for
 * cftype->read_seq_string method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
                       u64 (*prfill)(struct seq_file *,
                                     struct blkg_policy_data *, int),
                       const struct blkcg_policy *pol, int data,
                       bool show_total)
{
        struct blkcg_gq *blkg;
        struct hlist_node *n;
        u64 total = 0;

        spin_lock_irq(&blkcg->lock);
        hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
                if (blkcg_policy_enabled(blkg->q, pol))
                        total += prfill(sf, blkg->pd[pol->plid], data);
        spin_unlock_irq(&blkcg->lock);

        if (show_total)
                seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);

/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
        const char *dname = blkg_dev_name(pd->blkg);

        if (!dname)
                return 0;

        seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
        return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);

/**
 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @rwstat: rwstat to print
 *
 * Print @rwstat to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
                         const struct blkg_rwstat *rwstat)
{
        static const char *rwstr[] = {
                [BLKG_RWSTAT_READ]	= "Read",
                [BLKG_RWSTAT_WRITE]	= "Write",
                [BLKG_RWSTAT_SYNC]	= "Sync",
                [BLKG_RWSTAT_ASYNC]	= "Async",
        };
        const char *dname = blkg_dev_name(pd->blkg);
        u64 v;
        int i;

        if (!dname)
                return 0;

        for (i = 0; i < BLKG_RWSTAT_NR; i++)
                seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
                           (unsigned long long)rwstat->cnt[i]);

        v = rwstat->cnt[BLKG_RWSTAT_READ] + rwstat->cnt[BLKG_RWSTAT_WRITE];
        seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
        return v;
}

/**
 * blkg_prfill_stat - prfill callback for blkg_stat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_stat in @pd
 *
 * prfill callback for printing a blkg_stat.
 */
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off)
{
        return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off));
}
EXPORT_SYMBOL_GPL(blkg_prfill_stat);

/**
 * blkg_prfill_rwstat - prfill callback for blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_rwstat in @pd
 *
 * prfill callback for printing a blkg_rwstat.
 */
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
                       int off)
{
        struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off);

        return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);

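/*
 * Usage sketch (illustrative, not part of this file): a policy typically
 * wraps blkcg_print_blkgs() in a cftype->read_seq_string handler and
 * passes the offset of the stat inside its pd via cft->private.
 * example_blkcg_policy below is hypothetical.
 */
#if 0	/* example only */
static int example_print_rwstat(struct cgroup *cgrp, struct cftype *cft,
                                struct seq_file *sf)
{
        struct blkcg *blkcg = cgroup_to_blkcg(cgrp);

        blkcg_print_blkgs(sf, blkcg, blkg_prfill_rwstat,
                          &example_blkcg_policy, cft->private, true);
        return 0;
}
#endif
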
/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @pol: target policy
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result.  @ctx->blkg points to the blkg to be updated and @ctx->v the new
 * value.  This function returns with RCU read lock and queue lock held and
 * must be paired with blkg_conf_finish().
 */
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
                   const char *input, struct blkg_conf_ctx *ctx)
        __acquires(rcu) __acquires(disk->queue->queue_lock)
{
        struct gendisk *disk;
        struct blkcg_gq *blkg;
        unsigned int major, minor;
        unsigned long long v;
        int part, ret;

        if (sscanf(input, "%u:%u %llu", &major, &minor, &v) != 3)
                return -EINVAL;

        disk = get_gendisk(MKDEV(major, minor), &part);
        if (!disk || part)
                return -EINVAL;

        rcu_read_lock();
        spin_lock_irq(disk->queue->queue_lock);

        if (blkcg_policy_enabled(disk->queue, pol))
                blkg = blkg_lookup_create(blkcg, disk->queue);
        else
                blkg = ERR_PTR(-EINVAL);

        if (IS_ERR(blkg)) {
                ret = PTR_ERR(blkg);
                rcu_read_unlock();
                spin_unlock_irq(disk->queue->queue_lock);
                put_disk(disk);
                /*
                 * If queue was bypassing, we should retry.  Do so after a
                 * short msleep().  It isn't strictly necessary but queue
                 * can be bypassing for some time and it's always nice to
                 * avoid busy looping.
                 */
                if (ret == -EBUSY) {
                        msleep(10);
                        ret = restart_syscall();
                }
                return ret;
        }

        ctx->disk = disk;
        ctx->blkg = blkg;
        ctx->v = v;
        return 0;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);

/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after per-blkg config update.  This function must be paired
 * with blkg_conf_prep().
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
        __releases(ctx->disk->queue->queue_lock) __releases(rcu)
{
        spin_unlock_irq(ctx->disk->queue->queue_lock);
        rcu_read_unlock();
        put_disk(ctx->disk);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);

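/*
 * Usage sketch (illustrative, not part of this file): a per-device
 * configuration write pairs blkg_conf_prep() with blkg_conf_finish();
 * between the two calls @ctx.blkg is pinned with the RCU read lock and
 * queue lock held.  example_blkcg_policy and example_apply_weight() are
 * hypothetical.
 */
#if 0	/* example only */
static int example_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
                                     const char *buf)
{
        struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
        struct blkg_conf_ctx ctx;
        int ret;

        ret = blkg_conf_prep(blkcg, &example_blkcg_policy, buf, &ctx);
        if (ret)	/* on -EBUSY, prep already arranged a syscall restart */
                return ret;

        example_apply_weight(ctx.blkg, ctx.v);	/* hypothetical helper */

        blkg_conf_finish(&ctx);
        return 0;
}
#endif
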
struct cftype blkcg_files[] = {
        {
                .name = "reset_stats",
                .write_u64 = blkcg_reset_stats,
        },
        { }	/* terminate */
};

/**
 * blkcg_css_offline - cgroup css_offline callback
 * @cgroup: cgroup of interest
 *
 * This function is called when @cgroup is about to go away and is
 * responsible for shooting down all blkgs associated with @cgroup.
 * blkgs should be removed while holding both q and blkcg locks.  As
 * blkcg lock is nested inside q lock, this function performs reverse
 * double lock dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static void blkcg_css_offline(struct cgroup *cgroup)
{
        struct blkcg *blkcg = cgroup_to_blkcg(cgroup);

        spin_lock_irq(&blkcg->lock);

        while (!hlist_empty(&blkcg->blkg_list)) {
                struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
                                                    struct blkcg_gq, blkcg_node);
                struct request_queue *q = blkg->q;

                if (spin_trylock(q->queue_lock)) {
                        blkg_destroy(blkg);
                        spin_unlock(q->queue_lock);
                } else {
                        spin_unlock_irq(&blkcg->lock);
                        cpu_relax();
                        spin_lock_irq(&blkcg->lock);
                }
        }

        spin_unlock_irq(&blkcg->lock);
}

static void blkcg_css_free(struct cgroup *cgroup)
{
        struct blkcg *blkcg = cgroup_to_blkcg(cgroup);

        if (blkcg != &blkcg_root)
                kfree(blkcg);
}

static struct cgroup_subsys_state *blkcg_css_alloc(struct cgroup *cgroup)
{
        static atomic64_t id_seq = ATOMIC64_INIT(0);
        struct blkcg *blkcg;
        struct cgroup *parent = cgroup->parent;

        if (!parent) {
                blkcg = &blkcg_root;
                goto done;
        }

        blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
        if (!blkcg)
                return ERR_PTR(-ENOMEM);

        blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT;
        blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
done:
        spin_lock_init(&blkcg->lock);
        INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC);
        INIT_HLIST_HEAD(&blkcg->blkg_list);

        return &blkcg->css;
}

/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node().  Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
        might_sleep();

        return blk_throtl_init(q);
}

/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue().  Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
        lockdep_assert_held(q->queue_lock);

        blk_throtl_drain(q);
}

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue().  Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
        spin_lock_irq(q->queue_lock);
        blkg_destroy_all(q);
        spin_unlock_irq(q->queue_lock);

        blk_throtl_exit(q);
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures.  For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkcg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
        struct task_struct *task;
        struct io_context *ioc;
        int ret = 0;

        /* task_lock() is needed to avoid races with exit_io_context() */
        cgroup_taskset_for_each(task, cgrp, tset) {
                task_lock(task);
                ioc = task->io_context;
                if (ioc && atomic_read(&ioc->nr_tasks) > 1)
                        ret = -EINVAL;
                task_unlock(task);
                if (ret)
                        break;
        }
        return ret;
}

struct cgroup_subsys blkio_subsys = {
        .name = "blkio",
        .css_alloc = blkcg_css_alloc,
        .css_offline = blkcg_css_offline,
        .css_free = blkcg_css_free,
        .can_attach = blkcg_can_attach,
        .subsys_id = blkio_subsys_id,
        .base_cftypes = blkcg_files,
        .module = THIS_MODULE,

        /*
         * blkio subsystem is utterly broken in terms of hierarchy support.
         * It treats all cgroups equally regardless of where they're
         * located in the hierarchy - all cgroups are treated as if they're
         * right below the root.  Fix it and remove the following.
         */
        .broken_hierarchy = true,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

/**
 * blkcg_activate_policy - activate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to activate
 *
 * Activate @pol on @q.  Requires %GFP_KERNEL context.  @q goes through
 * bypass mode to populate its blkgs with policy_data for @pol.
 *
 * Activation happens with @q bypassed, so nobody would be accessing blkgs
 * from IO path.  Update of each blkg is protected by both queue and blkcg
 * locks so that holding either lock and testing blkcg_policy_enabled() is
 * always enough for dereferencing policy data.
 *
 * The caller is responsible for synchronizing [de]activations and policy
 * [un]registrations.  Returns 0 on success, -errno on failure.
 */
int blkcg_activate_policy(struct request_queue *q,
                          const struct blkcg_policy *pol)
{
        LIST_HEAD(pds);
        struct blkcg_gq *blkg, *new_blkg;
        struct blkg_policy_data *pd, *n;
        int cnt = 0, ret;
        bool preloaded;

        if (blkcg_policy_enabled(q, pol))
                return 0;

        /* preallocations for root blkg */
        new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
        if (!new_blkg)
                return -ENOMEM;

        preloaded = !radix_tree_preload(GFP_KERNEL);

        blk_queue_bypass_start(q);

        /*
         * Make sure the root blkg exists and count the existing blkgs.  As
         * @q is bypassing at this point, blkg_lookup_create() can't be
         * used.  Open code it.
         */
        spin_lock_irq(q->queue_lock);

        rcu_read_lock();
        blkg = __blkg_lookup(&blkcg_root, q, false);
        if (blkg)
                blkg_free(new_blkg);
        else
                blkg = blkg_create(&blkcg_root, q, new_blkg);
        rcu_read_unlock();

        if (preloaded)
                radix_tree_preload_end();

        if (IS_ERR(blkg)) {
                ret = PTR_ERR(blkg);
                goto out_unlock;
        }
        q->root_blkg = blkg;
        q->root_rl.blkg = blkg;

        list_for_each_entry(blkg, &q->blkg_list, q_node)
                cnt++;

        spin_unlock_irq(q->queue_lock);

        /* allocate policy_data for all existing blkgs */
        while (cnt--) {
                pd = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node);
                if (!pd) {
                        ret = -ENOMEM;
                        goto out_free;
                }
                list_add_tail(&pd->alloc_node, &pds);
        }

        /*
         * Install the allocated pds.  With @q bypassing, no new blkg
         * should have been created while the queue lock was dropped.
         */
        spin_lock_irq(q->queue_lock);

        list_for_each_entry(blkg, &q->blkg_list, q_node) {
                if (WARN_ON(list_empty(&pds))) {
                        /* umm... this shouldn't happen, just abort */
                        ret = -ENOMEM;
                        goto out_unlock;
                }
                pd = list_first_entry(&pds, struct blkg_policy_data, alloc_node);
                list_del_init(&pd->alloc_node);

                /* grab blkcg lock too while installing @pd on @blkg */
                spin_lock(&blkg->blkcg->lock);

                blkg->pd[pol->plid] = pd;
                pd->blkg = blkg;
                pol->pd_init_fn(blkg);

                spin_unlock(&blkg->blkcg->lock);
        }

        __set_bit(pol->plid, q->blkcg_pols);
        ret = 0;
out_unlock:
        spin_unlock_irq(q->queue_lock);
out_free:
        blk_queue_bypass_end(q);
        list_for_each_entry_safe(pd, n, &pds, alloc_node)
                kfree(pd);
        return ret;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);

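/*
 * Usage sketch (illustrative, not part of this file): an elevator or
 * throttling driver activates its policy while setting up a queue and
 * deactivates it on teardown.  example_blkcg_policy is hypothetical.
 */
#if 0	/* example only */
static int example_init_queue(struct request_queue *q)
{
        int ret;

        ret = blkcg_activate_policy(q, &example_blkcg_policy);
        if (ret)
                return ret;

        /* ... driver setup; the matching teardown path would call: */
        /* blkcg_deactivate_policy(q, &example_blkcg_policy); */
        return 0;
}
#endif
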
/**
 * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to deactivate
 *
 * Deactivate @pol on @q.  Follows the same synchronization rules as
 * blkcg_activate_policy().
 */
void blkcg_deactivate_policy(struct request_queue *q,
                             const struct blkcg_policy *pol)
{
        struct blkcg_gq *blkg;

        if (!blkcg_policy_enabled(q, pol))
                return;

        blk_queue_bypass_start(q);
        spin_lock_irq(q->queue_lock);

        __clear_bit(pol->plid, q->blkcg_pols);

        /* if no policy is left, no need for blkgs - shoot them down */
        if (bitmap_empty(q->blkcg_pols, BLKCG_MAX_POLS))
                blkg_destroy_all(q);

        list_for_each_entry(blkg, &q->blkg_list, q_node) {
                /* grab blkcg lock too while removing @pd from @blkg */
                spin_lock(&blkg->blkcg->lock);

                if (pol->pd_exit_fn)
                        pol->pd_exit_fn(blkg);

                kfree(blkg->pd[pol->plid]);
                blkg->pd[pol->plid] = NULL;

                spin_unlock(&blkg->blkcg->lock);
        }

        spin_unlock_irq(q->queue_lock);
        blk_queue_bypass_end(q);
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);

/**
 * blkcg_policy_register - register a blkcg policy
 * @pol: blkcg policy to register
 *
 * Register @pol with blkcg core.  Might sleep and @pol may be modified on
 * successful registration.  Returns 0 on success and -errno on failure.
 */
int blkcg_policy_register(struct blkcg_policy *pol)
{
        int i, ret;

        if (WARN_ON(pol->pd_size < sizeof(struct blkg_policy_data)))
                return -EINVAL;

        mutex_lock(&blkcg_pol_mutex);

        /* find an empty slot */
        ret = -ENOSPC;
        for (i = 0; i < BLKCG_MAX_POLS; i++)
                if (!blkcg_policy[i])
                        break;
        if (i >= BLKCG_MAX_POLS)
                goto out_unlock;

        /* register and update blkgs */
        pol->plid = i;
        blkcg_policy[i] = pol;

        /* everything is in place, add intf files for the new policy */
        if (pol->cftypes)
                WARN_ON(cgroup_add_cftypes(&blkio_subsys, pol->cftypes));
        ret = 0;
out_unlock:
        mutex_unlock(&blkcg_pol_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(blkcg_policy_register);

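/*
 * Usage sketch (illustrative, not part of this file): a policy module
 * fills in a blkcg_policy - pd_size must cover its pd-embedding struct,
 * with struct blkg_policy_data as the first member, since blkg_alloc()
 * above allocates pd_size bytes and points blkg->pd[] at the start of
 * that allocation - and registers it from module init.  All example_*
 * names are hypothetical.
 */
#if 0	/* example only */
struct example_group {
        struct blkg_policy_data pd;	/* must be the first member */
        u64 weight;
};

static struct blkcg_policy example_blkcg_policy = {
        .pd_size	= sizeof(struct example_group),
        .cftypes	= example_files,
        .pd_init_fn	= example_pd_init,
        .pd_exit_fn	= example_pd_exit,
};

static int __init example_init(void)
{
        return blkcg_policy_register(&example_blkcg_policy);
}

static void __exit example_exit(void)
{
        blkcg_policy_unregister(&example_blkcg_policy);
}
#endif
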
/**
 * blkcg_policy_unregister - unregister a blkcg policy
 * @pol: blkcg policy to unregister
 *
 * Undo blkcg_policy_register(@pol).  Might sleep.
 */
void blkcg_policy_unregister(struct blkcg_policy *pol)
{
        mutex_lock(&blkcg_pol_mutex);

        if (WARN_ON(blkcg_policy[pol->plid] != pol))
                goto out_unlock;

        /* kill the intf files first */
        if (pol->cftypes)
                cgroup_rm_cftypes(&blkio_subsys, pol->cftypes);

        /* unregister and update blkgs */
        blkcg_policy[pol->plid] = NULL;
out_unlock:
        mutex_unlock(&blkcg_pol_mutex);
}
EXPORT_SYMBOL_GPL(blkcg_policy_unregister);