/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/blk-cgroup.h>
#include "blk.h"

#define MAX_KEY_LEN 100

static DEFINE_MUTEX(blkcg_pol_mutex);

struct blkcg blkcg_root = { .cfq_weight = 2 * CFQ_WEIGHT_DEFAULT,
			    .cfq_leaf_weight = 2 * CFQ_WEIGHT_DEFAULT, };
EXPORT_SYMBOL_GPL(blkcg_root);

struct cgroup_subsys_state * const blkcg_root_css = &blkcg_root.css;

static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];

static bool blkcg_policy_enabled(struct request_queue *q,
				 const struct blkcg_policy *pol)
{
	return pol && test_bit(pol->plid, q->blkcg_pols);
}

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkcg_gq *blkg)
{
	int i;

	if (!blkg)
		return;

	for (i = 0; i < BLKCG_MAX_POLS; i++)
		kfree(blkg->pd[i]);

	blk_exit_rl(&blkg->rl);
	kfree(blkg);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 * @gfp_mask: allocation mask to use
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
				   gfp_t gfp_mask)
{
	struct blkcg_gq *blkg;
	int i;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
	if (!blkg)
		return NULL;

	blkg->q = q;
	INIT_LIST_HEAD(&blkg->q_node);
	blkg->blkcg = blkcg;
	atomic_set(&blkg->refcnt, 1);

	/* root blkg uses @q->root_rl, init rl only for !root blkgs */
	if (blkcg != &blkcg_root) {
		if (blk_init_rl(&blkg->rl, q, gfp_mask))
			goto err_free;
		blkg->rl.blkg = blkg;
	}

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkg_policy_data *pd;

		if (!blkcg_policy_enabled(q, pol))
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = kzalloc_node(pol->pd_size, gfp_mask, q->node);
		if (!pd)
			goto err_free;

		blkg->pd[i] = pd;
		pd->blkg = blkg;
		pd->plid = i;
	}

	return blkg;

err_free:
	blkg_free(blkg);
	return NULL;
}

/**
 * __blkg_lookup - internal version of blkg_lookup()
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 * @update_hint: whether to update lookup hint with the result or not
 *
 * This is the internal version and shouldn't be used by policy
 * implementations. It looks up blkgs for the @blkcg - @q pair regardless of
 * @q's bypass state. If @update_hint is %true, the caller should be
 * holding @q->queue_lock and the lookup hint is updated on success.
 */
struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,
			       bool update_hint)
{
	struct blkcg_gq *blkg;

	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->q == q)
		return blkg;

	/*
	 * Hint didn't match. Look up from the radix tree. Note that the
	 * hint can only be updated under queue_lock as otherwise @blkg
	 * could have already been removed from blkg_tree. The caller is
	 * responsible for grabbing queue_lock if @update_hint.
	 */
	blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
	if (blkg && blkg->q == q) {
		if (update_hint) {
			lockdep_assert_held(q->queue_lock);
			rcu_assign_pointer(blkcg->blkg_hint, blkg);
		}
		return blkg;
	}

	return NULL;
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair. This function should be called
 * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
 * - see blk_queue_bypass_start() for details.
 */
struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (unlikely(blk_queue_bypass(q)))
		return NULL;
	return __blkg_lookup(blkcg, q, false);
}
EXPORT_SYMBOL_GPL(blkg_lookup);

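/*
 * Editor's note: a minimal usage sketch, not part of the original file.
 * do_something() is a hypothetical placeholder for whatever per-group
 * state the caller reads; the locking follows the rule documented above:
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(blkcg, q);
 *	if (blkg)
 *		do_something(blkg);
 *	rcu_read_unlock();
 */
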
/*
 * If @new_blkg is %NULL, this function tries to allocate a new one as
 * necessary using %GFP_ATOMIC. @new_blkg is always consumed on return.
 */
static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
				    struct request_queue *q,
				    struct blkcg_gq *new_blkg)
{
	struct blkcg_gq *blkg;
	int i, ret;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/* blkg holds a reference to blkcg */
	if (!css_tryget_online(&blkcg->css)) {
		ret = -EINVAL;
		goto err_free_blkg;
	}

	/* allocate */
	if (!new_blkg) {
		new_blkg = blkg_alloc(blkcg, q, GFP_ATOMIC);
		if (unlikely(!new_blkg)) {
			ret = -ENOMEM;
			goto err_put_css;
		}
	}
	blkg = new_blkg;

	/* link parent */
	if (blkcg_parent(blkcg)) {
		blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
		if (WARN_ON_ONCE(!blkg->parent)) {
			ret = -EINVAL;
			goto err_put_css;
		}
		blkg_get(blkg->parent);
	}

	/* invoke per-policy init */
	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_init_fn)
			pol->pd_init_fn(blkg);
	}

	/* insert */
	spin_lock(&blkcg->lock);
	ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
	if (likely(!ret)) {
		hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
		list_add(&blkg->q_node, &q->blkg_list);

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkg->pd[i] && pol->pd_online_fn)
				pol->pd_online_fn(blkg);
		}
	}
	blkg->online = true;
	spin_unlock(&blkcg->lock);

	if (!ret)
		return blkg;

	/* @blkg failed to be fully initialized, use the usual release path */
	blkg_put(blkg);
	return ERR_PTR(ret);

err_put_css:
	css_put(&blkcg->css);
err_free_blkg:
	blkg_free(new_blkg);
	return ERR_PTR(ret);
}

/**
 * blkg_lookup_create - lookup blkg, try to create one if not there
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair. If it doesn't exist, try to
 * create one. blkg creation is performed recursively from blkcg_root such
 * that all non-root blkgs have access to the parent blkg. This function
 * should be called under RCU read lock and @q->queue_lock.
 *
 * Returns pointer to the looked up or created blkg on success, ERR_PTR()
 * value on error. If @q is dead, returns ERR_PTR(-EINVAL). If @q is not
 * dead and bypassing, returns ERR_PTR(-EBUSY).
 */
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q)
{
	struct blkcg_gq *blkg;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)))
		return ERR_PTR(blk_queue_dying(q) ? -EINVAL : -EBUSY);

	blkg = __blkg_lookup(blkcg, q, true);
	if (blkg)
		return blkg;

	/*
	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
	 * non-root blkgs have access to their parents.
	 */
	while (true) {
		struct blkcg *pos = blkcg;
		struct blkcg *parent = blkcg_parent(blkcg);

		while (parent && !__blkg_lookup(parent, q, false)) {
			pos = parent;
			parent = blkcg_parent(parent);
		}

		blkg = blkg_create(pos, q, NULL);
		if (pos == blkcg || IS_ERR(blkg))
			return blkg;
	}
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);

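/*
 * Editor's note: an illustrative sketch (not from the original file) of
 * calling blkg_lookup_create() per the rules above - both the RCU read
 * lock and @q->queue_lock held, and the ERR_PTR() return checked:
 *
 *	rcu_read_lock();
 *	spin_lock_irq(q->queue_lock);
 *	blkg = blkg_lookup_create(blkcg, q);
 *	if (IS_ERR(blkg))
 *		ret = PTR_ERR(blkg);
 *	spin_unlock_irq(q->queue_lock);
 *	rcu_read_unlock();
 */
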
static void blkg_destroy(struct blkcg_gq *blkg)
{
	struct blkcg *blkcg = blkg->blkcg;
	int i;

	lockdep_assert_held(blkg->q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* Something is wrong if we are trying to remove the same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_offline_fn)
			pol->pd_offline_fn(blkg);
	}
	blkg->online = false;

	radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);

	/*
	 * Both setting the lookup hint to and clearing it from @blkg are done
	 * under queue_lock. If it's not pointing to @blkg now, it never
	 * will. Hint assignment itself can race safely.
	 */
	if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
		rcu_assign_pointer(blkcg->blkg_hint, NULL);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, the group can be destroyed.
	 */
	blkg_put(blkg);
}

/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 *
 * Destroy all blkgs associated with @q.
 */
static void blkg_destroy_all(struct request_queue *q)
{
	struct blkcg_gq *blkg, *n;

	lockdep_assert_held(q->queue_lock);

	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);
	}
}

/*
 * A group is RCU protected, but having an rcu lock does not mean that one
 * can access all the fields of blkg and assume these are valid. For
 * example, don't try to follow throtl_data and request queue links.
 *
 * Having a reference to blkg under an rcu allows accesses to only values
 * local to groups like group stats and group rate limits.
 */
void __blkg_release_rcu(struct rcu_head *rcu_head)
{
	struct blkcg_gq *blkg = container_of(rcu_head, struct blkcg_gq, rcu_head);
	int i;

	/* tell policies that this one is being freed */
	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_exit_fn)
			pol->pd_exit_fn(blkg);
	}

	/* release the blkcg and parent blkg refs this blkg has been holding */
	css_put(&blkg->blkcg->css);
	if (blkg->parent)
		blkg_put(blkg->parent);

	blkg_free(blkg);
}
EXPORT_SYMBOL_GPL(__blkg_release_rcu);

/*
 * The "next" function used by blk_queue_for_each_rl(). It's a bit tricky
 * because the root blkg uses @q->root_rl instead of its own rl.
 */
struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q)
{
	struct list_head *ent;
	struct blkcg_gq *blkg;

	/*
	 * Determine the current blkg list_head. The first entry is
	 * root_rl which is off @q->blkg_list and mapped to the head.
	 */
	if (rl == &q->root_rl) {
		ent = &q->blkg_list;
		/* There are no more block groups, hence no request lists */
		if (list_empty(ent))
			return NULL;
	} else {
		blkg = container_of(rl, struct blkcg_gq, rl);
		ent = &blkg->q_node;
	}

	/* walk to the next list_head, skip root blkcg */
	ent = ent->next;
	if (ent == &q->root_blkg->q_node)
		ent = ent->next;
	if (ent == &q->blkg_list)
		return NULL;

	blkg = container_of(ent, struct blkcg_gq, q_node);
	return &blkg->rl;
}

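/*
 * Editor's note: __blk_queue_next_rl() backs the blk_queue_for_each_rl()
 * iterator macro defined in block/blk.h. An illustrative walk over every
 * request_list of a queue, with inspect_rl() as a hypothetical
 * placeholder:
 *
 *	struct request_list *rl;
 *
 *	blk_queue_for_each_rl(rl, q)
 *		inspect_rl(rl);
 */
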
static int blkcg_reset_stats(struct cgroup_subsys_state *css,
			     struct cftype *cftype, u64 val)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct blkcg_gq *blkg;
	int i;

	/*
	 * XXX: We invoke cgroup_add/rm_cftypes() under blkcg_pol_mutex
	 * which ends up putting cgroup's internal cgroup_tree_mutex under
	 * it; however, cgroup_tree_mutex is nested above cgroup file
	 * active protection and grabbing blkcg_pol_mutex from a cgroup
	 * file operation creates a possible circular dependency. cgroup
	 * internal locking is planned to go through further simplification
	 * and this issue should go away soon. For now, let's trylock
	 * blkcg_pol_mutex and restart the write on failure.
	 *
	 * http://lkml.kernel.org/g/5363C04B.4010400@oracle.com
	 */
	if (!mutex_trylock(&blkcg_pol_mutex))
		return restart_syscall();
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates. This is a debug feature which shouldn't exist
	 * anyway. If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkcg_policy_enabled(blkg->q, pol) &&
			    pol->pd_reset_stats_fn)
				pol->pd_reset_stats_fn(blkg);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	mutex_unlock(&blkcg_pol_mutex);
	return 0;
}

static const char *blkg_dev_name(struct blkcg_gq *blkg)
{
	/* some drivers (floppy) instantiate a queue w/o disk registered */
	if (blkg->q->backing_dev_info.dev)
		return dev_name(blkg->q->backing_dev_info.dev);
	return NULL;
}

/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: whether to print out the sum of the @prfill return values
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists. @prfill is invoked with @sf, the
 * policy data and @data and the matching queue lock held. If @show_total
 * is %true, the sum of the return values from @prfill is printed with
 * "Total" label at the end.
 *
 * This is to be used to construct print functions for the
 * cftype->read_seq_string method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total)
{
	struct blkcg_gq *blkg;
	u64 total = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
		spin_lock_irq(blkg->q->queue_lock);
		if (blkcg_policy_enabled(blkg->q, pol))
			total += prfill(sf, blkg->pd[pol->plid], data);
		spin_unlock_irq(blkg->q->queue_lock);
	}
	rcu_read_unlock();

	if (show_total)
		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);

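/*
 * Editor's note: an illustrative sketch of the intended use, modeled on
 * how contemporaneous policies (e.g. cfq-iosched) wire this up;
 * blkcg_policy_foo and foo_print_rwstat are hypothetical names:
 *
 *	static int foo_print_rwstat(struct seq_file *sf, void *v)
 *	{
 *		blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
 *				  blkg_prfill_rwstat, &blkcg_policy_foo,
 *				  seq_cft(sf)->private, true);
 *		return 0;
 *	}
 */
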
/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
	const char *dname = blkg_dev_name(pd->blkg);

	if (!dname)
		return 0;

	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);

/**
 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @rwstat: rwstat to print
 *
 * Print @rwstat to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat)
{
	static const char *rwstr[] = {
		[BLKG_RWSTAT_READ]	= "Read",
		[BLKG_RWSTAT_WRITE]	= "Write",
		[BLKG_RWSTAT_SYNC]	= "Sync",
		[BLKG_RWSTAT_ASYNC]	= "Async",
	};
	const char *dname = blkg_dev_name(pd->blkg);
	u64 v;
	int i;

	if (!dname)
		return 0;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
			   (unsigned long long)rwstat->cnt[i]);

	v = rwstat->cnt[BLKG_RWSTAT_READ] + rwstat->cnt[BLKG_RWSTAT_WRITE];
	seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_rwstat);

/**
 * blkg_prfill_stat - prfill callback for blkg_stat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_stat in @pd
 *
 * prfill callback for printing a blkg_stat.
 */
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off)
{
	return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off));
}
EXPORT_SYMBOL_GPL(blkg_prfill_stat);

/**
 * blkg_prfill_rwstat - prfill callback for blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_rwstat in @pd
 *
 * prfill callback for printing a blkg_rwstat.
 */
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off);

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);

/**
 * blkg_stat_recursive_sum - collect hierarchical blkg_stat
 * @pd: policy private data of interest
 * @off: offset to the blkg_stat in @pd
 *
 * Collect the blkg_stat specified by @off from @pd and all its online
 * descendants and return the sum. The caller must be holding the queue
 * lock for online tests.
 */
u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off)
{
	struct blkcg_policy *pol = blkcg_policy[pd->plid];
	struct blkcg_gq *pos_blkg;
	struct cgroup_subsys_state *pos_css;
	u64 sum = 0;

	lockdep_assert_held(pd->blkg->q->queue_lock);

	rcu_read_lock();
	blkg_for_each_descendant_pre(pos_blkg, pos_css, pd_to_blkg(pd)) {
		struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol);
		struct blkg_stat *stat = (void *)pos_pd + off;

		if (pos_blkg->online)
			sum += blkg_stat_read(stat);
	}
	rcu_read_unlock();

	return sum;
}
EXPORT_SYMBOL_GPL(blkg_stat_recursive_sum);

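/*
 * Editor's note: an illustrative pairing with the prfill helpers above
 * (foo_prfill_stat_recursive is a hypothetical name). A prfill callback
 * that prints the hierarchical total instead of the local value could
 * look like:
 *
 *	static u64 foo_prfill_stat_recursive(struct seq_file *sf,
 *					     struct blkg_policy_data *pd,
 *					     int off)
 *	{
 *		return __blkg_prfill_u64(sf, pd,
 *					 blkg_stat_recursive_sum(pd, off));
 *	}
 */
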
/**
 * blkg_rwstat_recursive_sum - collect hierarchical blkg_rwstat
 * @pd: policy private data of interest
 * @off: offset to the blkg_rwstat in @pd
 *
 * Collect the blkg_rwstat specified by @off from @pd and all its online
 * descendants and return the sum. The caller must be holding the queue
 * lock for online tests.
 */
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
					     int off)
{
	struct blkcg_policy *pol = blkcg_policy[pd->plid];
	struct blkcg_gq *pos_blkg;
	struct cgroup_subsys_state *pos_css;
	struct blkg_rwstat sum = { };
	int i;

	lockdep_assert_held(pd->blkg->q->queue_lock);

	rcu_read_lock();
	blkg_for_each_descendant_pre(pos_blkg, pos_css, pd_to_blkg(pd)) {
		struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol);
		struct blkg_rwstat *rwstat = (void *)pos_pd + off;
		struct blkg_rwstat tmp;

		if (!pos_blkg->online)
			continue;

		tmp = blkg_rwstat_read(rwstat);

		for (i = 0; i < BLKG_RWSTAT_NR; i++)
			sum.cnt[i] += tmp.cnt[i];
	}
	rcu_read_unlock();

	return sum;
}
EXPORT_SYMBOL_GPL(blkg_rwstat_recursive_sum);

/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @pol: target policy
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result. @ctx->blkg points to the blkg to be updated and @ctx->v the new
 * value. This function returns with RCU read lock and queue lock held and
 * must be paired with blkg_conf_finish().
 */
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   const char *input, struct blkg_conf_ctx *ctx)
	__acquires(rcu) __acquires(disk->queue->queue_lock)
{
	struct gendisk *disk;
	struct blkcg_gq *blkg;
	unsigned int major, minor;
	unsigned long long v;
	int part, ret;

	if (sscanf(input, "%u:%u %llu", &major, &minor, &v) != 3)
		return -EINVAL;

	disk = get_gendisk(MKDEV(major, minor), &part);
	if (!disk || part)
		return -EINVAL;

	rcu_read_lock();
	spin_lock_irq(disk->queue->queue_lock);

	if (blkcg_policy_enabled(disk->queue, pol))
		blkg = blkg_lookup_create(blkcg, disk->queue);
	else
		blkg = ERR_PTR(-EINVAL);

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		rcu_read_unlock();
		spin_unlock_irq(disk->queue->queue_lock);
		put_disk(disk);
		/*
		 * If queue was bypassing, we should retry. Do so after a
		 * short msleep(). It isn't strictly necessary but queue
		 * can be bypassing for some time and it's always nice to
		 * avoid busy looping.
		 */
		if (ret == -EBUSY) {
			msleep(10);
			ret = restart_syscall();
		}
		return ret;
	}

	ctx->disk = disk;
	ctx->blkg = blkg;
	ctx->v = v;
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);

/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after a per-blkg config update. This function must be paired
 * with blkg_conf_prep().
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
	__releases(ctx->disk->queue->queue_lock) __releases(rcu)
{
	spin_unlock_irq(ctx->disk->queue->queue_lock);
	rcu_read_unlock();
	put_disk(ctx->disk);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);

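/*
 * Editor's note: a sketch of the blkg_conf_prep()/blkg_conf_finish()
 * pairing in a cgroup file write handler, modeled on existing config
 * writers; blkcg_policy_foo and foo_apply_limit are hypothetical:
 *
 *	struct blkg_conf_ctx ctx;
 *	int ret;
 *
 *	ret = blkg_conf_prep(blkcg, &blkcg_policy_foo, buf, &ctx);
 *	if (ret)
 *		return ret;
 *
 *	foo_apply_limit(blkg_to_pd(ctx.blkg, &blkcg_policy_foo), ctx.v);
 *
 *	blkg_conf_finish(&ctx);
 *	return 0;
 */
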
struct cftype blkcg_files[] = {
	{
		.name = "reset_stats",
		.write_u64 = blkcg_reset_stats,
	},
	{ }	/* terminate */
};

/**
 * blkcg_css_offline - cgroup css_offline callback
 * @css: css of interest
 *
 * This function is called when @css is about to go away and is responsible
 * for shooting down all blkgs associated with @css. blkgs should be
 * removed while holding both q and blkcg locks. As the blkcg lock is nested
 * inside the q lock, this function performs reverse double lock dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static void blkcg_css_offline(struct cgroup_subsys_state *css)
{
	struct blkcg *blkcg = css_to_blkcg(css);

	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
						    struct blkcg_gq, blkcg_node);
		struct request_queue *q = blkg->q;

		if (spin_trylock(q->queue_lock)) {
			blkg_destroy(blkg);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irq(&blkcg->lock);
			cpu_relax();
			spin_lock_irq(&blkcg->lock);
		}
	}

	spin_unlock_irq(&blkcg->lock);
}

static void blkcg_css_free(struct cgroup_subsys_state *css)
{
	struct blkcg *blkcg = css_to_blkcg(css);

	if (blkcg != &blkcg_root)
		kfree(blkcg);
}

static struct cgroup_subsys_state *
blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct blkcg *blkcg;

	if (!parent_css) {
		blkcg = &blkcg_root;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT;
	blkcg->cfq_leaf_weight = CFQ_WEIGHT_DEFAULT;
done:
	spin_lock_init(&blkcg->lock);
	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	return &blkcg->css;
}

/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
	struct blkcg_gq *new_blkg, *blkg;
	bool preloaded;
	int ret;

	new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
	if (!new_blkg)
		return -ENOMEM;

	preloaded = !radix_tree_preload(GFP_KERNEL);

	/*
	 * Make sure the root blkg exists and count the existing blkgs. As
	 * @q is bypassing at this point, blkg_lookup_create() can't be
	 * used. Open code insertion.
	 */
	rcu_read_lock();
	spin_lock_irq(q->queue_lock);
	blkg = blkg_create(&blkcg_root, q, new_blkg);
	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();

	if (preloaded)
		radix_tree_preload_end();

	if (IS_ERR(blkg)) {
		kfree(new_blkg);
		return PTR_ERR(blkg);
	}

	q->root_blkg = blkg;
	q->root_rl.blkg = blkg;

	ret = blk_throtl_init(q);
	if (ret) {
		spin_lock_irq(q->queue_lock);
		blkg_destroy_all(q);
		spin_unlock_irq(q->queue_lock);
	}
	return ret;
}

/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue(). Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	/*
	 * @q could be exiting and already have destroyed all blkgs as
	 * indicated by NULL root_blkg. If so, don't confuse policies.
	 */
	if (!q->root_blkg)
		return;

	blk_throtl_drain(q);
}

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue(). Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	blkg_destroy_all(q);
	spin_unlock_irq(q->queue_lock);

	blk_throtl_exit(q);
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkcg_can_attach(struct cgroup_subsys_state *css,
			    struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}

struct cgroup_subsys blkio_cgrp_subsys = {
	.css_alloc = blkcg_css_alloc,
	.css_offline = blkcg_css_offline,
	.css_free = blkcg_css_free,
	.can_attach = blkcg_can_attach,
	.legacy_cftypes = blkcg_files,
#ifdef CONFIG_MEMCG
	/*
	 * This ensures that, if available, memcg is automatically enabled
	 * together on the default hierarchy so that the owner cgroup can
	 * be retrieved from writeback pages.
	 */
	.depends_on = 1 << memory_cgrp_id,
#endif
};
EXPORT_SYMBOL_GPL(blkio_cgrp_subsys);

/**
 * blkcg_activate_policy - activate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to activate
 *
 * Activate @pol on @q. Requires %GFP_KERNEL context. @q goes through
 * bypass mode to populate its blkgs with policy_data for @pol.
 *
 * Activation happens with @q bypassed, so nobody would be accessing blkgs
 * from the IO path. Update of each blkg is protected by both queue and blkcg
 * locks so that holding either lock and testing blkcg_policy_enabled() is
 * always enough for dereferencing policy data.
 *
 * The caller is responsible for synchronizing [de]activations and policy
 * [un]registrations. Returns 0 on success, -errno on failure.
 */
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol)
{
	LIST_HEAD(pds);
	struct blkcg_gq *blkg;
	struct blkg_policy_data *pd, *n;
	int cnt = 0, ret;

	if (blkcg_policy_enabled(q, pol))
		return 0;

	/* count and allocate policy_data for all existing blkgs */
	blk_queue_bypass_start(q);
	spin_lock_irq(q->queue_lock);
	list_for_each_entry(blkg, &q->blkg_list, q_node)
		cnt++;
	spin_unlock_irq(q->queue_lock);

	while (cnt--) {
		pd = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node);
		if (!pd) {
			ret = -ENOMEM;
			goto out_free;
		}
		list_add_tail(&pd->alloc_node, &pds);
	}

	/*
	 * Install the allocated pds. With @q bypassing, no new blkg
	 * should have been created while the queue lock was dropped.
	 */
	spin_lock_irq(q->queue_lock);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		if (WARN_ON(list_empty(&pds))) {
			/* umm... this shouldn't happen, just abort */
			ret = -ENOMEM;
			goto out_unlock;
		}
		pd = list_first_entry(&pds, struct blkg_policy_data, alloc_node);
		list_del_init(&pd->alloc_node);

		/* grab blkcg lock too while installing @pd on @blkg */
		spin_lock(&blkg->blkcg->lock);

		blkg->pd[pol->plid] = pd;
		pd->blkg = blkg;
		pd->plid = pol->plid;
		pol->pd_init_fn(blkg);

		spin_unlock(&blkg->blkcg->lock);
	}

	__set_bit(pol->plid, q->blkcg_pols);
	ret = 0;
out_unlock:
	spin_unlock_irq(q->queue_lock);
out_free:
	blk_queue_bypass_end(q);
	list_for_each_entry_safe(pd, n, &pds, alloc_node)
		kfree(pd);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);

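/*
 * Editor's note: an illustrative activation from a policy's queue init
 * path (blkcg_policy_foo is a hypothetical policy):
 *
 *	ret = blkcg_activate_policy(q, &blkcg_policy_foo);
 *	if (ret)
 *		return ret;
 *
 * with the matching teardown on exit:
 *
 *	blkcg_deactivate_policy(q, &blkcg_policy_foo);
 */
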
/**
 * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to deactivate
 *
 * Deactivate @pol on @q. Follows the same synchronization rules as
 * blkcg_activate_policy().
 */
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol)
{
	struct blkcg_gq *blkg;

	if (!blkcg_policy_enabled(q, pol))
		return;

	blk_queue_bypass_start(q);
	spin_lock_irq(q->queue_lock);

	__clear_bit(pol->plid, q->blkcg_pols);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		/* grab blkcg lock too while removing @pd from @blkg */
		spin_lock(&blkg->blkcg->lock);

		if (pol->pd_offline_fn)
			pol->pd_offline_fn(blkg);
		if (pol->pd_exit_fn)
			pol->pd_exit_fn(blkg);

		kfree(blkg->pd[pol->plid]);
		blkg->pd[pol->plid] = NULL;

		spin_unlock(&blkg->blkcg->lock);
	}

	spin_unlock_irq(q->queue_lock);
	blk_queue_bypass_end(q);
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);

/**
 * blkcg_policy_register - register a blkcg policy
 * @pol: blkcg policy to register
 *
 * Register @pol with blkcg core. Might sleep and @pol may be modified on
 * successful registration. Returns 0 on success and -errno on failure.
 */
int blkcg_policy_register(struct blkcg_policy *pol)
{
	int i, ret;

	if (WARN_ON(pol->pd_size < sizeof(struct blkg_policy_data)))
		return -EINVAL;

	mutex_lock(&blkcg_pol_mutex);

	/* find an empty slot */
	ret = -ENOSPC;
	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (!blkcg_policy[i])
			break;
	if (i >= BLKCG_MAX_POLS)
		goto out_unlock;

	/* register and update blkgs */
	pol->plid = i;
	blkcg_policy[i] = pol;

	/* everything is in place, add intf files for the new policy */
	if (pol->cftypes)
		WARN_ON(cgroup_add_legacy_cftypes(&blkio_cgrp_subsys,
						  pol->cftypes));
	ret = 0;
out_unlock:
	mutex_unlock(&blkcg_pol_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_policy_register);

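/*
 * Editor's note: a sketch of a policy definition and its registration,
 * modeled on in-tree users of this interface; every foo_* name is
 * hypothetical:
 *
 *	static struct blkcg_policy blkcg_policy_foo = {
 *		.pd_size	= sizeof(struct foo_group),
 *		.cftypes	= foo_blkcg_files,
 *		.pd_init_fn	= foo_pd_init,
 *		.pd_offline_fn	= foo_pd_offline,
 *		.pd_exit_fn	= foo_pd_exit,
 *	};
 *
 *	ret = blkcg_policy_register(&blkcg_policy_foo);
 *
 * paired at module exit with:
 *
 *	blkcg_policy_unregister(&blkcg_policy_foo);
 */
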
/**
 * blkcg_policy_unregister - unregister a blkcg policy
 * @pol: blkcg policy to unregister
 *
 * Undo blkcg_policy_register(@pol). Might sleep.
 */
void blkcg_policy_unregister(struct blkcg_policy *pol)
{
	mutex_lock(&blkcg_pol_mutex);

	if (WARN_ON(blkcg_policy[pol->plid] != pol))
		goto out_unlock;

	/* kill the intf files first */
	if (pol->cftypes)
		cgroup_rm_cftypes(pol->cftypes);

	/* unregister and update blkgs */
	blkcg_policy[pol->plid] = NULL;
out_unlock:
	mutex_unlock(&blkcg_pol_mutex);
}
EXPORT_SYMBOL_GPL(blkcg_policy_unregister);