/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include "blk-cgroup.h"
#include "blk.h"

#define MAX_KEY_LEN 100

static DEFINE_MUTEX(blkcg_pol_mutex);

struct blkcg blkcg_root = { .cfq_weight = 2 * CFQ_WEIGHT_DEFAULT,
                            .cfq_leaf_weight = 2 * CFQ_WEIGHT_DEFAULT, };
EXPORT_SYMBOL_GPL(blkcg_root);

static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];

static bool blkcg_policy_enabled(struct request_queue *q,
                                 const struct blkcg_policy *pol)
{
        return pol && test_bit(pol->plid, q->blkcg_pols);
}

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkcg_gq *blkg)
{
        int i;

        if (!blkg)
                return;

        for (i = 0; i < BLKCG_MAX_POLS; i++) {
                struct blkcg_policy *pol = blkcg_policy[i];
                struct blkg_policy_data *pd = blkg->pd[i];

                if (!pd)
                        continue;

                if (pol && pol->pd_exit_fn)
                        pol->pd_exit_fn(blkg);

                kfree(pd);
        }

        blk_exit_rl(&blkg->rl);
        kfree(blkg);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 * @gfp_mask: allocation mask to use
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
                                   gfp_t gfp_mask)
{
        struct blkcg_gq *blkg;
        int i;

        /* alloc and init base part */
        blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
        if (!blkg)
                return NULL;

        blkg->q = q;
        INIT_LIST_HEAD(&blkg->q_node);
        blkg->blkcg = blkcg;
        blkg->refcnt = 1;

        /* root blkg uses @q->root_rl, init rl only for !root blkgs */
        if (blkcg != &blkcg_root) {
                if (blk_init_rl(&blkg->rl, q, gfp_mask))
                        goto err_free;
                blkg->rl.blkg = blkg;
        }

        for (i = 0; i < BLKCG_MAX_POLS; i++) {
                struct blkcg_policy *pol = blkcg_policy[i];
                struct blkg_policy_data *pd;

                if (!blkcg_policy_enabled(q, pol))
                        continue;

                /* alloc per-policy data and attach it to blkg */
                pd = kzalloc_node(pol->pd_size, gfp_mask, q->node);
                if (!pd)
                        goto err_free;

                blkg->pd[i] = pd;
                pd->blkg = blkg;
                pd->plid = i;

                /* invoke per-policy init */
                if (pol->pd_init_fn)
                        pol->pd_init_fn(blkg);
        }

        return blkg;

err_free:
        blkg_free(blkg);
        return NULL;
}

/**
 * __blkg_lookup - internal version of blkg_lookup()
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 * @update_hint: whether to update lookup hint with the result or not
 *
 * This is the internal version and shouldn't be used by policy
 * implementations. Looks up blkgs for the @blkcg - @q pair regardless of
 * @q's bypass state. If @update_hint is %true, the caller should be
 * holding @q->queue_lock and the lookup hint is updated on success.
 */
struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,
                               bool update_hint)
{
        struct blkcg_gq *blkg;

        blkg = rcu_dereference(blkcg->blkg_hint);
        if (blkg && blkg->q == q)
                return blkg;

        /*
         * Hint didn't match. Look up from the radix tree. Note that the
         * hint can only be updated under queue_lock as otherwise @blkg
         * could have already been removed from blkg_tree. The caller is
         * responsible for grabbing queue_lock if @update_hint.
         */
        blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
        if (blkg && blkg->q == q) {
                if (update_hint) {
                        lockdep_assert_held(q->queue_lock);
                        rcu_assign_pointer(blkcg->blkg_hint, blkg);
                }
                return blkg;
        }

        return NULL;
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair. This function should be called
 * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
 * - see blk_queue_bypass_start() for details.
 */
struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q)
{
        WARN_ON_ONCE(!rcu_read_lock_held());

        if (unlikely(blk_queue_bypass(q)))
                return NULL;
        return __blkg_lookup(blkcg, q, false);
}
EXPORT_SYMBOL_GPL(blkg_lookup);

/*
 * If @new_blkg is %NULL, this function tries to allocate a new one as
 * necessary using %GFP_ATOMIC. @new_blkg is always consumed on return.
 */
static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
                                    struct request_queue *q,
                                    struct blkcg_gq *new_blkg)
{
        struct blkcg_gq *blkg;
        int i, ret;

        WARN_ON_ONCE(!rcu_read_lock_held());
        lockdep_assert_held(q->queue_lock);

        /* blkg holds a reference to blkcg */
        if (!css_tryget(&blkcg->css)) {
                ret = -EINVAL;
                goto err_free_blkg;
        }

        /* allocate */
        if (!new_blkg) {
                new_blkg = blkg_alloc(blkcg, q, GFP_ATOMIC);
                if (unlikely(!new_blkg)) {
                        ret = -ENOMEM;
                        goto err_put_css;
                }
        }
        blkg = new_blkg;

        /* link parent and insert */
        if (blkcg_parent(blkcg)) {
                blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
                if (WARN_ON_ONCE(!blkg->parent)) {
                        ret = -EINVAL;
                        goto err_put_css;
                }
                blkg_get(blkg->parent);
        }

        spin_lock(&blkcg->lock);
        ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
        if (likely(!ret)) {
                hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
                list_add(&blkg->q_node, &q->blkg_list);

                for (i = 0; i < BLKCG_MAX_POLS; i++) {
                        struct blkcg_policy *pol = blkcg_policy[i];

                        if (blkg->pd[i] && pol->pd_online_fn)
                                pol->pd_online_fn(blkg);
                }
        }
        blkg->online = true;
        spin_unlock(&blkcg->lock);

        if (!ret)
                return blkg;

        /* @blkg failed to be fully initialized, use the usual release path */
        blkg_put(blkg);
        return ERR_PTR(ret);

err_put_css:
        css_put(&blkcg->css);
err_free_blkg:
        blkg_free(new_blkg);
        return ERR_PTR(ret);
}

/**
 * blkg_lookup_create - lookup blkg, try to create one if not there
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair. If it doesn't exist, try to
 * create one. blkg creation is performed recursively from blkcg_root such
 * that all non-root blkgs have access to the parent blkg. This function
 * should be called under RCU read lock and @q->queue_lock.
 *
 * Returns a pointer to the looked up or created blkg on success, ERR_PTR()
 * value on error. If @q is dead, returns ERR_PTR(-EINVAL). If @q is not
 * dead and bypassing, returns ERR_PTR(-EBUSY).
 */
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
                                    struct request_queue *q)
{
        struct blkcg_gq *blkg;

        WARN_ON_ONCE(!rcu_read_lock_held());
        lockdep_assert_held(q->queue_lock);

        /*
         * This could be the first entry point of blkcg implementation and
         * we shouldn't allow anything to go through for a bypassing queue.
         */
        if (unlikely(blk_queue_bypass(q)))
                return ERR_PTR(blk_queue_dying(q) ? -EINVAL : -EBUSY);

        blkg = __blkg_lookup(blkcg, q, true);
        if (blkg)
                return blkg;

        /*
         * Create blkgs walking down from blkcg_root to @blkcg, so that all
         * non-root blkgs have access to their parents.
         */
        while (true) {
                struct blkcg *pos = blkcg;
                struct blkcg *parent = blkcg_parent(blkcg);

                while (parent && !__blkg_lookup(parent, q, false)) {
                        pos = parent;
                        parent = blkcg_parent(parent);
                }

                blkg = blkg_create(pos, q, NULL);
                if (pos == blkcg || IS_ERR(blkg))
                        return blkg;
        }
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);
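
/*
 * Example (illustrative sketch only, not part of this file): a policy's
 * IO-path hook would typically resolve the blkg for the current bio along
 * the following lines. bio_blkcg() comes from blk-cgroup.h; falling back
 * to @q->root_blkg on error is one reasonable way to handle creation
 * failure and is an assumption of this sketch, not a requirement.
 *
 *      rcu_read_lock();
 *      blkcg = bio_blkcg(bio);
 *      spin_lock_irq(q->queue_lock);
 *      blkg = blkg_lookup_create(blkcg, q);
 *      if (IS_ERR(blkg))
 *              blkg = q->root_blkg;
 *      ... charge the bio against blkg's policy data ...
 *      spin_unlock_irq(q->queue_lock);
 *      rcu_read_unlock();
 */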

static void blkg_destroy(struct blkcg_gq *blkg)
{
        struct blkcg *blkcg = blkg->blkcg;
        int i;

        lockdep_assert_held(blkg->q->queue_lock);
        lockdep_assert_held(&blkcg->lock);

        /* Something is wrong if we are trying to remove the same group twice */
        WARN_ON_ONCE(list_empty(&blkg->q_node));
        WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));

        for (i = 0; i < BLKCG_MAX_POLS; i++) {
                struct blkcg_policy *pol = blkcg_policy[i];

                if (blkg->pd[i] && pol->pd_offline_fn)
                        pol->pd_offline_fn(blkg);
        }
        blkg->online = false;

        radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
        list_del_init(&blkg->q_node);
        hlist_del_init_rcu(&blkg->blkcg_node);

        /*
         * Both setting lookup hint to and clearing it from @blkg are done
         * under queue_lock. If it's not pointing to @blkg now, it never
         * will. Hint assignment itself can race safely.
         */
        if (rcu_dereference_raw(blkcg->blkg_hint) == blkg)
                rcu_assign_pointer(blkcg->blkg_hint, NULL);

        /*
         * Put the reference taken at the time of creation so that when all
         * queues are gone, the group can be destroyed.
         */
        blkg_put(blkg);
}

/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 *
 * Destroy all blkgs associated with @q.
 */
static void blkg_destroy_all(struct request_queue *q)
{
        struct blkcg_gq *blkg, *n;

        lockdep_assert_held(q->queue_lock);

        list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
                struct blkcg *blkcg = blkg->blkcg;

                spin_lock(&blkcg->lock);
                blkg_destroy(blkg);
                spin_unlock(&blkcg->lock);
        }

        /*
         * root blkg is destroyed. Just clear the pointer since
         * root_rl does not take reference on root blkg.
         */
        q->root_blkg = NULL;
        q->root_rl.blkg = NULL;
}

static void blkg_rcu_free(struct rcu_head *rcu_head)
{
        blkg_free(container_of(rcu_head, struct blkcg_gq, rcu_head));
}

void __blkg_release(struct blkcg_gq *blkg)
{
        /* release the blkcg and parent blkg refs this blkg has been holding */
        css_put(&blkg->blkcg->css);
        if (blkg->parent)
                blkg_put(blkg->parent);

        /*
         * A group is freed in rcu manner. But having an rcu lock does not
         * mean that one can access all the fields of blkg and assume these
         * are valid. For example, don't try to follow throtl_data and
         * request queue links.
         *
         * Having a reference to blkg under an rcu allows access to only
         * values local to groups like group stats and group rate limits.
         */
        call_rcu(&blkg->rcu_head, blkg_rcu_free);
}
EXPORT_SYMBOL_GPL(__blkg_release);

/*
 * The next function used by blk_queue_for_each_rl(). It's a bit tricky
 * because the root blkg uses @q->root_rl instead of its own rl.
 */
struct request_list *__blk_queue_next_rl(struct request_list *rl,
                                         struct request_queue *q)
{
        struct list_head *ent;
        struct blkcg_gq *blkg;

        /*
         * Determine the current blkg list_head. The first entry is
         * root_rl which is off @q->blkg_list and mapped to the head.
         */
        if (rl == &q->root_rl) {
                ent = &q->blkg_list;
                /* There are no more block groups, hence no request lists */
                if (list_empty(ent))
                        return NULL;
        } else {
                blkg = container_of(rl, struct blkcg_gq, rl);
                ent = &blkg->q_node;
        }

        /* walk to the next list_head, skip root blkcg */
        ent = ent->next;
        if (ent == &q->root_blkg->q_node)
                ent = ent->next;
        if (ent == &q->blkg_list)
                return NULL;

        blkg = container_of(ent, struct blkcg_gq, q_node);
        return &blkg->rl;
}

static int blkcg_reset_stats(struct cgroup *cgroup, struct cftype *cftype,
                             u64 val)
{
        struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
        struct blkcg_gq *blkg;
        int i;

        mutex_lock(&blkcg_pol_mutex);
        spin_lock_irq(&blkcg->lock);

        /*
         * Note that stat reset is racy - it doesn't synchronize against
         * stat updates. This is a debug feature which shouldn't exist
         * anyway. If you get hit by a race, retry.
         */
        hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
                for (i = 0; i < BLKCG_MAX_POLS; i++) {
                        struct blkcg_policy *pol = blkcg_policy[i];

                        if (blkcg_policy_enabled(blkg->q, pol) &&
                            pol->pd_reset_stats_fn)
                                pol->pd_reset_stats_fn(blkg);
                }
        }

        spin_unlock_irq(&blkcg->lock);
        mutex_unlock(&blkcg_pol_mutex);
        return 0;
}

static const char *blkg_dev_name(struct blkcg_gq *blkg)
{
        /* some drivers (floppy) instantiate a queue w/o disk registered */
        if (blkg->q->backing_dev_info.dev)
                return dev_name(blkg->q->backing_dev_info.dev);
        return NULL;
}

/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: whether to print the sum of the @prfill return values
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists. @prfill is invoked with @sf, the
 * policy data and @data and the matching queue lock held. If @show_total
 * is %true, the sum of the return values from @prfill is printed with
 * a "Total" label at the end.
 *
 * This is to be used to construct print functions for the
 * cftype->read_seq_string method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
                       u64 (*prfill)(struct seq_file *,
                                     struct blkg_policy_data *, int),
                       const struct blkcg_policy *pol, int data,
                       bool show_total)
{
        struct blkcg_gq *blkg;
        u64 total = 0;

        rcu_read_lock();
        hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
                spin_lock_irq(blkg->q->queue_lock);
                if (blkcg_policy_enabled(blkg->q, pol))
                        total += prfill(sf, blkg->pd[pol->plid], data);
                spin_unlock_irq(blkg->q->queue_lock);
        }
        rcu_read_unlock();

        if (show_total)
                seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);

/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
        const char *dname = blkg_dev_name(pd->blkg);

        if (!dname)
                return 0;

        seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
        return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);

/**
 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @rwstat: rwstat to print
 *
 * Print @rwstat to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
                         const struct blkg_rwstat *rwstat)
{
        static const char *rwstr[] = {
                [BLKG_RWSTAT_READ]      = "Read",
                [BLKG_RWSTAT_WRITE]     = "Write",
                [BLKG_RWSTAT_SYNC]      = "Sync",
                [BLKG_RWSTAT_ASYNC]     = "Async",
        };
        const char *dname = blkg_dev_name(pd->blkg);
        u64 v;
        int i;

        if (!dname)
                return 0;

        for (i = 0; i < BLKG_RWSTAT_NR; i++)
                seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
                           (unsigned long long)rwstat->cnt[i]);

        v = rwstat->cnt[BLKG_RWSTAT_READ] + rwstat->cnt[BLKG_RWSTAT_WRITE];
        seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
        return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_rwstat);

/**
 * blkg_prfill_stat - prfill callback for blkg_stat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_stat in @pd
 *
 * prfill callback for printing a blkg_stat.
 */
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off)
{
        return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off));
}
EXPORT_SYMBOL_GPL(blkg_prfill_stat);

/**
 * blkg_prfill_rwstat - prfill callback for blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_rwstat in @pd
 *
 * prfill callback for printing a blkg_rwstat.
 */
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
                       int off)
{
        struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off);

        return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);
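
/*
 * Example (illustrative sketch only): a policy would typically combine
 * blkcg_print_blkgs() with one of the prfill helpers above from its
 * cftype->read_seq_string callback. The names "example_policy",
 * "example_pd" and "example_print_stat" are hypothetical.
 *
 *      static int example_print_stat(struct cgroup *cgrp, struct cftype *cft,
 *                                    struct seq_file *sf)
 *      {
 *              blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), blkg_prfill_stat,
 *                                &example_policy, cft->private, false);
 *              return 0;
 *      }
 *
 * with cft->private set to something like offsetof(struct example_pd, stat)
 * when the cftype is declared.
 */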

/**
 * blkg_stat_recursive_sum - collect hierarchical blkg_stat
 * @pd: policy private data of interest
 * @off: offset to the blkg_stat in @pd
 *
 * Collect the blkg_stat specified by @off from @pd and all its online
 * descendants and return the sum. The caller must be holding the queue
 * lock for online tests.
 */
u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off)
{
        struct blkcg_policy *pol = blkcg_policy[pd->plid];
        struct blkcg_gq *pos_blkg;
        struct cgroup *pos_cgrp;
        u64 sum;

        lockdep_assert_held(pd->blkg->q->queue_lock);

        sum = blkg_stat_read((void *)pd + off);

        rcu_read_lock();
        blkg_for_each_descendant_pre(pos_blkg, pos_cgrp, pd_to_blkg(pd)) {
                struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol);
                struct blkg_stat *stat = (void *)pos_pd + off;

                if (pos_blkg->online)
                        sum += blkg_stat_read(stat);
        }
        rcu_read_unlock();

        return sum;
}
EXPORT_SYMBOL_GPL(blkg_stat_recursive_sum);

/**
 * blkg_rwstat_recursive_sum - collect hierarchical blkg_rwstat
 * @pd: policy private data of interest
 * @off: offset to the blkg_rwstat in @pd
 *
 * Collect the blkg_rwstat specified by @off from @pd and all its online
 * descendants and return the sum. The caller must be holding the queue
 * lock for online tests.
 */
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
                                             int off)
{
        struct blkcg_policy *pol = blkcg_policy[pd->plid];
        struct blkcg_gq *pos_blkg;
        struct cgroup *pos_cgrp;
        struct blkg_rwstat sum;
        int i;

        lockdep_assert_held(pd->blkg->q->queue_lock);

        sum = blkg_rwstat_read((void *)pd + off);

        rcu_read_lock();
        blkg_for_each_descendant_pre(pos_blkg, pos_cgrp, pd_to_blkg(pd)) {
                struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol);
                struct blkg_rwstat *rwstat = (void *)pos_pd + off;
                struct blkg_rwstat tmp;

                if (!pos_blkg->online)
                        continue;

                tmp = blkg_rwstat_read(rwstat);

                for (i = 0; i < BLKG_RWSTAT_NR; i++)
                        sum.cnt[i] += tmp.cnt[i];
        }
        rcu_read_unlock();

        return sum;
}
EXPORT_SYMBOL_GPL(blkg_rwstat_recursive_sum);
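
/*
 * Example (illustrative sketch only): a policy's prfill callback for the
 * recursive variants usually just wraps the helpers above. The name
 * "example_prfill_stat_recursive" is hypothetical.
 *
 *      static u64 example_prfill_stat_recursive(struct seq_file *sf,
 *                                               struct blkg_policy_data *pd,
 *                                               int off)
 *      {
 *              u64 sum = blkg_stat_recursive_sum(pd, off);
 *
 *              return __blkg_prfill_u64(sf, pd, sum);
 *      }
 *
 * Similarly, __blkg_prfill_rwstat() can print the struct returned by
 * blkg_rwstat_recursive_sum().
 */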

/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @pol: target policy
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result. @ctx->blkg points to the blkg to be updated and @ctx->v the new
 * value. This function returns with RCU read lock and queue lock held and
 * must be paired with blkg_conf_finish().
 */
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
                   const char *input, struct blkg_conf_ctx *ctx)
        __acquires(rcu) __acquires(disk->queue->queue_lock)
{
        struct gendisk *disk;
        struct blkcg_gq *blkg;
        unsigned int major, minor;
        unsigned long long v;
        int part, ret;

        if (sscanf(input, "%u:%u %llu", &major, &minor, &v) != 3)
                return -EINVAL;

        disk = get_gendisk(MKDEV(major, minor), &part);
        if (!disk || part)
                return -EINVAL;

        rcu_read_lock();
        spin_lock_irq(disk->queue->queue_lock);

        if (blkcg_policy_enabled(disk->queue, pol))
                blkg = blkg_lookup_create(blkcg, disk->queue);
        else
                blkg = ERR_PTR(-EINVAL);

        if (IS_ERR(blkg)) {
                ret = PTR_ERR(blkg);
                rcu_read_unlock();
                spin_unlock_irq(disk->queue->queue_lock);
                put_disk(disk);
                /*
                 * If queue was bypassing, we should retry. Do so after a
                 * short msleep(). It isn't strictly necessary but queue
                 * can be bypassing for some time and it's always nice to
                 * avoid busy looping.
                 */
                if (ret == -EBUSY) {
                        msleep(10);
                        ret = restart_syscall();
                }
                return ret;
        }

        ctx->disk = disk;
        ctx->blkg = blkg;
        ctx->v = v;
        return 0;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);

/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after per-blkg config update. This function must be paired
 * with blkg_conf_prep().
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
        __releases(ctx->disk->queue->queue_lock) __releases(rcu)
{
        spin_unlock_irq(ctx->disk->queue->queue_lock);
        rcu_read_unlock();
        put_disk(ctx->disk);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);
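
/*
 * Example (illustrative sketch only): a policy's cftype->write_string
 * handler would typically bracket its config update with the two helpers
 * above. "example_policy" and "example_set_limit" are hypothetical names.
 *
 *      static int example_set_limit(struct cgroup *cgrp, struct cftype *cft,
 *                                   const char *buf)
 *      {
 *              struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
 *              struct blkg_conf_ctx ctx;
 *              struct blkg_policy_data *pd;
 *              int ret;
 *
 *              ret = blkg_conf_prep(blkcg, &example_policy, buf, &ctx);
 *              if (ret)
 *                      return ret;
 *
 *              pd = blkg_to_pd(ctx.blkg, &example_policy);
 *              ... apply ctx.v to the policy's private data ...
 *
 *              blkg_conf_finish(&ctx);
 *              return 0;
 *      }
 */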

struct cftype blkcg_files[] = {
        {
                .name = "reset_stats",
                .write_u64 = blkcg_reset_stats,
        },
        { }     /* terminate */
};

/**
 * blkcg_css_offline - cgroup css_offline callback
 * @cgroup: cgroup of interest
 *
 * This function is called when @cgroup is about to go away and is
 * responsible for shooting down all blkgs associated with @cgroup.
 * blkgs should be removed while holding both q and blkcg locks. As the
 * blkcg lock is nested inside the q lock, this function performs reverse
 * double lock dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static void blkcg_css_offline(struct cgroup *cgroup)
{
        struct blkcg *blkcg = cgroup_to_blkcg(cgroup);

        spin_lock_irq(&blkcg->lock);

        while (!hlist_empty(&blkcg->blkg_list)) {
                struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
                                                    struct blkcg_gq, blkcg_node);
                struct request_queue *q = blkg->q;

                if (spin_trylock(q->queue_lock)) {
                        blkg_destroy(blkg);
                        spin_unlock(q->queue_lock);
                } else {
                        spin_unlock_irq(&blkcg->lock);
                        cpu_relax();
                        spin_lock_irq(&blkcg->lock);
                }
        }

        spin_unlock_irq(&blkcg->lock);
}

static void blkcg_css_free(struct cgroup *cgroup)
{
        struct blkcg *blkcg = cgroup_to_blkcg(cgroup);

        if (blkcg != &blkcg_root)
                kfree(blkcg);
}

static struct cgroup_subsys_state *blkcg_css_alloc(struct cgroup *cgroup)
{
        static atomic64_t id_seq = ATOMIC64_INIT(0);
        struct blkcg *blkcg;
        struct cgroup *parent = cgroup->parent;

        if (!parent) {
                blkcg = &blkcg_root;
                goto done;
        }

        blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
        if (!blkcg)
                return ERR_PTR(-ENOMEM);

        blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT;
        blkcg->cfq_leaf_weight = CFQ_WEIGHT_DEFAULT;
        blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
done:
        spin_lock_init(&blkcg->lock);
        INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC);
        INIT_HLIST_HEAD(&blkcg->blkg_list);

        return &blkcg->css;
}

/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
        might_sleep();

        return blk_throtl_init(q);
}

/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue(). Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
        lockdep_assert_held(q->queue_lock);

        blk_throtl_drain(q);
}

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue(). Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
        spin_lock_irq(q->queue_lock);
        blkg_destroy_all(q);
        spin_unlock_irq(q->queue_lock);

        blk_throtl_exit(q);
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkcg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
        struct task_struct *task;
        struct io_context *ioc;
        int ret = 0;

        /* task_lock() is needed to avoid races with exit_io_context() */
        cgroup_taskset_for_each(task, cgrp, tset) {
                task_lock(task);
                ioc = task->io_context;
                if (ioc && atomic_read(&ioc->nr_tasks) > 1)
                        ret = -EINVAL;
                task_unlock(task);
                if (ret)
                        break;
        }
        return ret;
}

struct cgroup_subsys blkio_subsys = {
        .name = "blkio",
        .css_alloc = blkcg_css_alloc,
        .css_offline = blkcg_css_offline,
        .css_free = blkcg_css_free,
        .can_attach = blkcg_can_attach,
        .subsys_id = blkio_subsys_id,
        .base_cftypes = blkcg_files,
        .module = THIS_MODULE,

        /*
         * blkio subsystem is utterly broken in terms of hierarchy support.
         * It treats all cgroups equally regardless of where they're
         * located in the hierarchy - all cgroups are treated as if they're
         * right below the root. Fix it and remove the following.
         */
        .broken_hierarchy = true,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

/**
 * blkcg_activate_policy - activate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to activate
 *
 * Activate @pol on @q. Requires %GFP_KERNEL context. @q goes through
 * bypass mode to populate its blkgs with policy_data for @pol.
 *
 * Activation happens with @q bypassed, so nobody would be accessing blkgs
 * from the IO path. Update of each blkg is protected by both queue and blkcg
 * locks so that holding either lock and testing blkcg_policy_enabled() is
 * always enough for dereferencing policy data.
 *
 * The caller is responsible for synchronizing [de]activations and policy
 * [un]registrations. Returns 0 on success, -errno on failure.
 */
int blkcg_activate_policy(struct request_queue *q,
                          const struct blkcg_policy *pol)
{
        LIST_HEAD(pds);
        struct blkcg_gq *blkg, *new_blkg;
        struct blkg_policy_data *pd, *n;
        int cnt = 0, ret;
        bool preloaded;

        if (blkcg_policy_enabled(q, pol))
                return 0;

        /* preallocations for root blkg */
        new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
        if (!new_blkg)
                return -ENOMEM;

        blk_queue_bypass_start(q);

        preloaded = !radix_tree_preload(GFP_KERNEL);

        /*
         * Make sure the root blkg exists and count the existing blkgs. As
         * @q is bypassing at this point, blkg_lookup_create() can't be
         * used. Open code it.
         */
        spin_lock_irq(q->queue_lock);

        rcu_read_lock();
        blkg = __blkg_lookup(&blkcg_root, q, false);
        if (blkg)
                blkg_free(new_blkg);
        else
                blkg = blkg_create(&blkcg_root, q, new_blkg);
        rcu_read_unlock();

        if (preloaded)
                radix_tree_preload_end();

        if (IS_ERR(blkg)) {
                ret = PTR_ERR(blkg);
                goto out_unlock;
        }
        q->root_blkg = blkg;
        q->root_rl.blkg = blkg;

        list_for_each_entry(blkg, &q->blkg_list, q_node)
                cnt++;

        spin_unlock_irq(q->queue_lock);

        /* allocate policy_data for all existing blkgs */
        while (cnt--) {
                pd = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node);
                if (!pd) {
                        ret = -ENOMEM;
                        goto out_free;
                }
                list_add_tail(&pd->alloc_node, &pds);
        }

        /*
         * Install the allocated pds. With @q bypassing, no new blkg
         * should have been created while the queue lock was dropped.
         */
        spin_lock_irq(q->queue_lock);

        list_for_each_entry(blkg, &q->blkg_list, q_node) {
                if (WARN_ON(list_empty(&pds))) {
                        /* umm... this shouldn't happen, just abort */
                        ret = -ENOMEM;
                        goto out_unlock;
                }
                pd = list_first_entry(&pds, struct blkg_policy_data, alloc_node);
                list_del_init(&pd->alloc_node);

                /* grab blkcg lock too while installing @pd on @blkg */
                spin_lock(&blkg->blkcg->lock);

                blkg->pd[pol->plid] = pd;
                pd->blkg = blkg;
                pd->plid = pol->plid;
                pol->pd_init_fn(blkg);

                spin_unlock(&blkg->blkcg->lock);
        }

        __set_bit(pol->plid, q->blkcg_pols);
        ret = 0;
out_unlock:
        spin_unlock_irq(q->queue_lock);
out_free:
        blk_queue_bypass_end(q);
        list_for_each_entry_safe(pd, n, &pds, alloc_node)
                kfree(pd);
        return ret;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);
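
/*
 * Example (illustrative sketch only): a policy that keeps per-queue state
 * typically activates itself from its queue init path and deactivates it
 * again on queue exit. "example_policy" and the init/exit function names
 * are hypothetical.
 *
 *      static int example_init_queue(struct request_queue *q)
 *      {
 *              ...
 *              return blkcg_activate_policy(q, &example_policy);
 *      }
 *
 *      static void example_exit_queue(struct request_queue *q)
 *      {
 *              blkcg_deactivate_policy(q, &example_policy);
 *              ...
 *      }
 */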

/**
 * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to deactivate
 *
 * Deactivate @pol on @q. Follows the same synchronization rules as
 * blkcg_activate_policy().
 */
void blkcg_deactivate_policy(struct request_queue *q,
                             const struct blkcg_policy *pol)
{
        struct blkcg_gq *blkg;

        if (!blkcg_policy_enabled(q, pol))
                return;

        blk_queue_bypass_start(q);
        spin_lock_irq(q->queue_lock);

        __clear_bit(pol->plid, q->blkcg_pols);

        /* if no policy is left, no need for blkgs - shoot them down */
        if (bitmap_empty(q->blkcg_pols, BLKCG_MAX_POLS))
                blkg_destroy_all(q);

        list_for_each_entry(blkg, &q->blkg_list, q_node) {
                /* grab blkcg lock too while removing @pd from @blkg */
                spin_lock(&blkg->blkcg->lock);

                if (pol->pd_offline_fn)
                        pol->pd_offline_fn(blkg);
                if (pol->pd_exit_fn)
                        pol->pd_exit_fn(blkg);

                kfree(blkg->pd[pol->plid]);
                blkg->pd[pol->plid] = NULL;

                spin_unlock(&blkg->blkcg->lock);
        }

        spin_unlock_irq(q->queue_lock);
        blk_queue_bypass_end(q);
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);

/**
 * blkcg_policy_register - register a blkcg policy
 * @pol: blkcg policy to register
 *
 * Register @pol with blkcg core. Might sleep and @pol may be modified on
 * successful registration. Returns 0 on success and -errno on failure.
 */
int blkcg_policy_register(struct blkcg_policy *pol)
{
        int i, ret;

        if (WARN_ON(pol->pd_size < sizeof(struct blkg_policy_data)))
                return -EINVAL;

        mutex_lock(&blkcg_pol_mutex);

        /* find an empty slot */
        ret = -ENOSPC;
        for (i = 0; i < BLKCG_MAX_POLS; i++)
                if (!blkcg_policy[i])
                        break;
        if (i >= BLKCG_MAX_POLS)
                goto out_unlock;

        /* register and update blkgs */
        pol->plid = i;
        blkcg_policy[i] = pol;

        /* everything is in place, add intf files for the new policy */
        if (pol->cftypes)
                WARN_ON(cgroup_add_cftypes(&blkio_subsys, pol->cftypes));
        ret = 0;
out_unlock:
        mutex_unlock(&blkcg_pol_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(blkcg_policy_register);
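
/*
 * Example (illustrative sketch only): a modular policy would typically
 * embed struct blkg_policy_data as the first member of its per-blkg data,
 * fill in a struct blkcg_policy and register/unregister it from its module
 * init and exit. All names below ("example_pd", "example_policy",
 * "example_files", the pd callbacks) are hypothetical.
 *
 *      struct example_pd {
 *              struct blkg_policy_data pd;
 *              u64 limit;
 *      };
 *
 *      static struct blkcg_policy example_policy = {
 *              .pd_size        = sizeof(struct example_pd),
 *              .cftypes        = example_files,
 *              .pd_init_fn     = example_pd_init,
 *              .pd_exit_fn     = example_pd_exit,
 *      };
 *
 *      static int __init example_init(void)
 *      {
 *              return blkcg_policy_register(&example_policy);
 *      }
 *
 *      static void __exit example_exit(void)
 *      {
 *              blkcg_policy_unregister(&example_policy);
 *      }
 */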

/**
 * blkcg_policy_unregister - unregister a blkcg policy
 * @pol: blkcg policy to unregister
 *
 * Undo blkcg_policy_register(@pol). Might sleep.
 */
void blkcg_policy_unregister(struct blkcg_policy *pol)
{
        mutex_lock(&blkcg_pol_mutex);

        if (WARN_ON(blkcg_policy[pol->plid] != pol))
                goto out_unlock;

        /* kill the intf files first */
        if (pol->cftypes)
                cgroup_rm_cftypes(&blkio_subsys, pol->cftypes);

        /* unregister and update blkgs */
        blkcg_policy[pol->plid] = NULL;
out_unlock:
        mutex_unlock(&blkcg_pol_mutex);
}
EXPORT_SYMBOL_GPL(blkcg_policy_unregister);