/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 *
 * For policy-specific per-blkcg data:
 * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it>
 *                    Arianna Avanzini <avanzini.arianna@gmail.com>
 */

#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/blk-cgroup.h>

#define MAX_KEY_LEN 100

/*
 * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
 * blkcg_pol_register_mutex nests outside of it and synchronizes entire
 * policy [un]register operations including cgroup file additions /
 * removals.  Putting cgroup file registration outside blkcg_pol_mutex
 * allows grabbing it from cgroup callbacks.
 */
static DEFINE_MUTEX(blkcg_pol_register_mutex);
static DEFINE_MUTEX(blkcg_pol_mutex);

struct blkcg blkcg_root;
EXPORT_SYMBOL_GPL(blkcg_root);

struct cgroup_subsys_state * const blkcg_root_css = &blkcg_root.css;

static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];

static LIST_HEAD(all_blkcgs);		/* protected by blkcg_pol_mutex */

static bool blkcg_policy_enabled(struct request_queue *q,
				 const struct blkcg_policy *pol)
{
	return pol && test_bit(pol->plid, q->blkcg_pols);
}

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkcg_gq *blkg)
{
	int i;

	if (!blkg)
		return;

	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (blkg->pd[i])
			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);

	if (blkg->blkcg != &blkcg_root)
		blk_exit_rl(&blkg->rl);

	blkg_rwstat_exit(&blkg->stat_ios);
	blkg_rwstat_exit(&blkg->stat_bytes);
	kfree(blkg);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 * @gfp_mask: allocation mask to use
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
				   gfp_t gfp_mask)
{
	struct blkcg_gq *blkg;
	int i;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
	if (!blkg)
		return NULL;

	if (blkg_rwstat_init(&blkg->stat_bytes, gfp_mask) ||
	    blkg_rwstat_init(&blkg->stat_ios, gfp_mask))
		goto err_free;

	blkg->q = q;
	INIT_LIST_HEAD(&blkg->q_node);
	blkg->blkcg = blkcg;
	atomic_set(&blkg->refcnt, 1);

	/* root blkg uses @q->root_rl, init rl only for !root blkgs */
	if (blkcg != &blkcg_root) {
		if (blk_init_rl(&blkg->rl, q, gfp_mask))
			goto err_free;
		blkg->rl.blkg = blkg;
	}

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkg_policy_data *pd;

		if (!blkcg_policy_enabled(q, pol))
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = pol->pd_alloc_fn(gfp_mask, q->node);
		if (!pd)
			goto err_free;

		blkg->pd[i] = pd;
		pd->blkg = blkg;
		pd->plid = i;
	}

	return blkg;

err_free:
	blkg_free(blkg);
	return NULL;
}

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
				      struct request_queue *q, bool update_hint)
{
	struct blkcg_gq *blkg;

	/*
	 * Hint didn't match.  Look up from the radix tree.  Note that the
	 * hint can only be updated under queue_lock as otherwise @blkg
	 * could have already been removed from blkg_tree.  The caller is
	 * responsible for grabbing queue_lock if @update_hint.
	 */
	blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
	if (blkg && blkg->q == q) {
		if (update_hint) {
			lockdep_assert_held(q->queue_lock);
			rcu_assign_pointer(blkcg->blkg_hint, blkg);
		}
		return blkg;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(blkg_lookup_slowpath);

/*
 * If @new_blkg is %NULL, this function tries to allocate a new one as
 * necessary using %GFP_NOWAIT.  @new_blkg is always consumed on return.
 */
static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
				    struct request_queue *q,
				    struct blkcg_gq *new_blkg)
{
	struct blkcg_gq *blkg;
	struct bdi_writeback_congested *wb_congested;
	int i, ret;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/* blkg holds a reference to blkcg */
	if (!css_tryget_online(&blkcg->css)) {
		ret = -ENODEV;
		goto err_free_blkg;
	}

	wb_congested = wb_congested_get_create(&q->backing_dev_info,
					       blkcg->css.id, GFP_NOWAIT);
	if (!wb_congested) {
		ret = -ENOMEM;
		goto err_put_css;
	}

	/* allocate */
	if (!new_blkg) {
		new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT);
		if (unlikely(!new_blkg)) {
			ret = -ENOMEM;
			goto err_put_congested;
		}
	}

	blkg = new_blkg;
	blkg->wb_congested = wb_congested;

	/* link parent */
	if (blkcg_parent(blkcg)) {
		blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
		if (WARN_ON_ONCE(!blkg->parent)) {
			ret = -ENODEV;
			goto err_put_congested;
		}
		blkg_get(blkg->parent);
	}

	/* invoke per-policy init */
	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_init_fn)
			pol->pd_init_fn(blkg->pd[i]);
	}

	/* insert */
	spin_lock(&blkcg->lock);
	ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
	if (likely(!ret)) {
		hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
		list_add(&blkg->q_node, &q->blkg_list);

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkg->pd[i] && pol->pd_online_fn)
				pol->pd_online_fn(blkg->pd[i]);
		}
	}
	blkg->online = true;
	spin_unlock(&blkcg->lock);

	if (!ret)
		return blkg;

	/* @blkg failed to be fully initialized, use the usual release path */
	blkg_put(blkg);
	return ERR_PTR(ret);

err_put_congested:
	wb_congested_put(wb_congested);
err_put_css:
	css_put(&blkcg->css);
err_free_blkg:
	blkg_free(new_blkg);
	return ERR_PTR(ret);
}

/**
 * blkg_lookup_create - lookup blkg, try to create one if not there
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  If it doesn't exist, try to
 * create one.  blkg creation is performed recursively from blkcg_root such
 * that all non-root blkg's have access to the parent blkg.  This function
 * should be called under RCU read lock and @q->queue_lock.
 *
 * Returns pointer to the looked up or created blkg on success, ERR_PTR()
 * value on error.  If @q is dead, returns ERR_PTR(-EINVAL).  If @q is not
 * dead and bypassing, returns ERR_PTR(-EBUSY).
 */
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q)
{
	struct blkcg_gq *blkg;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)))
		return ERR_PTR(blk_queue_dying(q) ? -EINVAL : -EBUSY);

	blkg = __blkg_lookup(blkcg, q, true);
	if (blkg)
		return blkg;

	/*
	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
	 * non-root blkgs have access to their parents.
	 */
	while (true) {
		struct blkcg *pos = blkcg;
		struct blkcg *parent = blkcg_parent(blkcg);

		while (parent && !__blkg_lookup(parent, q, false)) {
			pos = parent;
			parent = blkcg_parent(parent);
		}

		blkg = blkg_create(pos, q, NULL);
		if (pos == blkcg || IS_ERR(blkg))
			return blkg;
	}
}

static void blkg_destroy(struct blkcg_gq *blkg)
{
	struct blkcg *blkcg = blkg->blkcg;
	struct blkcg_gq *parent = blkg->parent;
	int i;

	lockdep_assert_held(blkg->q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* Something wrong if we are trying to remove same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_offline_fn)
			pol->pd_offline_fn(blkg->pd[i]);
	}

	if (parent) {
		blkg_rwstat_add_aux(&parent->stat_bytes, &blkg->stat_bytes);
		blkg_rwstat_add_aux(&parent->stat_ios, &blkg->stat_ios);
	}

	blkg->online = false;

	radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);

	/*
	 * Both setting lookup hint to and clearing it from @blkg are done
	 * under queue_lock.  If it's not pointing to @blkg now, it never
	 * will.  Hint assignment itself can race safely.
	 */
	if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
		rcu_assign_pointer(blkcg->blkg_hint, NULL);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	blkg_put(blkg);
}

/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 *
 * Destroy all blkgs associated with @q.
 */
static void blkg_destroy_all(struct request_queue *q)
{
	struct blkcg_gq *blkg, *n;

	lockdep_assert_held(q->queue_lock);

	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);
	}

	q->root_blkg = NULL;
	q->root_rl.blkg = NULL;
}

/*
 * A group is RCU protected, but having an rcu lock does not mean that one
 * can access all the fields of blkg and assume these are valid.  For
 * example, don't try to follow throtl_data and request queue links.
 *
 * Having a reference to blkg under an rcu allows accesses to only values
 * local to groups like group stats and group rate limits.
 */
void __blkg_release_rcu(struct rcu_head *rcu_head)
{
	struct blkcg_gq *blkg = container_of(rcu_head, struct blkcg_gq, rcu_head);

	/* release the blkcg and parent blkg refs this blkg has been holding */
	css_put(&blkg->blkcg->css);
	if (blkg->parent)
		blkg_put(blkg->parent);

	wb_congested_put(blkg->wb_congested);

	blkg_free(blkg);
}
EXPORT_SYMBOL_GPL(__blkg_release_rcu);

/*
 * The next function used by blk_queue_for_each_rl().  It's a bit tricky
 * because the root blkg uses @q->root_rl instead of its own rl.
 */
struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q)
{
	struct list_head *ent;
	struct blkcg_gq *blkg;

	/*
	 * Determine the current blkg list_head.  The first entry is
	 * root_rl which is off @q->blkg_list and mapped to the head.
	 */
	if (rl == &q->root_rl) {
		ent = &q->blkg_list;
		/* There are no more block groups, hence no request lists */
		if (list_empty(ent))
			return NULL;
	} else {
		blkg = container_of(rl, struct blkcg_gq, rl);
		ent = &blkg->q_node;
	}

	/* walk to the next list_head, skip root blkcg */
	ent = ent->next;
	if (ent == &q->root_blkg->q_node)
		ent = ent->next;
	if (ent == &q->blkg_list)
		return NULL;

	blkg = container_of(ent, struct blkcg_gq, q_node);
	return &blkg->rl;
}

static int blkcg_reset_stats(struct cgroup_subsys_state *css,
			     struct cftype *cftype, u64 val)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct blkcg_gq *blkg;
	int i;

	mutex_lock(&blkcg_pol_mutex);
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates.  This is a debug feature which shouldn't exist
	 * anyway.  If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
		blkg_rwstat_reset(&blkg->stat_bytes);
		blkg_rwstat_reset(&blkg->stat_ios);

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkg->pd[i] && pol->pd_reset_stats_fn)
				pol->pd_reset_stats_fn(blkg->pd[i]);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	mutex_unlock(&blkcg_pol_mutex);
	return 0;
}

static const char *blkg_dev_name(struct blkcg_gq *blkg)
{
	/* some drivers (floppy) instantiate a queue w/o disk registered */
	if (blkg->q->backing_dev_info.dev)
		return dev_name(blkg->q->backing_dev_info.dev);
	return NULL;
}

/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: to print out sum of prfill return values or not
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 * policy data and @data and the matching queue lock held.  If @show_total
 * is %true, the sum of the return values from @prfill is printed with
 * "Total" label at the end.
 *
 * This is to be used to construct print functions for
 * cftype->read_seq_string method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total)
{
	struct blkcg_gq *blkg;
	u64 total = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
		spin_lock_irq(blkg->q->queue_lock);
		if (blkcg_policy_enabled(blkg->q, pol))
			total += prfill(sf, blkg->pd[pol->plid], data);
		spin_unlock_irq(blkg->q->queue_lock);
	}
	rcu_read_unlock();

	if (show_total)
		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);

/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
	const char *dname = blkg_dev_name(pd->blkg);

	if (!dname)
		return 0;

	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);

/**
 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @rwstat: rwstat to print
 *
 * Print @rwstat to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat)
{
	static const char *rwstr[] = {
		[BLKG_RWSTAT_READ]	= "Read",
		[BLKG_RWSTAT_WRITE]	= "Write",
		[BLKG_RWSTAT_SYNC]	= "Sync",
		[BLKG_RWSTAT_ASYNC]	= "Async",
	};
	const char *dname = blkg_dev_name(pd->blkg);
	u64 v;
	int i;

	if (!dname)
		return 0;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
			   (unsigned long long)atomic64_read(&rwstat->aux_cnt[i]));

	v = atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_READ]) +
		atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_WRITE]);
	seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_rwstat);

/**
 * blkg_prfill_stat - prfill callback for blkg_stat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_stat in @pd
 *
 * prfill callback for printing a blkg_stat.
 */
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off)
{
	return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off));
}
EXPORT_SYMBOL_GPL(blkg_prfill_stat);

/**
 * blkg_prfill_rwstat - prfill callback for blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_rwstat in @pd
 *
 * prfill callback for printing a blkg_rwstat.
 */
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off);

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);

static u64 blkg_prfill_rwstat_field(struct seq_file *sf,
				    struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd->blkg + off);

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}

/**
 * blkg_print_stat_bytes - seq_show callback for blkg->stat_bytes
 * @sf: seq_file to print to
 * @v: unused
 *
 * To be used as cftype->seq_show to print blkg->stat_bytes.
 * cftype->private must be set to the blkcg_policy.
 */
int blkg_print_stat_bytes(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
			  offsetof(struct blkcg_gq, stat_bytes), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_bytes);

/**
 * blkg_print_stat_ios - seq_show callback for blkg->stat_ios
 * @sf: seq_file to print to
 * @v: unused
 *
 * To be used as cftype->seq_show to print blkg->stat_ios.  cftype->private
 * must be set to the blkcg_policy.
 */
int blkg_print_stat_ios(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
			  offsetof(struct blkcg_gq, stat_ios), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_ios);

static u64 blkg_prfill_rwstat_field_recursive(struct seq_file *sf,
					      struct blkg_policy_data *pd,
					      int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_recursive_sum(pd->blkg,
							      NULL, off);

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}

/**
 * blkg_print_stat_bytes_recursive - recursive version of blkg_print_stat_bytes
 * @sf: seq_file to print to
 * @v: unused
 */
int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  blkg_prfill_rwstat_field_recursive,
			  (void *)seq_cft(sf)->private,
			  offsetof(struct blkcg_gq, stat_bytes), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_bytes_recursive);

/**
 * blkg_print_stat_ios_recursive - recursive version of blkg_print_stat_ios
 * @sf: seq_file to print to
 * @v: unused
 */
int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  blkg_prfill_rwstat_field_recursive,
			  (void *)seq_cft(sf)->private,
			  offsetof(struct blkcg_gq, stat_ios), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_ios_recursive);

/**
 * blkg_stat_recursive_sum - collect hierarchical blkg_stat
 * @blkg: blkg of interest
 * @pol: blkcg_policy which contains the blkg_stat
 * @off: offset to the blkg_stat in blkg_policy_data or @blkg
 *
 * Collect the blkg_stat specified by @blkg, @pol and @off and all its
 * online descendants and their aux counts.  The caller must be holding the
 * queue lock for online tests.
 *
 * If @pol is NULL, blkg_stat is at @off bytes into @blkg; otherwise, it is
 * at @off bytes into @blkg's blkg_policy_data of the policy.
 */
u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
			    struct blkcg_policy *pol, int off)
{
	struct blkcg_gq *pos_blkg;
	struct cgroup_subsys_state *pos_css;
	u64 sum = 0;

	lockdep_assert_held(blkg->q->queue_lock);

	rcu_read_lock();
	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
		struct blkg_stat *stat;

		if (!pos_blkg->online)
			continue;

		if (pol)
			stat = (void *)blkg_to_pd(pos_blkg, pol) + off;
		else
			stat = (void *)pos_blkg + off;

		sum += blkg_stat_read(stat) + atomic64_read(&stat->aux_cnt);
	}
	rcu_read_unlock();

	return sum;
}
EXPORT_SYMBOL_GPL(blkg_stat_recursive_sum);

/**
 * blkg_rwstat_recursive_sum - collect hierarchical blkg_rwstat
 * @blkg: blkg of interest
 * @pol: blkcg_policy which contains the blkg_rwstat
 * @off: offset to the blkg_rwstat in blkg_policy_data or @blkg
 *
 * Collect the blkg_rwstat specified by @blkg, @pol and @off and all its
 * online descendants and their aux counts.  The caller must be holding the
 * queue lock for online tests.
 *
 * If @pol is NULL, blkg_rwstat is at @off bytes into @blkg; otherwise, it
 * is at @off bytes into @blkg's blkg_policy_data of the policy.
 */
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
					     struct blkcg_policy *pol, int off)
{
	struct blkcg_gq *pos_blkg;
	struct cgroup_subsys_state *pos_css;
	struct blkg_rwstat sum = { };
	int i;

	lockdep_assert_held(blkg->q->queue_lock);

	rcu_read_lock();
	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
		struct blkg_rwstat *rwstat;

		if (!pos_blkg->online)
			continue;

		if (pol)
			rwstat = (void *)blkg_to_pd(pos_blkg, pol) + off;
		else
			rwstat = (void *)pos_blkg + off;

		for (i = 0; i < BLKG_RWSTAT_NR; i++)
			atomic64_add(atomic64_read(&rwstat->aux_cnt[i]) +
				percpu_counter_sum_positive(&rwstat->cpu_cnt[i]),
				&sum.aux_cnt[i]);
	}
	rcu_read_unlock();

	return sum;
}
EXPORT_SYMBOL_GPL(blkg_rwstat_recursive_sum);

/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @pol: target policy
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result.  @ctx->blkg points to the blkg to be updated and @ctx->v the new
 * value.  This function returns with RCU read lock and queue lock held and
 * must be paired with blkg_conf_finish().
 */
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   const char *input, struct blkg_conf_ctx *ctx)
	__acquires(rcu) __acquires(disk->queue->queue_lock)
{
	struct gendisk *disk;
	struct blkcg_gq *blkg;
	unsigned int major, minor;
	unsigned long long v;
	int part, ret;

	if (sscanf(input, "%u:%u %llu", &major, &minor, &v) != 3)
		return -EINVAL;

	disk = get_gendisk(MKDEV(major, minor), &part);
	if (!disk)
		return -EINVAL;
	if (part) {
		put_disk(disk);
		return -EINVAL;
	}

	rcu_read_lock();
	spin_lock_irq(disk->queue->queue_lock);

	if (blkcg_policy_enabled(disk->queue, pol))
		blkg = blkg_lookup_create(blkcg, disk->queue);
	else
		blkg = ERR_PTR(-EINVAL);

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		rcu_read_unlock();
		spin_unlock_irq(disk->queue->queue_lock);
		put_disk(disk);
		/*
		 * If queue was bypassing, we should retry.  Do so after a
		 * short msleep().  It isn't strictly necessary but queue
		 * can be bypassing for some time and it's always nice to
		 * avoid busy looping.
		 */
		if (ret == -EBUSY) {
			msleep(10);
			ret = restart_syscall();
		}
		return ret;
	}

	ctx->disk = disk;
	ctx->blkg = blkg;
	ctx->v = v;
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);

/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after per-blkg config update.  This function must be paired
 * with blkg_conf_prep().
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
	__releases(ctx->disk->queue->queue_lock) __releases(rcu)
{
	spin_unlock_irq(ctx->disk->queue->queue_lock);
	rcu_read_unlock();
	put_disk(ctx->disk);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);

struct cftype blkcg_files[] = {
	{
		.name = "reset_stats",
		.write_u64 = blkcg_reset_stats,
	},
	{ }	/* terminate */
};

/**
 * blkcg_css_offline - cgroup css_offline callback
 * @css: css of interest
 *
 * This function is called when @css is about to go away and responsible
 * for shooting down all blkgs associated with @css.  blkgs should be
 * removed while holding both q and blkcg locks.  As blkcg lock is nested
 * inside q lock, this function performs reverse double lock dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static void blkcg_css_offline(struct cgroup_subsys_state *css)
{
	struct blkcg *blkcg = css_to_blkcg(css);

	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
						    struct blkcg_gq, blkcg_node);
		struct request_queue *q = blkg->q;

		if (spin_trylock(q->queue_lock)) {
			blkg_destroy(blkg);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irq(&blkcg->lock);
			cpu_relax();
			spin_lock_irq(&blkcg->lock);
		}
	}

	spin_unlock_irq(&blkcg->lock);

	wb_blkcg_offline(blkcg);
}

static void blkcg_css_free(struct cgroup_subsys_state *css)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	int i;

	mutex_lock(&blkcg_pol_mutex);

	list_del(&blkcg->all_blkcgs_node);

	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (blkcg->cpd[i])
			blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);

	mutex_unlock(&blkcg_pol_mutex);

	kfree(blkcg);
}

static struct cgroup_subsys_state *
blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct blkcg *blkcg;
	struct cgroup_subsys_state *ret;
	int i;

	mutex_lock(&blkcg_pol_mutex);

	if (!parent_css) {
		blkcg = &blkcg_root;
	} else {
		blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
		if (!blkcg) {
			ret = ERR_PTR(-ENOMEM);
			goto unlock;
		}
	}

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkcg_policy_data *cpd;

		/*
		 * If the policy hasn't been attached yet, wait for it
		 * to be attached before doing anything else. Otherwise,
		 * check if the policy requires any specific per-cgroup
		 * data: if it does, allocate and initialize it.
		 */
		if (!pol || !pol->cpd_alloc_fn)
			continue;

		cpd = pol->cpd_alloc_fn(GFP_KERNEL);
		if (!cpd) {
			ret = ERR_PTR(-ENOMEM);
			goto free_pd_blkcg;
		}
		blkcg->cpd[i] = cpd;
		cpd->blkcg = blkcg;
		cpd->plid = i;
		if (pol->cpd_init_fn)
			pol->cpd_init_fn(cpd);
	}

	spin_lock_init(&blkcg->lock);
	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT);
	INIT_HLIST_HEAD(&blkcg->blkg_list);
#ifdef CONFIG_CGROUP_WRITEBACK
	INIT_LIST_HEAD(&blkcg->cgwb_list);
#endif
	list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);

	mutex_unlock(&blkcg_pol_mutex);
	return &blkcg->css;

free_pd_blkcg:
	for (i--; i >= 0; i--)
		if (blkcg->cpd[i])
			blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);
	if (blkcg != &blkcg_root)
		kfree(blkcg);
unlock:
	mutex_unlock(&blkcg_pol_mutex);
	return ret;
}

/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
	struct blkcg_gq *new_blkg, *blkg;
	bool preloaded;
	int ret;

	new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
	if (!new_blkg)
		return -ENOMEM;

	preloaded = !radix_tree_preload(GFP_KERNEL);

	/*
	 * Make sure the root blkg exists and count the existing blkgs.  As
	 * @q is bypassing at this point, blkg_lookup_create() can't be
	 * used.  Open code insertion.
	 */
	rcu_read_lock();
	spin_lock_irq(q->queue_lock);
	blkg = blkg_create(&blkcg_root, q, new_blkg);
	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();

	if (preloaded)
		radix_tree_preload_end();

	/* @new_blkg is consumed by blkg_create(), don't free it again here */
	if (IS_ERR(blkg))
		return PTR_ERR(blkg);

	q->root_blkg = blkg;
	q->root_rl.blkg = blkg;

	ret = blk_throtl_init(q);
	if (ret) {
		spin_lock_irq(q->queue_lock);
		blkg_destroy_all(q);
		spin_unlock_irq(q->queue_lock);
	}
	return ret;
}

/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue().  Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	/*
	 * @q could be exiting and already have destroyed all blkgs as
	 * indicated by NULL root_blkg.  If so, don't confuse policies.
	 */
	if (!q->root_blkg)
		return;

	blk_throtl_drain(q);
}

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue().  Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	blkg_destroy_all(q);
	spin_unlock_irq(q->queue_lock);

	blk_throtl_exit(q);
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures.  For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkcg_can_attach(struct cgroup_subsys_state *css,
			    struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}

struct cgroup_subsys blkio_cgrp_subsys = {
	.css_alloc = blkcg_css_alloc,
	.css_offline = blkcg_css_offline,
	.css_free = blkcg_css_free,
	.can_attach = blkcg_can_attach,
	.legacy_cftypes = blkcg_files,
#ifdef CONFIG_MEMCG
	/*
	 * This ensures that, if available, memcg is automatically enabled
	 * together on the default hierarchy so that the owner cgroup can
	 * be retrieved from writeback pages.
	 */
	.depends_on = 1 << memory_cgrp_id,
#endif
};
EXPORT_SYMBOL_GPL(blkio_cgrp_subsys);

/**
 * blkcg_activate_policy - activate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to activate
 *
 * Activate @pol on @q.  Requires %GFP_KERNEL context.  @q goes through
 * bypass mode to populate its blkgs with policy_data for @pol.
 *
 * Activation happens with @q bypassed, so nobody would be accessing blkgs
 * from IO path.  Update of each blkg is protected by both queue and blkcg
 * locks so that holding either lock and testing blkcg_policy_enabled() is
 * always enough for dereferencing policy data.
 *
 * The caller is responsible for synchronizing [de]activations and policy
 * [un]registrations.  Returns 0 on success, -errno on failure.
 */
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol)
{
	struct blkg_policy_data *pd_prealloc = NULL;
	struct blkcg_gq *blkg;
	int ret;

	if (blkcg_policy_enabled(q, pol))
		return 0;

	blk_queue_bypass_start(q);
pd_prealloc:
	if (!pd_prealloc) {
		pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q->node);
		if (!pd_prealloc) {
			ret = -ENOMEM;
			goto out_bypass_end;
		}
	}

	spin_lock_irq(q->queue_lock);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		struct blkg_policy_data *pd;

		if (blkg->pd[pol->plid])
			continue;

		pd = pol->pd_alloc_fn(GFP_NOWAIT, q->node);
		if (!pd)
			swap(pd, pd_prealloc);
		if (!pd) {
			spin_unlock_irq(q->queue_lock);
			goto pd_prealloc;
		}

		blkg->pd[pol->plid] = pd;
		pd->blkg = blkg;
		pd->plid = pol->plid;
		if (pol->pd_init_fn)
			pol->pd_init_fn(pd);
	}

	__set_bit(pol->plid, q->blkcg_pols);
	ret = 0;

	spin_unlock_irq(q->queue_lock);
out_bypass_end:
	blk_queue_bypass_end(q);
	if (pd_prealloc)
		pol->pd_free_fn(pd_prealloc);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);

/**
 * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to deactivate
 *
 * Deactivate @pol on @q.  Follows the same synchronization rules as
 * blkcg_activate_policy().
 */
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol)
{
	struct blkcg_gq *blkg;

	if (!blkcg_policy_enabled(q, pol))
		return;

	blk_queue_bypass_start(q);
	spin_lock_irq(q->queue_lock);

	__clear_bit(pol->plid, q->blkcg_pols);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		/* grab blkcg lock too while removing @pd from @blkg */
		spin_lock(&blkg->blkcg->lock);

		if (blkg->pd[pol->plid]) {
			if (pol->pd_offline_fn)
				pol->pd_offline_fn(blkg->pd[pol->plid]);
			pol->pd_free_fn(blkg->pd[pol->plid]);
			blkg->pd[pol->plid] = NULL;
		}

		spin_unlock(&blkg->blkcg->lock);
	}

	spin_unlock_irq(q->queue_lock);
	blk_queue_bypass_end(q);
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);

/**
 * blkcg_policy_register - register a blkcg policy
 * @pol: blkcg policy to register
 *
 * Register @pol with blkcg core.  Might sleep and @pol may be modified on
 * successful registration.  Returns 0 on success and -errno on failure.
 */
int blkcg_policy_register(struct blkcg_policy *pol)
{
	struct blkcg *blkcg;
	int i, ret;

	mutex_lock(&blkcg_pol_register_mutex);
	mutex_lock(&blkcg_pol_mutex);

	/* find an empty slot */
	ret = -ENOSPC;
	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (!blkcg_policy[i])
			break;
	if (i >= BLKCG_MAX_POLS)
		goto err_unlock;

	/* register @pol */
	pol->plid = i;
	blkcg_policy[pol->plid] = pol;

	/* allocate and install cpd's */
	if (pol->cpd_alloc_fn) {
		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
			struct blkcg_policy_data *cpd;

			cpd = pol->cpd_alloc_fn(GFP_KERNEL);
			if (!cpd)
				goto err_free_cpds;

			blkcg->cpd[pol->plid] = cpd;
			cpd->blkcg = blkcg;
			cpd->plid = pol->plid;
			pol->cpd_init_fn(cpd);
		}
	}

	mutex_unlock(&blkcg_pol_mutex);

	/* everything is in place, add intf files for the new policy */
	if (pol->cftypes)
		WARN_ON(cgroup_add_legacy_cftypes(&blkio_cgrp_subsys,
						  pol->cftypes));
	mutex_unlock(&blkcg_pol_register_mutex);
	return 0;

err_free_cpds:
	if (pol->cpd_alloc_fn) {
		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
			if (blkcg->cpd[pol->plid]) {
				pol->cpd_free_fn(blkcg->cpd[pol->plid]);
				blkcg->cpd[pol->plid] = NULL;
			}
		}
	}
	blkcg_policy[pol->plid] = NULL;
err_unlock:
	mutex_unlock(&blkcg_pol_mutex);
	mutex_unlock(&blkcg_pol_register_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_policy_register);

/**
 * blkcg_policy_unregister - unregister a blkcg policy
 * @pol: blkcg policy to unregister
 *
 * Undo blkcg_policy_register(@pol).  Might sleep.
 */
void blkcg_policy_unregister(struct blkcg_policy *pol)
{
	struct blkcg *blkcg;

	mutex_lock(&blkcg_pol_register_mutex);

	if (WARN_ON(blkcg_policy[pol->plid] != pol))
		goto out_unlock;

	/* kill the intf files first */
	if (pol->cftypes)
		cgroup_rm_cftypes(pol->cftypes);

	/* remove cpds and unregister */
	mutex_lock(&blkcg_pol_mutex);

	if (pol->cpd_alloc_fn) {
		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
			if (blkcg->cpd[pol->plid]) {
				pol->cpd_free_fn(blkcg->cpd[pol->plid]);
				blkcg->cpd[pol->plid] = NULL;
			}
		}
	}
	blkcg_policy[pol->plid] = NULL;

	mutex_unlock(&blkcg_pol_mutex);
out_unlock:
	mutex_unlock(&blkcg_pol_register_mutex);
}
EXPORT_SYMBOL_GPL(blkcg_policy_unregister);