/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 *
 * For policy-specific per-blkcg data:
 * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it>
 *                    Arianna Avanzini <avanzini.arianna@gmail.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/blk-cgroup.h>
#include "blk.h"

#define MAX_KEY_LEN 100
/*
 * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
 * blkcg_pol_register_mutex nests outside of it and synchronizes entire
 * policy [un]register operations including cgroup file additions /
 * removals.  Putting cgroup file registration outside blkcg_pol_mutex
 * allows grabbing it from cgroup callbacks.
 */
static DEFINE_MUTEX(blkcg_pol_register_mutex);
static DEFINE_MUTEX(blkcg_pol_mutex);

struct blkcg blkcg_root;
EXPORT_SYMBOL_GPL(blkcg_root);

struct cgroup_subsys_state * const blkcg_root_css = &blkcg_root.css;

static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];

static LIST_HEAD(all_blkcgs);		/* protected by blkcg_pol_mutex */
static bool blkcg_policy_enabled(struct request_queue *q,
                                 const struct blkcg_policy *pol)
{
        return pol && test_bit(pol->plid, q->blkcg_pols);
}
/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkcg_gq *blkg)
{
        int i;

        if (!blkg)
                return;

        for (i = 0; i < BLKCG_MAX_POLS; i++)
                kfree(blkg->pd[i]);

        if (blkg->blkcg != &blkcg_root)
                blk_exit_rl(&blkg->rl);
        kfree(blkg);
}
/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 * @gfp_mask: allocation mask to use
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
                                   gfp_t gfp_mask)
{
        struct blkcg_gq *blkg;
        int i;

        /* alloc and init base part */
        blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
        if (!blkg)
                return NULL;

        blkg->q = q;
        INIT_LIST_HEAD(&blkg->q_node);
        blkg->blkcg = blkcg;
        atomic_set(&blkg->refcnt, 1);

        /* root blkg uses @q->root_rl, init rl only for !root blkgs */
        if (blkcg != &blkcg_root) {
                if (blk_init_rl(&blkg->rl, q, gfp_mask))
                        goto err_free;
                blkg->rl.blkg = blkg;
        }

        for (i = 0; i < BLKCG_MAX_POLS; i++) {
                struct blkcg_policy *pol = blkcg_policy[i];
                struct blkg_policy_data *pd;

                if (!blkcg_policy_enabled(q, pol))
                        continue;

                /* alloc per-policy data and attach it to blkg */
                pd = kzalloc_node(pol->pd_size, gfp_mask, q->node);
                if (!pd)
                        goto err_free;

                blkg->pd[i] = pd;
                pd->blkg = blkg;
                pd->plid = i;
        }

        return blkg;

err_free:
        blkg_free(blkg);
        return NULL;
}
/**
 * __blkg_lookup - internal version of blkg_lookup()
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 * @update_hint: whether to update lookup hint with the result or not
 *
 * This is the internal version and shouldn't be used by policy
 * implementations.  Looks up blkgs for the @blkcg - @q pair regardless of
 * @q's bypass state.  If @update_hint is %true, the caller should be
 * holding @q->queue_lock and lookup hint is updated on success.
 */
struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,
                               bool update_hint)
{
        struct blkcg_gq *blkg;

        blkg = rcu_dereference(blkcg->blkg_hint);
        if (blkg && blkg->q == q)
                return blkg;

        /*
         * Hint didn't match.  Look up from the radix tree.  Note that the
         * hint can only be updated under queue_lock as otherwise @blkg
         * could have already been removed from blkg_tree.  The caller is
         * responsible for grabbing queue_lock if @update_hint.
         */
        blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
        if (blkg && blkg->q == q) {
                if (update_hint) {
                        lockdep_assert_held(q->queue_lock);
                        rcu_assign_pointer(blkcg->blkg_hint, blkg);
                }
                return blkg;
        }

        return NULL;
}
/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
 * - see blk_queue_bypass_start() for details.
 */
struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q)
{
        WARN_ON_ONCE(!rcu_read_lock_held());

        if (unlikely(blk_queue_bypass(q)))
                return NULL;
        return __blkg_lookup(blkcg, q, false);
}
EXPORT_SYMBOL_GPL(blkg_lookup);
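
/*
 * Illustrative sketch (not part of this file): fast-path callers resolve
 * the blkg under the RCU read lock only and fall back to a slower path
 * when no blkg exists yet, e.g.
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(blkcg, q);
 *	if (blkg)
 *		use_blkg(blkg);
 *	rcu_read_unlock();
 *
 * use_blkg() stands in for whatever per-blkg work the caller does while
 * still inside the RCU critical section.
 */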
/*
 * If @new_blkg is %NULL, this function tries to allocate a new one as
 * necessary using %GFP_NOWAIT.  @new_blkg is always consumed on return.
 */
static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
                                    struct request_queue *q,
                                    struct blkcg_gq *new_blkg)
{
        struct blkcg_gq *blkg;
        struct bdi_writeback_congested *wb_congested;
        int i, ret;

        WARN_ON_ONCE(!rcu_read_lock_held());
        lockdep_assert_held(q->queue_lock);

        /* blkg holds a reference to blkcg */
        if (!css_tryget_online(&blkcg->css)) {
                ret = -EINVAL;
                goto err_free_blkg;
        }

        wb_congested = wb_congested_get_create(&q->backing_dev_info,
                                               blkcg->css.id, GFP_NOWAIT);
        if (!wb_congested) {
                ret = -ENOMEM;
                goto err_put_css;
        }

        /* allocate */
        if (!new_blkg) {
                new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT);
                if (unlikely(!new_blkg)) {
                        ret = -ENOMEM;
                        goto err_put_congested;
                }
        }
        blkg = new_blkg;
        blkg->wb_congested = wb_congested;

        /* link parent */
        if (blkcg_parent(blkcg)) {
                blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
                if (WARN_ON_ONCE(!blkg->parent)) {
                        ret = -EINVAL;
                        goto err_put_congested;
                }
                blkg_get(blkg->parent);
        }

        /* invoke per-policy init */
        for (i = 0; i < BLKCG_MAX_POLS; i++) {
                struct blkcg_policy *pol = blkcg_policy[i];

                if (blkg->pd[i] && pol->pd_init_fn)
                        pol->pd_init_fn(blkg);
        }

        /* insert */
        spin_lock(&blkcg->lock);
        ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
        if (likely(!ret)) {
                hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
                list_add(&blkg->q_node, &q->blkg_list);

                for (i = 0; i < BLKCG_MAX_POLS; i++) {
                        struct blkcg_policy *pol = blkcg_policy[i];

                        if (blkg->pd[i] && pol->pd_online_fn)
                                pol->pd_online_fn(blkg);
                }
        }
        blkg->online = true;
        spin_unlock(&blkcg->lock);

        if (!ret)
                return blkg;

        /* @blkg failed to be fully initialized, use the usual release path */
        blkg_put(blkg);
        return ERR_PTR(ret);

err_put_congested:
        wb_congested_put(wb_congested);
err_put_css:
        css_put(&blkcg->css);
err_free_blkg:
        blkg_free(new_blkg);
        return ERR_PTR(ret);
}
/**
 * blkg_lookup_create - lookup blkg, try to create one if not there
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  If it doesn't exist, try to
 * create one.  blkg creation is performed recursively from blkcg_root such
 * that all non-root blkg's have access to the parent blkg.  This function
 * should be called under RCU read lock and @q->queue_lock.
 *
 * Returns pointer to the looked up or created blkg on success, ERR_PTR()
 * value on error.  If @q is dead, returns ERR_PTR(-EINVAL).  If @q is not
 * dead and bypassing, returns ERR_PTR(-EBUSY).
 */
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
                                    struct request_queue *q)
{
        struct blkcg_gq *blkg;

        WARN_ON_ONCE(!rcu_read_lock_held());
        lockdep_assert_held(q->queue_lock);

        /*
         * This could be the first entry point of blkcg implementation and
         * we shouldn't allow anything to go through for a bypassing queue.
         */
        if (unlikely(blk_queue_bypass(q)))
                return ERR_PTR(blk_queue_dying(q) ? -EINVAL : -EBUSY);

        blkg = __blkg_lookup(blkcg, q, true);
        if (blkg)
                return blkg;

        /*
         * Create blkgs walking down from blkcg_root to @blkcg, so that all
         * non-root blkgs have access to their parents.
         */
        while (true) {
                struct blkcg *pos = blkcg;
                struct blkcg *parent = blkcg_parent(blkcg);

                while (parent && !__blkg_lookup(parent, q, false)) {
                        pos = parent;
                        parent = blkcg_parent(parent);
                }

                blkg = blkg_create(pos, q, NULL);
                if (pos == blkcg || IS_ERR(blkg))
                        return blkg;
        }
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);
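
/*
 * Illustrative sketch (not part of this file): an I/O path that wants a
 * blkg for the current cgroup typically does the lookup/creation with
 * exactly the locks documented above, e.g.
 *
 *	rcu_read_lock();
 *	spin_lock_irq(q->queue_lock);
 *	blkg = blkg_lookup_create(blkcg, q);
 *	if (IS_ERR(blkg))
 *		blkg = q->root_blkg;
 *	spin_unlock_irq(q->queue_lock);
 *	rcu_read_unlock();
 *
 * Falling back to @q->root_blkg on error is one possible policy choice,
 * not something this interface requires.
 */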
static void blkg_destroy(struct blkcg_gq *blkg)
{
        struct blkcg *blkcg = blkg->blkcg;
        int i;

        lockdep_assert_held(blkg->q->queue_lock);
        lockdep_assert_held(&blkcg->lock);

        /* Something wrong if we are trying to remove same group twice */
        WARN_ON_ONCE(list_empty(&blkg->q_node));
        WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));

        for (i = 0; i < BLKCG_MAX_POLS; i++) {
                struct blkcg_policy *pol = blkcg_policy[i];

                if (blkg->pd[i] && pol->pd_offline_fn)
                        pol->pd_offline_fn(blkg);
        }
        blkg->online = false;

        radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
        list_del_init(&blkg->q_node);
        hlist_del_init_rcu(&blkg->blkcg_node);

        /*
         * Both setting lookup hint to and clearing it from @blkg are done
         * under queue_lock.  If it's not pointing to @blkg now, it never
         * will.  Hint assignment itself can race safely.
         */
        if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
                rcu_assign_pointer(blkcg->blkg_hint, NULL);

        /*
         * Put the reference taken at the time of creation so that when all
         * queues are gone, group can be destroyed.
         */
        blkg_put(blkg);
}
/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 *
 * Destroy all blkgs associated with @q.
 */
static void blkg_destroy_all(struct request_queue *q)
{
        struct blkcg_gq *blkg, *n;

        lockdep_assert_held(q->queue_lock);

        list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
                struct blkcg *blkcg = blkg->blkcg;

                spin_lock(&blkcg->lock);
                blkg_destroy(blkg);
                spin_unlock(&blkcg->lock);
        }
}
/*
 * A group is RCU protected, but having an rcu lock does not mean that one
 * can access all the fields of blkg and assume these are valid.  For
 * example, don't try to follow throtl_data and request queue links.
 *
 * Having a reference to blkg under an rcu allows accesses to only values
 * local to groups like group stats and group rate limits.
 */
void __blkg_release_rcu(struct rcu_head *rcu_head)
{
        struct blkcg_gq *blkg = container_of(rcu_head, struct blkcg_gq, rcu_head);
        int i;

        /* tell policies that this one is being freed */
        for (i = 0; i < BLKCG_MAX_POLS; i++) {
                struct blkcg_policy *pol = blkcg_policy[i];

                if (blkg->pd[i] && pol->pd_exit_fn)
                        pol->pd_exit_fn(blkg);
        }

        /* release the blkcg and parent blkg refs this blkg has been holding */
        css_put(&blkg->blkcg->css);
        if (blkg->parent)
                blkg_put(blkg->parent);

        wb_congested_put(blkg->wb_congested);

        blkg_free(blkg);
}
EXPORT_SYMBOL_GPL(__blkg_release_rcu);
/*
 * The next function used by blk_queue_for_each_rl().  It's a bit tricky
 * because the root blkg uses @q->root_rl instead of its own rl.
 */
struct request_list *__blk_queue_next_rl(struct request_list *rl,
                                         struct request_queue *q)
{
        struct list_head *ent;
        struct blkcg_gq *blkg;

        /*
         * Determine the current blkg list_head.  The first entry is
         * root_rl which is off @q->blkg_list and mapped to the head.
         */
        if (rl == &q->root_rl) {
                ent = &q->blkg_list;
                /* There are no more block groups, hence no request lists */
                if (list_empty(ent))
                        return NULL;
        } else {
                blkg = container_of(rl, struct blkcg_gq, rl);
                ent = &blkg->q_node;
        }

        /* walk to the next list_head, skip root blkcg */
        ent = ent->next;
        if (ent == &q->root_blkg->q_node)
                ent = ent->next;
        if (ent == &q->blkg_list)
                return NULL;

        blkg = container_of(ent, struct blkcg_gq, q_node);
        return &blkg->rl;
}
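
/*
 * Illustrative sketch (not part of this file): blk_queue_for_each_rl(),
 * defined in blkdev.h, wraps this iterator so a caller can visit every
 * request_list of a queue, starting with @q->root_rl, e.g.
 *
 *	struct request_list *rl;
 *
 *	blk_queue_for_each_rl(rl, q)
 *		do_something_with(rl);
 *
 * do_something_with() stands in for whatever per-request_list operation
 * the caller performs.
 */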
static int blkcg_reset_stats(struct cgroup_subsys_state *css,
                             struct cftype *cftype, u64 val)
{
        struct blkcg *blkcg = css_to_blkcg(css);
        struct blkcg_gq *blkg;
        int i;

        mutex_lock(&blkcg_pol_mutex);
        spin_lock_irq(&blkcg->lock);

        /*
         * Note that stat reset is racy - it doesn't synchronize against
         * stat updates.  This is a debug feature which shouldn't exist
         * anyway.  If you get hit by a race, retry.
         */
        hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
                for (i = 0; i < BLKCG_MAX_POLS; i++) {
                        struct blkcg_policy *pol = blkcg_policy[i];

                        if (blkcg_policy_enabled(blkg->q, pol) &&
                            pol->pd_reset_stats_fn)
                                pol->pd_reset_stats_fn(blkg);
                }
        }

        spin_unlock_irq(&blkcg->lock);
        mutex_unlock(&blkcg_pol_mutex);
        return 0;
}
static const char *blkg_dev_name(struct blkcg_gq *blkg)
{
        /* some drivers (floppy) instantiate a queue w/o disk registered */
        if (blkg->q->backing_dev_info.dev)
                return dev_name(blkg->q->backing_dev_info.dev);
        return NULL;
}
/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: whether to print the sum of the @prfill return values
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 * policy data and @data and the matching queue lock held.  If @show_total
 * is %true, the sum of the return values from @prfill is printed with
 * "Total" label at the end.
 *
 * This is to be used to construct print functions for
 * cftype->read_seq_string method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
                       u64 (*prfill)(struct seq_file *,
                                     struct blkg_policy_data *, int),
                       const struct blkcg_policy *pol, int data,
                       bool show_total)
{
        struct blkcg_gq *blkg;
        u64 total = 0;

        rcu_read_lock();
        hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
                spin_lock_irq(blkg->q->queue_lock);
                if (blkcg_policy_enabled(blkg->q, pol))
                        total += prfill(sf, blkg->pd[pol->plid], data);
                spin_unlock_irq(blkg->q->queue_lock);
        }
        rcu_read_unlock();

        if (show_total)
                seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);
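
/*
 * Illustrative sketch (assumed policy code, not part of this file): a
 * policy wires this helper into a cftype seq_show callback together with
 * a prfill function, e.g.
 *
 *	static int my_print_stat(struct seq_file *sf, void *v)
 *	{
 *		blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
 *				  blkg_prfill_stat, &my_blkcg_policy,
 *				  seq_cft(sf)->private, false);
 *		return 0;
 *	}
 *
 * my_print_stat and my_blkcg_policy are hypothetical names; the cftype's
 * ->private would hold the offset of a struct blkg_stat inside the
 * policy's blkg_policy_data, which is what blkg_prfill_stat() expects.
 */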
/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
        const char *dname = blkg_dev_name(pd->blkg);

        if (!dname)
                return 0;

        seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
        return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);
/**
 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @rwstat: rwstat to print
 *
 * Print @rwstat to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
                         const struct blkg_rwstat *rwstat)
{
        static const char *rwstr[] = {
                [BLKG_RWSTAT_READ]	= "Read",
                [BLKG_RWSTAT_WRITE]	= "Write",
                [BLKG_RWSTAT_SYNC]	= "Sync",
                [BLKG_RWSTAT_ASYNC]	= "Async",
        };
        const char *dname = blkg_dev_name(pd->blkg);
        u64 v;
        int i;

        if (!dname)
                return 0;

        for (i = 0; i < BLKG_RWSTAT_NR; i++)
                seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
                           (unsigned long long)rwstat->cnt[i]);

        v = rwstat->cnt[BLKG_RWSTAT_READ] + rwstat->cnt[BLKG_RWSTAT_WRITE];
        seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
        return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_rwstat);
/**
 * blkg_prfill_stat - prfill callback for blkg_stat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_stat in @pd
 *
 * prfill callback for printing a blkg_stat.
 */
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off)
{
        return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off));
}
EXPORT_SYMBOL_GPL(blkg_prfill_stat);
/**
 * blkg_prfill_rwstat - prfill callback for blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_rwstat in @pd
 *
 * prfill callback for printing a blkg_rwstat.
 */
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
                       int off)
{
        struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off);

        return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);
/**
 * blkg_stat_recursive_sum - collect hierarchical blkg_stat
 * @pd: policy private data of interest
 * @off: offset to the blkg_stat in @pd
 *
 * Collect the blkg_stat specified by @off from @pd and all its online
 * descendants and return the sum.  The caller must be holding the queue
 * lock for online tests.
 */
u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off)
{
        struct blkcg_policy *pol = blkcg_policy[pd->plid];
        struct blkcg_gq *pos_blkg;
        struct cgroup_subsys_state *pos_css;
        u64 sum = 0;

        lockdep_assert_held(pd->blkg->q->queue_lock);

        rcu_read_lock();
        blkg_for_each_descendant_pre(pos_blkg, pos_css, pd_to_blkg(pd)) {
                struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol);
                struct blkg_stat *stat = (void *)pos_pd + off;

                if (pos_blkg->online)
                        sum += blkg_stat_read(stat);
        }
        rcu_read_unlock();

        return sum;
}
EXPORT_SYMBOL_GPL(blkg_stat_recursive_sum);
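
/*
 * Illustrative sketch (assumed policy code, not part of this file): a
 * policy exposes the hierarchical total by pairing this helper with
 * __blkg_prfill_u64() in its prfill callback, e.g.
 *
 *	static u64 my_prfill_stat_recursive(struct seq_file *sf,
 *					    struct blkg_policy_data *pd,
 *					    int off)
 *	{
 *		return __blkg_prfill_u64(sf, pd,
 *					 blkg_stat_recursive_sum(pd, off));
 *	}
 *
 * my_prfill_stat_recursive is a hypothetical name; @off is the offset of
 * a struct blkg_stat inside the policy's blkg_policy_data.
 */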
/**
 * blkg_rwstat_recursive_sum - collect hierarchical blkg_rwstat
 * @pd: policy private data of interest
 * @off: offset to the blkg_rwstat in @pd
 *
 * Collect the blkg_rwstat specified by @off from @pd and all its online
 * descendants and return the sum.  The caller must be holding the queue
 * lock for online tests.
 */
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
                                             int off)
{
        struct blkcg_policy *pol = blkcg_policy[pd->plid];
        struct blkcg_gq *pos_blkg;
        struct cgroup_subsys_state *pos_css;
        struct blkg_rwstat sum = { };
        int i;

        lockdep_assert_held(pd->blkg->q->queue_lock);

        rcu_read_lock();
        blkg_for_each_descendant_pre(pos_blkg, pos_css, pd_to_blkg(pd)) {
                struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol);
                struct blkg_rwstat *rwstat = (void *)pos_pd + off;
                struct blkg_rwstat tmp;

                if (!pos_blkg->online)
                        continue;

                tmp = blkg_rwstat_read(rwstat);

                for (i = 0; i < BLKG_RWSTAT_NR; i++)
                        sum.cnt[i] += tmp.cnt[i];
        }
        rcu_read_unlock();

        return sum;
}
EXPORT_SYMBOL_GPL(blkg_rwstat_recursive_sum);
/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @pol: target policy
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result.  @ctx->blkg points to the blkg to be updated and @ctx->v the new
 * value.  This function returns with RCU read lock and queue lock held and
 * must be paired with blkg_conf_finish().
 */
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
                   const char *input, struct blkg_conf_ctx *ctx)
        __acquires(rcu) __acquires(disk->queue->queue_lock)
{
        struct gendisk *disk;
        struct blkcg_gq *blkg;
        unsigned int major, minor;
        unsigned long long v;
        int part, ret;

        if (sscanf(input, "%u:%u %llu", &major, &minor, &v) != 3)
                return -EINVAL;

        disk = get_gendisk(MKDEV(major, minor), &part);
        if (!disk)
                return -EINVAL;
        if (part) {
                put_disk(disk);
                return -EINVAL;
        }

        rcu_read_lock();
        spin_lock_irq(disk->queue->queue_lock);

        if (blkcg_policy_enabled(disk->queue, pol))
                blkg = blkg_lookup_create(blkcg, disk->queue);
        else
                blkg = ERR_PTR(-EINVAL);

        if (IS_ERR(blkg)) {
                ret = PTR_ERR(blkg);
                rcu_read_unlock();
                spin_unlock_irq(disk->queue->queue_lock);
                put_disk(disk);
                /*
                 * If queue was bypassing, we should retry.  Do so after a
                 * short msleep().  It isn't strictly necessary but queue
                 * can be bypassing for some time and it's always nice to
                 * avoid busy looping.
                 */
                if (ret == -EBUSY) {
                        msleep(10);
                        ret = restart_syscall();
                }
                return ret;
        }

        ctx->disk = disk;
        ctx->blkg = blkg;
        ctx->v = v;
        return 0;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);
/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after per-blkg config update.  This function must be paired
 * with blkg_conf_prep().
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
        __releases(ctx->disk->queue->queue_lock) __releases(rcu)
{
        spin_unlock_irq(ctx->disk->queue->queue_lock);
        rcu_read_unlock();
        put_disk(ctx->disk);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);
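
/*
 * Illustrative sketch (assumed policy code, not part of this file): a
 * policy's configuration write handler brackets the update with
 * blkg_conf_prep()/blkg_conf_finish() so that the blkg and both locks are
 * valid while the new value is applied, e.g.
 *
 *	static int my_set_limit(struct blkcg *blkcg, const char *buf)
 *	{
 *		struct blkg_conf_ctx ctx;
 *		int ret;
 *
 *		ret = blkg_conf_prep(blkcg, &my_blkcg_policy, buf, &ctx);
 *		if (ret)
 *			return ret;
 *
 *		my_apply_limit(blkg_to_pd(ctx.blkg, &my_blkcg_policy), ctx.v);
 *
 *		blkg_conf_finish(&ctx);
 *		return 0;
 *	}
 *
 * my_set_limit, my_blkcg_policy and my_apply_limit are hypothetical names
 * standing in for a policy's own handler, blkcg_policy and update logic.
 */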
struct cftype blkcg_files[] = {
        {
                .name = "reset_stats",
                .write_u64 = blkcg_reset_stats,
        },
        { }	/* terminate */
};
/**
 * blkcg_css_offline - cgroup css_offline callback
 * @css: css of interest
 *
 * This function is called when @css is about to go away and responsible
 * for shooting down all blkgs associated with @css.  blkgs should be
 * removed while holding both q and blkcg locks.  As blkcg lock is nested
 * inside q lock, this function performs reverse double lock dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static void blkcg_css_offline(struct cgroup_subsys_state *css)
{
        struct blkcg *blkcg = css_to_blkcg(css);

        spin_lock_irq(&blkcg->lock);

        while (!hlist_empty(&blkcg->blkg_list)) {
                struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
                                                    struct blkcg_gq, blkcg_node);
                struct request_queue *q = blkg->q;

                if (spin_trylock(q->queue_lock)) {
                        blkg_destroy(blkg);
                        spin_unlock(q->queue_lock);
                } else {
                        spin_unlock_irq(&blkcg->lock);
                        cpu_relax();
                        spin_lock_irq(&blkcg->lock);
                }
        }

        spin_unlock_irq(&blkcg->lock);

        wb_blkcg_offline(blkcg);
}
static void blkcg_css_free(struct cgroup_subsys_state *css)
{
        struct blkcg *blkcg = css_to_blkcg(css);
        int i;

        mutex_lock(&blkcg_pol_mutex);
        list_del(&blkcg->all_blkcgs_node);
        mutex_unlock(&blkcg_pol_mutex);

        if (blkcg != &blkcg_root) {
                for (i = 0; i < BLKCG_MAX_POLS; i++)
                        kfree(blkcg->pd[i]);
                kfree(blkcg);
        }
}
static struct cgroup_subsys_state *
blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
{
        struct blkcg *blkcg;
        struct cgroup_subsys_state *ret;
        int i;

        mutex_lock(&blkcg_pol_mutex);

        if (!parent_css) {
                blkcg = &blkcg_root;
                goto done;
        }

        blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
        if (!blkcg) {
                ret = ERR_PTR(-ENOMEM);
                goto free_blkcg;
        }

        for (i = 0; i < BLKCG_MAX_POLS; i++) {
                struct blkcg_policy *pol = blkcg_policy[i];
                struct blkcg_policy_data *cpd;

                /*
                 * If the policy hasn't been attached yet, wait for it
                 * to be attached before doing anything else.  Otherwise,
                 * check if the policy requires any specific per-cgroup
                 * data: if it does, allocate and initialize it.
                 */
                if (!pol || !pol->cpd_size)
                        continue;

                BUG_ON(blkcg->pd[i]);
                cpd = kzalloc(pol->cpd_size, GFP_KERNEL);
                if (!cpd) {
                        ret = ERR_PTR(-ENOMEM);
                        goto free_pd_blkcg;
                }
                blkcg->pd[i] = cpd;
                cpd->plid = i;
                pol->cpd_init_fn(blkcg);
        }

done:
        spin_lock_init(&blkcg->lock);
        INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT);
        INIT_HLIST_HEAD(&blkcg->blkg_list);
#ifdef CONFIG_CGROUP_WRITEBACK
        INIT_LIST_HEAD(&blkcg->cgwb_list);
#endif
        list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);

        mutex_unlock(&blkcg_pol_mutex);
        return &blkcg->css;

free_pd_blkcg:
        for (i--; i >= 0; i--)
                kfree(blkcg->pd[i]);
free_blkcg:
        kfree(blkcg);
        mutex_unlock(&blkcg_pol_mutex);
        return ret;
}
/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node().  Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
        struct blkcg_gq *new_blkg, *blkg;
        bool preloaded;
        int ret;

        new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
        if (!new_blkg)
                return -ENOMEM;

        preloaded = !radix_tree_preload(GFP_KERNEL);

        /*
         * Make sure the root blkg exists and count the existing blkgs.  As
         * @q is bypassing at this point, blkg_lookup_create() can't be
         * used.  Open code insertion.
         */
        rcu_read_lock();
        spin_lock_irq(q->queue_lock);
        blkg = blkg_create(&blkcg_root, q, new_blkg);
        spin_unlock_irq(q->queue_lock);
        rcu_read_unlock();

        if (preloaded)
                radix_tree_preload_end();

        if (IS_ERR(blkg))
                return PTR_ERR(blkg);

        q->root_blkg = blkg;
        q->root_rl.blkg = blkg;

        ret = blk_throtl_init(q);
        if (ret) {
                spin_lock_irq(q->queue_lock);
                blkg_destroy_all(q);
                spin_unlock_irq(q->queue_lock);
        }
        return ret;
}
/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue().  Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
        lockdep_assert_held(q->queue_lock);

        /*
         * @q could be exiting and already have destroyed all blkgs as
         * indicated by NULL root_blkg.  If so, don't confuse policies.
         */
        if (!q->root_blkg)
                return;

        blk_throtl_drain(q);
}
/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue().  Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
        spin_lock_irq(q->queue_lock);
        blkg_destroy_all(q);
        spin_unlock_irq(q->queue_lock);

        blk_throtl_exit(q);
}
/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures.  For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkcg_can_attach(struct cgroup_subsys_state *css,
                            struct cgroup_taskset *tset)
{
        struct task_struct *task;
        struct io_context *ioc;
        int ret = 0;

        /* task_lock() is needed to avoid races with exit_io_context() */
        cgroup_taskset_for_each(task, tset) {
                task_lock(task);
                ioc = task->io_context;
                if (ioc && atomic_read(&ioc->nr_tasks) > 1)
                        ret = -EINVAL;
                task_unlock(task);
                if (ret)
                        break;
        }
        return ret;
}
struct cgroup_subsys blkio_cgrp_subsys = {
        .css_alloc = blkcg_css_alloc,
        .css_offline = blkcg_css_offline,
        .css_free = blkcg_css_free,
        .can_attach = blkcg_can_attach,
        .legacy_cftypes = blkcg_files,
#ifdef CONFIG_MEMCG
        /*
         * This ensures that, if available, memcg is automatically enabled
         * together on the default hierarchy so that the owner cgroup can
         * be retrieved from writeback pages.
         */
        .depends_on = 1 << memory_cgrp_id,
#endif
};
EXPORT_SYMBOL_GPL(blkio_cgrp_subsys);
/**
 * blkcg_activate_policy - activate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to activate
 *
 * Activate @pol on @q.  Requires %GFP_KERNEL context.  @q goes through
 * bypass mode to populate its blkgs with policy_data for @pol.
 *
 * Activation happens with @q bypassed, so nobody would be accessing blkgs
 * from IO path.  Update of each blkg is protected by both queue and blkcg
 * locks so that holding either lock and testing blkcg_policy_enabled() is
 * always enough for dereferencing policy data.
 *
 * The caller is responsible for synchronizing [de]activations and policy
 * [un]registrations.  Returns 0 on success, -errno on failure.
 */
int blkcg_activate_policy(struct request_queue *q,
                          const struct blkcg_policy *pol)
{
        LIST_HEAD(pds);
        struct blkcg_gq *blkg;
        struct blkg_policy_data *pd, *nd;
        int cnt = 0, ret;

        if (blkcg_policy_enabled(q, pol))
                return 0;

        /* count and allocate policy_data for all existing blkgs */
        blk_queue_bypass_start(q);
        spin_lock_irq(q->queue_lock);
        list_for_each_entry(blkg, &q->blkg_list, q_node)
                cnt++;
        spin_unlock_irq(q->queue_lock);

        /* allocate per-blkg policy data for all existing blkgs */
        while (cnt--) {
                pd = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node);
                if (!pd) {
                        ret = -ENOMEM;
                        goto out_free;
                }
                list_add_tail(&pd->alloc_node, &pds);
        }

        /*
         * Install the allocated pds and cpds. With @q bypassing, no new blkg
         * should have been created while the queue lock was dropped.
         */
        spin_lock_irq(q->queue_lock);

        list_for_each_entry(blkg, &q->blkg_list, q_node) {
                if (WARN_ON(list_empty(&pds))) {
                        /* umm... this shouldn't happen, just abort */
                        ret = -ENOMEM;
                        goto out_unlock;
                }
                pd = list_first_entry(&pds, struct blkg_policy_data, alloc_node);
                list_del_init(&pd->alloc_node);

                /* grab blkcg lock too while installing @pd on @blkg */
                spin_lock(&blkg->blkcg->lock);

                blkg->pd[pol->plid] = pd;
                pd->blkg = blkg;
                pd->plid = pol->plid;
                pol->pd_init_fn(blkg);

                spin_unlock(&blkg->blkcg->lock);
        }

        __set_bit(pol->plid, q->blkcg_pols);
        ret = 0;
out_unlock:
        spin_unlock_irq(q->queue_lock);
out_free:
        blk_queue_bypass_end(q);
        list_for_each_entry_safe(pd, nd, &pds, alloc_node)
                kfree(pd);
        return ret;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);
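
/*
 * Illustrative sketch (assumed policy code, not part of this file): a
 * policy activates itself while setting up its per-queue state and
 * deactivates on teardown, e.g.
 *
 *	ret = blkcg_activate_policy(q, &my_blkcg_policy);
 *	if (ret)
 *		return ret;
 *	...
 *	blkcg_deactivate_policy(q, &my_blkcg_policy);
 *
 * my_blkcg_policy is a hypothetical policy; blk-throttle and the cfq
 * elevator follow this pattern for their respective policies.
 */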
/**
 * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to deactivate
 *
 * Deactivate @pol on @q.  Follows the same synchronization rules as
 * blkcg_activate_policy().
 */
void blkcg_deactivate_policy(struct request_queue *q,
                             const struct blkcg_policy *pol)
{
        struct blkcg_gq *blkg;

        if (!blkcg_policy_enabled(q, pol))
                return;

        blk_queue_bypass_start(q);
        spin_lock_irq(q->queue_lock);

        __clear_bit(pol->plid, q->blkcg_pols);

        list_for_each_entry(blkg, &q->blkg_list, q_node) {
                /* grab blkcg lock too while removing @pd from @blkg */
                spin_lock(&blkg->blkcg->lock);

                if (pol->pd_offline_fn)
                        pol->pd_offline_fn(blkg);
                if (pol->pd_exit_fn)
                        pol->pd_exit_fn(blkg);

                kfree(blkg->pd[pol->plid]);
                blkg->pd[pol->plid] = NULL;

                spin_unlock(&blkg->blkcg->lock);
        }

        spin_unlock_irq(q->queue_lock);
        blk_queue_bypass_end(q);
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
/**
 * blkcg_policy_register - register a blkcg policy
 * @pol: blkcg policy to register
 *
 * Register @pol with blkcg core.  Might sleep and @pol may be modified on
 * successful registration.  Returns 0 on success and -errno on failure.
 */
int blkcg_policy_register(struct blkcg_policy *pol)
{
        struct blkcg *blkcg;
        int i, ret;

        if (WARN_ON(pol->pd_size < sizeof(struct blkg_policy_data)))
                return -EINVAL;

        mutex_lock(&blkcg_pol_register_mutex);
        mutex_lock(&blkcg_pol_mutex);

        /* find an empty slot */
        ret = -ENOSPC;
        for (i = 0; i < BLKCG_MAX_POLS; i++)
                if (!blkcg_policy[i])
                        break;
        if (i >= BLKCG_MAX_POLS)
                goto err_unlock;

        /* register @pol */
        pol->plid = i;
        blkcg_policy[pol->plid] = pol;

        /* allocate and install cpd's */
        if (pol->cpd_size) {
                list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
                        struct blkcg_policy_data *cpd;

                        cpd = kzalloc(pol->cpd_size, GFP_KERNEL);
                        if (!cpd) {
                                mutex_unlock(&blkcg_pol_mutex);
                                goto err_free_cpds;
                        }

                        blkcg->pd[pol->plid] = cpd;
                        cpd->plid = pol->plid;
                        pol->cpd_init_fn(blkcg);
                }
        }

        mutex_unlock(&blkcg_pol_mutex);

        /* everything is in place, add intf files for the new policy */
        if (pol->cftypes)
                WARN_ON(cgroup_add_legacy_cftypes(&blkio_cgrp_subsys,
                                                  pol->cftypes));
        mutex_unlock(&blkcg_pol_register_mutex);
        return 0;

err_free_cpds:
        if (pol->cpd_size) {
                list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
                        kfree(blkcg->pd[pol->plid]);
                        blkcg->pd[pol->plid] = NULL;
                }
        }
        blkcg_policy[pol->plid] = NULL;
err_unlock:
        mutex_unlock(&blkcg_pol_mutex);
        mutex_unlock(&blkcg_pol_register_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(blkcg_policy_register);
/**
 * blkcg_policy_unregister - unregister a blkcg policy
 * @pol: blkcg policy to unregister
 *
 * Undo blkcg_policy_register(@pol).  Might sleep.
 */
void blkcg_policy_unregister(struct blkcg_policy *pol)
{
        struct blkcg *blkcg;

        mutex_lock(&blkcg_pol_register_mutex);

        if (WARN_ON(blkcg_policy[pol->plid] != pol))
                goto out_unlock;

        /* kill the intf files first */
        if (pol->cftypes)
                cgroup_rm_cftypes(pol->cftypes);

        /* remove cpds and unregister */
        mutex_lock(&blkcg_pol_mutex);

        if (pol->cpd_size) {
                list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
                        kfree(blkcg->pd[pol->plid]);
                        blkcg->pd[pol->plid] = NULL;
                }
        }
        blkcg_policy[pol->plid] = NULL;

        mutex_unlock(&blkcg_pol_mutex);
out_unlock:
        mutex_unlock(&blkcg_pol_register_mutex);
}
EXPORT_SYMBOL_GPL(blkcg_policy_unregister);
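
/*
 * Illustrative sketch (assumed policy code, not part of this file): a
 * policy defines a struct blkcg_policy and registers/unregisters it from
 * its module init/exit paths, e.g.
 *
 *	static struct blkcg_policy my_blkcg_policy = {
 *		.pd_size	= sizeof(struct my_pd),
 *		.cftypes	= my_blkcg_files,
 *		.pd_init_fn	= my_pd_init,
 *		.pd_exit_fn	= my_pd_exit,
 *	};
 *
 *	static int __init my_init(void)
 *	{
 *		return blkcg_policy_register(&my_blkcg_policy);
 *	}
 *
 *	static void __exit my_exit(void)
 *	{
 *		blkcg_policy_unregister(&my_blkcg_policy);
 *	}
 *
 * All my_* names are hypothetical.  struct my_pd would embed a struct
 * blkg_policy_data, and .pd_size must be at least
 * sizeof(struct blkg_policy_data), as checked by blkcg_policy_register().
 */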