/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include "blk-cgroup.h"
#define MAX_KEY_LEN 100

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

static DEFINE_MUTEX(all_q_mutex);
static LIST_HEAD(all_q_list);

/* List of groups pending per cpu stats allocation */
static DEFINE_SPINLOCK(alloc_list_lock);
static LIST_HEAD(alloc_list);

static void blkio_stat_alloc_fn(struct work_struct *);
static DECLARE_DELAYED_WORK(blkio_stat_alloc_work, blkio_stat_alloc_fn);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct blkio_policy_type *blkio_policy[BLKIO_NR_POLICIES];
struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);
static struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
struct blkio_cgroup *bio_blkio_cgroup(struct bio *bio)
{
	if (bio && bio->bi_css)
		return container_of(bio->bi_css, struct blkio_cgroup, css);
	return task_blkio_cgroup(current);
}
EXPORT_SYMBOL_GPL(bio_blkio_cgroup);
/*
 * Worker for allocating per cpu stat for blk groups. This is scheduled on
 * the system_nrt_wq once there are some groups on the alloc_list waiting
 * for allocation.
 */
static void blkio_stat_alloc_fn(struct work_struct *work)
{
	static void *pcpu_stats[BLKIO_NR_POLICIES];
	struct delayed_work *dwork = to_delayed_work(work);
	struct blkio_group *blkg;
	int i;
	bool empty = false;

alloc_stats:
	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		if (pcpu_stats[i] != NULL)
			continue;

		pcpu_stats[i] = alloc_percpu(struct blkio_group_stats_cpu);

		/* Allocation failed. Try again after some time. */
		if (pcpu_stats[i] == NULL) {
			queue_delayed_work(system_nrt_wq, dwork,
					   msecs_to_jiffies(10));
			return;
		}
	}

	spin_lock_irq(&blkio_list_lock);
	spin_lock(&alloc_list_lock);

	/* cgroup got deleted or queue exited. */
	if (!list_empty(&alloc_list)) {
		blkg = list_first_entry(&alloc_list, struct blkio_group,
					alloc_node);
		for (i = 0; i < BLKIO_NR_POLICIES; i++) {
			struct blkg_policy_data *pd = blkg->pd[i];

			if (blkio_policy[i] && pd && !pd->stats_cpu)
				swap(pd->stats_cpu, pcpu_stats[i]);
		}

		list_del_init(&blkg->alloc_node);
	}

	empty = list_empty(&alloc_list);

	spin_unlock(&alloc_list_lock);
	spin_unlock_irq(&blkio_list_lock);

	if (!empty)
		goto alloc_stats;
}
/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkio_group *blkg)
{
	int i;

	if (!blkg)
		return;

	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkio_policy_type *pol = blkio_policy[i];
		struct blkg_policy_data *pd = blkg->pd[i];

		if (!pd)
			continue;

		if (pol && pol->ops.blkio_exit_group_fn)
			pol->ops.blkio_exit_group_fn(blkg);

		free_percpu(pd->stats_cpu);
		kfree(pd);
	}

	kfree(blkg);
}
/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkio_group *blkg_alloc(struct blkio_cgroup *blkcg,
				      struct request_queue *q)
{
	struct blkio_group *blkg;
	int i;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), GFP_ATOMIC, q->node);
	if (!blkg)
		return NULL;

	blkg->q = q;
	INIT_LIST_HEAD(&blkg->q_node);
	INIT_LIST_HEAD(&blkg->alloc_node);
	blkg->blkcg = blkcg;
	blkg->refcnt = 1;
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));

	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkio_policy_type *pol = blkio_policy[i];
		struct blkg_policy_data *pd;

		if (!pol)
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = kzalloc_node(sizeof(*pd) + pol->pdata_size, GFP_ATOMIC,
				  q->node);
		if (!pd) {
			blkg_free(blkg);
			return NULL;
		}

		blkg->pd[i] = pd;
		pd->blkg = blkg;
	}

	/* invoke per-policy init */
	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkio_policy_type *pol = blkio_policy[i];

		if (pol)
			pol->ops.blkio_init_group_fn(blkg);
	}

	return blkg;
}
struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
				       struct request_queue *q,
				       bool for_root)
	__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct blkio_group *blkg;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 * The following can be removed if blkg lookup is guaranteed to
	 * fail on a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)) && !for_root)
		return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);

	blkg = blkg_lookup(blkcg, q);
	if (blkg)
		return blkg;

	/* blkg holds a reference to blkcg */
	if (!css_tryget(&blkcg->css))
		return ERR_PTR(-EINVAL);

	/*
	 * Allocate and initialize.
	 */
	blkg = blkg_alloc(blkcg, q);

	/* did alloc fail? */
	if (unlikely(!blkg)) {
		css_put(&blkcg->css);
		blkg = ERR_PTR(-ENOMEM);
		goto out;
	}

	/* insert */
	spin_lock(&blkcg->lock);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	list_add(&blkg->q_node, &q->blkg_list);
	spin_unlock(&blkcg->lock);

	spin_lock(&alloc_list_lock);
	list_add(&blkg->alloc_node, &alloc_list);
	/* Queue per cpu stat allocation from worker thread. */
	queue_delayed_work(system_nrt_wq, &blkio_stat_alloc_work, 0);
	spin_unlock(&alloc_list_lock);
out:
	return blkg;
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);
/* called under rcu_read_lock(). */
struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
				struct request_queue *q)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (blkg->q == q)
			return blkg;
	return NULL;
}
EXPORT_SYMBOL_GPL(blkg_lookup);
static void blkg_destroy(struct blkio_group *blkg)
{
	struct request_queue *q = blkg->q;
	struct blkio_cgroup *blkcg = blkg->blkcg;

	lockdep_assert_held(q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* Something wrong if we are trying to remove same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);

	spin_lock(&alloc_list_lock);
	list_del_init(&blkg->alloc_node);
	spin_unlock(&alloc_list_lock);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	blkg_put(blkg);
}
/*
 * XXX: This updates blkg policy data in-place for root blkg, which is
 * necessary across elevator switch and policy registration as root blkgs
 * aren't shot down.  This broken and racy implementation is temporary.
 * Eventually, blkg shoot down will be replaced by proper in-place update.
 */
void update_root_blkg_pd(struct request_queue *q, enum blkio_policy_id plid)
{
	struct blkio_policy_type *pol = blkio_policy[plid];
	struct blkio_group *blkg = blkg_lookup(&blkio_root_cgroup, q);
	struct blkg_policy_data *pd;

	if (!blkg)
		return;

	kfree(blkg->pd[plid]);
	blkg->pd[plid] = NULL;

	if (!pol)
		return;

	pd = kzalloc(sizeof(*pd) + pol->pdata_size, GFP_KERNEL);
	WARN_ON_ONCE(!pd);

	pd->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
	WARN_ON_ONCE(!pd->stats_cpu);

	blkg->pd[plid] = pd;
	pd->blkg = blkg;
	pol->ops.blkio_init_group_fn(blkg);
}
EXPORT_SYMBOL_GPL(update_root_blkg_pd);
/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 * @destroy_root: whether to destroy root blkg or not
 *
 * Destroy blkgs associated with @q.  If @destroy_root is %true, all are
 * destroyed; otherwise, root blkg is left alone.
 */
void blkg_destroy_all(struct request_queue *q, bool destroy_root)
{
	struct blkio_group *blkg, *n;

	spin_lock_irq(q->queue_lock);

	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkio_cgroup *blkcg = blkg->blkcg;

		if (!destroy_root && blkg->blkcg == &blkio_root_cgroup)
			continue;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);
	}

	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blkg_destroy_all);
static void blkg_rcu_free(struct rcu_head *rcu_head)
{
	blkg_free(container_of(rcu_head, struct blkio_group, rcu_head));
}
void __blkg_release(struct blkio_group *blkg)
{
	/* release the extra blkcg reference this blkg has been holding */
	css_put(&blkg->blkcg->css);

	/*
	 * A group is freed in rcu manner. But having an rcu lock does not
	 * mean that one can access all the fields of blkg and assume these
	 * are valid. For example, don't try to follow throtl_data and
	 * request queue links.
	 *
	 * Having a reference to blkg under an rcu allows access to only
	 * values local to groups like group stats and group rate limits.
	 */
	call_rcu(&blkg->rcu_head, blkg_rcu_free);
}
EXPORT_SYMBOL_GPL(__blkg_release);
static void blkio_reset_stats_cpu(struct blkio_group *blkg, int plid)
{
	struct blkg_policy_data *pd = blkg->pd[plid];
	int cpu;

	if (pd->stats_cpu == NULL)
		return;

	for_each_possible_cpu(cpu) {
		struct blkio_group_stats_cpu *sc =
			per_cpu_ptr(pd->stats_cpu, cpu);

		blkg_rwstat_reset(&sc->service_bytes);
		blkg_rwstat_reset(&sc->serviced);
	}
}
static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	struct blkio_group *blkg;
	struct hlist_node *n;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates.  This is a debug feature which shouldn't exist
	 * anyway.  If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		struct blkio_policy_type *pol;

		list_for_each_entry(pol, &blkio_list, list) {
			struct blkg_policy_data *pd = blkg->pd[pol->plid];
			struct blkio_group_stats *stats = &pd->stats;

			/* queued stats shouldn't be cleared */
			blkg_rwstat_reset(&stats->service_bytes);
			blkg_rwstat_reset(&stats->serviced);
			blkg_rwstat_reset(&stats->merged);
			blkg_rwstat_reset(&stats->service_time);
			blkg_rwstat_reset(&stats->wait_time);
			blkg_stat_reset(&stats->time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
			blkg_stat_reset(&stats->unaccounted_time);
			blkg_stat_reset(&stats->avg_queue_size_sum);
			blkg_stat_reset(&stats->avg_queue_size_samples);
			blkg_stat_reset(&stats->dequeue);
			blkg_stat_reset(&stats->group_wait_time);
			blkg_stat_reset(&stats->idle_time);
			blkg_stat_reset(&stats->empty_time);
#endif
			blkio_reset_stats_cpu(blkg, pol->plid);

			if (pol->ops.blkio_reset_group_stats_fn)
				pol->ops.blkio_reset_group_stats_fn(blkg);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}
static const char *blkg_dev_name(struct blkio_group *blkg)
{
	/* some drivers (floppy) instantiate a queue w/o disk registered */
	if (blkg->q->backing_dev_info.dev)
		return dev_name(blkg->q->backing_dev_info.dev);
	return NULL;
}
/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: to print out sum of prfill return values or not
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 * policy data and @data.  If @show_total is %true, the sum of the return
 * values from @prfill is printed with "Total" label at the end.
 *
 * This is to be used to construct print functions for
 * cftype->read_seq_string method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkio_cgroup *blkcg,
		       u64 (*prfill)(struct seq_file *, struct blkg_policy_data *, int),
		       int pol, int data, bool show_total)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	u64 total = 0;

	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (blkg->pd[pol])
			total += prfill(sf, blkg->pd[pol], data);
	spin_unlock_irq(&blkcg->lock);

	if (show_total)
		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);
/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pd: policy data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
	const char *dname = blkg_dev_name(pd->blkg);

	if (!dname)
		return 0;

	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);
/**
 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy data of interest
 * @rwstat: rwstat to print
 *
 * Print @rwstat to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat)
{
	static const char *rwstr[] = {
		[BLKG_RWSTAT_READ]	= "Read",
		[BLKG_RWSTAT_WRITE]	= "Write",
		[BLKG_RWSTAT_SYNC]	= "Sync",
		[BLKG_RWSTAT_ASYNC]	= "Async",
	};
	const char *dname = blkg_dev_name(pd->blkg);
	u64 v;
	int i;

	if (!dname)
		return 0;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
			   (unsigned long long)rwstat->cnt[i]);

	v = rwstat->cnt[BLKG_RWSTAT_READ] + rwstat->cnt[BLKG_RWSTAT_WRITE];
	seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
	return v;
}
static u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd,
			    int off)
{
	return __blkg_prfill_u64(sf, pd,
				 blkg_stat_read((void *)&pd->stats + off));
}
static u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			      int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_read((void *)&pd->stats + off);

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
/* print blkg_stat specified by BLKCG_STAT_PRIV() */
int blkcg_print_stat(struct cgroup *cgrp, struct cftype *cft,
		     struct seq_file *sf)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);

	blkcg_print_blkgs(sf, blkcg, blkg_prfill_stat,
			  BLKCG_STAT_POL(cft->private),
			  BLKCG_STAT_OFF(cft->private), false);
	return 0;
}
EXPORT_SYMBOL_GPL(blkcg_print_stat);
/* print blkg_rwstat specified by BLKCG_STAT_PRIV() */
int blkcg_print_rwstat(struct cgroup *cgrp, struct cftype *cft,
		       struct seq_file *sf)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);

	blkcg_print_blkgs(sf, blkcg, blkg_prfill_rwstat,
			  BLKCG_STAT_POL(cft->private),
			  BLKCG_STAT_OFF(cft->private), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkcg_print_rwstat);
/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result.  @ctx->blkg points to the blkg to be updated and @ctx->v the new
 * value.  This function returns with RCU read locked and must be paired
 * with blkg_conf_finish().
 */
int blkg_conf_prep(struct blkio_cgroup *blkcg, const char *input,
		   struct blkg_conf_ctx *ctx)
	__acquires(rcu)
{
	struct gendisk *disk;
	struct blkio_group *blkg;
	unsigned int major, minor;
	unsigned long long v;
	int part, ret;

	if (sscanf(input, "%u:%u %llu", &major, &minor, &v) != 3)
		return -EINVAL;

	disk = get_gendisk(MKDEV(major, minor), &part);
	if (!disk || part)
		return -EINVAL;

	rcu_read_lock();

	spin_lock_irq(disk->queue->queue_lock);
	blkg = blkg_lookup_create(blkcg, disk->queue, false);
	spin_unlock_irq(disk->queue->queue_lock);

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		rcu_read_unlock();
		put_disk(disk);
		/*
		 * If queue was bypassing, we should retry.  Do so after a
		 * short msleep().  It isn't strictly necessary but queue
		 * can be bypassing for some time and it's always nice to
		 * avoid busy looping.
		 */
		if (ret == -EBUSY) {
			msleep(10);
			ret = restart_syscall();
		}
		return ret;
	}

	ctx->disk = disk;
	ctx->blkg = blkg;
	ctx->v = v;
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);
/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after per-blkg config update.  This function must be paired
 * with blkg_conf_prep().
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
	__releases(rcu)
{
	rcu_read_unlock();
	put_disk(ctx->disk);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);
struct cftype blkio_files[] = {
	{
		.name = "reset_stats",
		.write_u64 = blkiocg_reset_stats,
	},
	{ }	/* terminate */
};
/**
 * blkiocg_pre_destroy - cgroup pre_destroy callback
 * @cgroup: cgroup of interest
 *
 * This function is called when @cgroup is about to go away and responsible
 * for shooting down all blkgs associated with @cgroup.  blkgs should be
 * removed while holding both q and blkcg locks.  As blkcg lock is nested
 * inside q lock, this function performs reverse double lock dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static int blkiocg_pre_destroy(struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);

	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkio_group *blkg = hlist_entry(blkcg->blkg_list.first,
						struct blkio_group, blkcg_node);
		struct request_queue *q = blkg->q;

		if (spin_trylock(q->queue_lock)) {
			blkg_destroy(blkg);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irq(&blkcg->lock);
			cpu_relax();
			spin_lock_irq(&blkcg->lock);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	return 0;
}
static void blkiocg_destroy(struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);

	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}
static struct cgroup_subsys_state *blkiocg_create(struct cgroup *cgroup)
{
	static atomic64_t id_seq = ATOMIC64_INIT(0);
	struct blkio_cgroup *blkcg;
	struct cgroup *parent = cgroup->parent;

	if (!parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
	blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	return &blkcg->css;
}
/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
	int ret;

	ret = blk_throtl_init(q);
	if (ret)
		return ret;

	mutex_lock(&all_q_mutex);
	INIT_LIST_HEAD(&q->all_q_node);
	list_add_tail(&q->all_q_node, &all_q_list);
	mutex_unlock(&all_q_mutex);

	return 0;
}
/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue().  Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	blk_throtl_drain(q);
}
/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue().  Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
	mutex_lock(&all_q_mutex);
	list_del_init(&q->all_q_node);
	mutex_unlock(&all_q_mutex);

	blkg_destroy_all(q, true);

	blk_throtl_exit(q);
}
/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures.  For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, cgrp, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}
static void blkcg_bypass_start(void)
	__acquires(&all_q_mutex)
{
	struct request_queue *q;

	mutex_lock(&all_q_mutex);

	list_for_each_entry(q, &all_q_list, all_q_node) {
		blk_queue_bypass_start(q);
		blkg_destroy_all(q, false);
	}
}
static void blkcg_bypass_end(void)
	__releases(&all_q_mutex)
{
	struct request_queue *q;

	list_for_each_entry(q, &all_q_list, all_q_node)
		blk_queue_bypass_end(q);

	mutex_unlock(&all_q_mutex);
}
struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.pre_destroy = blkiocg_pre_destroy,
	.destroy = blkiocg_destroy,
	.subsys_id = blkio_subsys_id,
	.base_cftypes = blkio_files,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);
void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	struct request_queue *q;

	blkcg_bypass_start();
	spin_lock(&blkio_list_lock);

	BUG_ON(blkio_policy[blkiop->plid]);
	blkio_policy[blkiop->plid] = blkiop;
	list_add_tail(&blkiop->list, &blkio_list);

	spin_unlock(&blkio_list_lock);
	list_for_each_entry(q, &all_q_list, all_q_node)
		update_root_blkg_pd(q, blkiop->plid);
	blkcg_bypass_end();

	if (blkiop->cftypes)
		WARN_ON(cgroup_add_cftypes(&blkio_subsys, blkiop->cftypes));
}
EXPORT_SYMBOL_GPL(blkio_policy_register);
void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	struct request_queue *q;

	if (blkiop->cftypes)
		cgroup_rm_cftypes(&blkio_subsys, blkiop->cftypes);

	blkcg_bypass_start();
	spin_lock(&blkio_list_lock);

	BUG_ON(blkio_policy[blkiop->plid] != blkiop);
	blkio_policy[blkiop->plid] = NULL;
	list_del_init(&blkiop->list);

	spin_unlock(&blkio_list_lock);
	list_for_each_entry(q, &all_q_list, all_q_node)
		update_root_blkg_pd(q, blkiop->plid);
	blkcg_bypass_end();
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);