blkcg: add request_queue->root_blkg
author     Tejun Heo <tj@kernel.org>
           Fri, 13 Apr 2012 20:11:32 +0000 (13:11 -0700)
committer  Jens Axboe <axboe@kernel.dk>
           Fri, 20 Apr 2012 08:06:06 +0000 (10:06 +0200)
With per-queue policy activation, root blkg creation will be moved to
blkcg core.  Add q->root_blkg in preparation.  For blk-throtl, this
replaces throtl_data->root_tg; however, cfq needs to keep
cfqd->root_group for !CONFIG_CFQ_GROUP_IOSCHED.

This is to prepare for per-queue policy activation and doesn't cause
any functional difference.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
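
To make the blk-throtl side of the change concrete: the cached throtl_data->root_tg
pointer is replaced by an accessor that derives the root group from the queue's new
root_blkg. The snippet below is a simplified, self-contained model of that accessor
pattern; the struct layouts and the blkg_to_tg() body are reduced stand-ins for
illustration, not the real kernel definitions.

    /* Reduced stand-ins for the structures involved; not the kernel definitions. */
    struct blkio_group { void *pdata; };            /* per-policy data hangs off the blkg */

    struct request_queue {
            struct blkio_group *root_blkg;          /* new member added by this commit */
    };

    struct throtl_grp { int dummy; };

    struct throtl_data {
            struct request_queue *queue;            /* the root_tg cache is gone */
    };

    /* Stand-in for blkg_to_tg(): map a blkg to its throttle policy data. */
    static inline struct throtl_grp *blkg_to_tg(struct blkio_group *blkg)
    {
            return blkg->pdata;
    }

    /* Mirrors the new helper: the root throtl_grp is derived on demand
     * from q->root_blkg instead of being cached in throtl_data. */
    static inline struct throtl_grp *td_root_tg(struct throtl_data *td)
    {
            return blkg_to_tg(td->queue->root_blkg);
    }

Once root blkg creation moves into blkcg core, blk-throtl no longer needs to track
the root group itself; it is reachable from the queue like any other policy data.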
block/blk-throttle.c
block/cfq-iosched.c
include/linux/blkdev.h

diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 6f1bfdf9a1b75dad2390ad23057eaa7ad63fb39f..8c520fad6885411465ba0ce0e02389fef6ffed42 100644
@@ -97,7 +97,6 @@ struct throtl_data
        /* service tree for active throtl groups */
        struct throtl_rb_root tg_service_tree;
 
-       struct throtl_grp *root_tg;
        struct request_queue *queue;
 
        /* Total Number of queued bios on READ and WRITE lists */
@@ -131,6 +130,11 @@ static inline struct blkio_group *tg_to_blkg(struct throtl_grp *tg)
        return pdata_to_blkg(tg);
 }
 
+static inline struct throtl_grp *td_root_tg(struct throtl_data *td)
+{
+       return blkg_to_tg(td->queue->root_blkg);
+}
+
 enum tg_state_flags {
        THROTL_TG_FLAG_on_rr = 0,       /* on round-robin busy list */
 };
@@ -261,7 +265,7 @@ throtl_grp *throtl_lookup_tg(struct throtl_data *td, struct blkio_cgroup *blkcg)
         * Avoid lookup in this case
         */
        if (blkcg == &blkio_root_cgroup)
-               return td->root_tg;
+               return td_root_tg(td);
 
        return blkg_to_tg(blkg_lookup(blkcg, td->queue));
 }
@@ -277,7 +281,7 @@ static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
         * Avoid lookup in this case
         */
        if (blkcg == &blkio_root_cgroup) {
-               tg = td->root_tg;
+               tg = td_root_tg(td);
        } else {
                struct blkio_group *blkg;
 
@@ -287,7 +291,7 @@ static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
                if (!IS_ERR(blkg))
                        tg = blkg_to_tg(blkg);
                else if (!blk_queue_dead(q))
-                       tg = td->root_tg;
+                       tg = td_root_tg(td);
        }
 
        return tg;
@@ -1245,12 +1249,12 @@ int blk_throtl_init(struct request_queue *q)
 
        blkg = blkg_lookup_create(&blkio_root_cgroup, q, true);
        if (!IS_ERR(blkg))
-               td->root_tg = blkg_to_tg(blkg);
+               q->root_blkg = blkg;
 
        spin_unlock_irq(q->queue_lock);
        rcu_read_unlock();
 
-       if (!td->root_tg) {
+       if (!q->root_blkg) {
                kfree(td);
                return -ENOMEM;
        }
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index de95f9a2acf89050c14a37dff113d98bc76b7a0c..86440e04f3ee72e8e4ce854411492c359f1ae847 100644
@@ -3964,8 +3964,10 @@ static int cfq_init_queue(struct request_queue *q)
        spin_lock_irq(q->queue_lock);
 
        blkg = blkg_lookup_create(&blkio_root_cgroup, q, true);
-       if (!IS_ERR(blkg))
+       if (!IS_ERR(blkg)) {
+               q->root_blkg = blkg;
                cfqd->root_group = blkg_to_cfqg(blkg);
+       }
 
        spin_unlock_irq(q->queue_lock);
        rcu_read_unlock();
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index d2c69f8c188af50b34a4b49de8073bafd2986242..b01c377fd73980e2b38c5b1a37c1a7c17536ff33 100644
@@ -31,6 +31,7 @@ struct blk_trace;
 struct request;
 struct sg_io_hdr;
 struct bsg_job;
+struct blkio_group;
 
 #define BLKDEV_MIN_RQ  4
 #define BLKDEV_MAX_RQ  128     /* Default maximum */
@@ -369,6 +370,7 @@ struct request_queue {
 
        struct list_head        icq_list;
 #ifdef CONFIG_BLK_CGROUP
+       struct blkio_group      *root_blkg;
        struct list_head        blkg_list;
 #endif
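
The blkdev.h hunk follows the usual pattern for an optional subsystem pointer:
forward-declare the type so blkdev.h does not need to pull in blkcg headers, and
guard the member with CONFIG_BLK_CGROUP so non-cgroup builds carry no extra field.
A minimal standalone illustration of that pattern follows; everything other than
blkio_group, root_blkg and CONFIG_BLK_CGROUP is invented for the sketch.

    /* Forward declaration: a pointer member needs no complete type. */
    struct blkio_group;

    struct sketch_request_queue {
    #ifdef CONFIG_BLK_CGROUP
            struct blkio_group      *root_blkg;     /* set when the root blkg is created */
    #endif
            unsigned long           queue_flags;    /* placeholder for the existing fields */
    };

Code that dereferences root_blkg (like td_root_tg() above) is itself compiled only
under the corresponding config, which lines up with the commit message's note that
cfq keeps cfqd->root_group for the !CONFIG_CFQ_GROUP_IOSCHED case.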