git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blobdiff - block/cfq-iosched.c
block, cfq: misc updates to cfq_io_context
[mirror_ubuntu-bionic-kernel.git] / block / cfq-iosched.c
index 16ace89613bc6e4ce343cdd2f1da85218b7dc5e7..a612ca65f3715a9795c2c75759f05d4db52df071 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/rbtree.h>
 #include <linux/ioprio.h>
 #include <linux/blktrace_api.h>
+#include "blk.h"
 #include "cfq.h"
 
 /*
@@ -65,9 +66,6 @@ static DEFINE_PER_CPU(unsigned long, cfq_ioc_count);
 static struct completion *ioc_gone;
 static DEFINE_SPINLOCK(ioc_gone_lock);
 
-static DEFINE_SPINLOCK(cic_index_lock);
-static DEFINE_IDA(cic_index_ida);
-
 #define CFQ_PRIO_LISTS         IOPRIO_BE_NR
 #define cfq_class_idle(cfqq)   ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
 #define cfq_class_rt(cfqq)     ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
@@ -290,7 +288,6 @@ struct cfq_data {
        unsigned int cfq_group_idle;
        unsigned int cfq_latency;
 
-       unsigned int cic_index;
        struct list_head cic_list;
 
        /*
@@ -484,7 +481,7 @@ static inline void cic_set_cfqq(struct cfq_io_context *cic,
 
 static inline void *cfqd_dead_key(struct cfq_data *cfqd)
 {
-       return (void *)(cfqd->cic_index << CIC_DEAD_INDEX_SHIFT | CIC_DEAD_KEY);
+       return (void *)(cfqd->queue->id << CIC_DEAD_INDEX_SHIFT | CIC_DEAD_KEY);
 }
 
 static inline struct cfq_data *cic_to_cfqd(struct cfq_io_context *cic)
@@ -2712,21 +2709,26 @@ static void cfq_cic_free(struct cfq_io_context *cic)
        call_rcu(&cic->rcu_head, cfq_cic_free_rcu);
 }
 
-static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic)
+static void cfq_release_cic(struct cfq_io_context *cic)
 {
-       unsigned long flags;
+       struct io_context *ioc = cic->ioc;
        unsigned long dead_key = (unsigned long) cic->key;
 
        BUG_ON(!(dead_key & CIC_DEAD_KEY));
-
-       spin_lock_irqsave(&ioc->lock, flags);
        radix_tree_delete(&ioc->radix_root, dead_key >> CIC_DEAD_INDEX_SHIFT);
        hlist_del_rcu(&cic->cic_list);
-       spin_unlock_irqrestore(&ioc->lock, flags);
-
        cfq_cic_free(cic);
 }
 
+static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&ioc->lock, flags);
+       cfq_release_cic(cic);
+       spin_unlock_irqrestore(&ioc->lock, flags);
+}
+
 /*
  * Must be called with rcu_read_lock() held or preemption otherwise disabled.
  * Only two callers of this - ->dtor() which is called with the rcu_read_lock(),
@@ -2776,9 +2778,9 @@ static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
        cfq_put_queue(cfqq);
 }
 
-static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
-                                        struct cfq_io_context *cic)
+static void cfq_exit_cic(struct cfq_io_context *cic)
 {
+       struct cfq_data *cfqd = cic_to_cfqd(cic);
        struct io_context *ioc = cic->ioc;
 
        list_del_init(&cic->queue_list);
@@ -2826,7 +2828,7 @@ static void cfq_exit_single_io_context(struct io_context *ioc,
                 */
                smp_read_barrier_depends();
                if (cic->key == cfqd)
-                       __cfq_exit_single_io_context(cfqd, cic);
+                       cfq_exit_cic(cic);
 
                spin_unlock_irqrestore(q->queue_lock, flags);
        }
@@ -3105,7 +3107,7 @@ cfq_drop_dead_cic(struct cfq_data *cfqd, struct io_context *ioc,
        BUG_ON(rcu_dereference_check(ioc->ioc_data,
                lockdep_is_held(&ioc->lock)) == cic);
 
-       radix_tree_delete(&ioc->radix_root, cfqd->cic_index);
+       radix_tree_delete(&ioc->radix_root, cfqd->queue->id);
        hlist_del_rcu(&cic->cic_list);
        spin_unlock_irqrestore(&ioc->lock, flags);
 
@@ -3133,7 +3135,7 @@ cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc)
        }
 
        do {
-               cic = radix_tree_lookup(&ioc->radix_root, cfqd->cic_index);
+               cic = radix_tree_lookup(&ioc->radix_root, cfqd->queue->id);
                rcu_read_unlock();
                if (!cic)
                        break;
@@ -3164,29 +3166,29 @@ static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
        int ret;
 
        ret = radix_tree_preload(gfp_mask);
-       if (!ret) {
-               cic->ioc = ioc;
-               cic->key = cfqd;
+       if (ret)
+               goto out;
 
-               spin_lock_irqsave(&ioc->lock, flags);
-               ret = radix_tree_insert(&ioc->radix_root,
-                                               cfqd->cic_index, cic);
-               if (!ret)
-                       hlist_add_head_rcu(&cic->cic_list, &ioc->cic_list);
-               spin_unlock_irqrestore(&ioc->lock, flags);
+       cic->ioc = ioc;
+       cic->key = cfqd;
+       cic->q = cfqd->queue;
 
-               radix_tree_preload_end();
+       spin_lock_irqsave(&ioc->lock, flags);
+       ret = radix_tree_insert(&ioc->radix_root, cfqd->queue->id, cic);
+       if (!ret)
+               hlist_add_head_rcu(&cic->cic_list, &ioc->cic_list);
+       spin_unlock_irqrestore(&ioc->lock, flags);
 
-               if (!ret) {
-                       spin_lock_irqsave(cfqd->queue->queue_lock, flags);
-                       list_add(&cic->queue_list, &cfqd->cic_list);
-                       spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
-               }
-       }
+       radix_tree_preload_end();
 
+       if (!ret) {
+               spin_lock_irqsave(cfqd->queue->queue_lock, flags);
+               list_add(&cic->queue_list, &cfqd->cic_list);
+               spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
+       }
+out:
        if (ret)
                printk(KERN_ERR "cfq: cic link failed!\n");
-
        return ret;
 }
 
@@ -3199,13 +3201,13 @@ static struct cfq_io_context *
 cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 {
        struct io_context *ioc = NULL;
-       struct cfq_io_context *cic;
+       struct cfq_io_context *cic = NULL;
 
        might_sleep_if(gfp_mask & __GFP_WAIT);
 
-       ioc = get_io_context(gfp_mask, cfqd->queue->node);
+       ioc = current_io_context(gfp_mask, cfqd->queue->node);
        if (!ioc)
-               return NULL;
+               goto err;
 
        cic = cfq_cic_lookup(cfqd, ioc);
        if (cic)
@@ -3216,10 +3218,10 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
                goto err;
 
        if (cfq_cic_link(cfqd, ioc, cic, gfp_mask))
-               goto err_free;
-
+               goto err;
 out:
-       smp_read_barrier_depends();
+       get_io_context(ioc);
+
        if (unlikely(ioc->ioprio_changed))
                cfq_ioc_set_ioprio(ioc);
 
@@ -3228,10 +3230,9 @@ out:
                cfq_ioc_set_cgroup(ioc);
 #endif
        return cic;
-err_free:
-       cfq_cic_free(cic);
 err:
-       put_io_context(ioc);
+       if (cic)
+               cfq_cic_free(cic);
        return NULL;
 }
 
@@ -3927,7 +3928,7 @@ static void cfq_exit_queue(struct elevator_queue *e)
                                                        struct cfq_io_context,
                                                        queue_list);
 
-               __cfq_exit_single_io_context(cfqd, cic);
+               cfq_exit_cic(cic);
        }
 
        cfq_put_async_queues(cfqd);
@@ -3944,10 +3945,6 @@ static void cfq_exit_queue(struct elevator_queue *e)
 
        cfq_shutdown_timer_wq(cfqd);
 
-       spin_lock(&cic_index_lock);
-       ida_remove(&cic_index_ida, cfqd->cic_index);
-       spin_unlock(&cic_index_lock);
-
        /*
         * Wait for cfqg->blkg->key accessors to exit their grace periods.
         * Do this wait only if there are other unlinked groups out
@@ -3969,24 +3966,6 @@ static void cfq_exit_queue(struct elevator_queue *e)
        kfree(cfqd);
 }
 
-static int cfq_alloc_cic_index(void)
-{
-       int index, error;
-
-       do {
-               if (!ida_pre_get(&cic_index_ida, GFP_KERNEL))
-                       return -ENOMEM;
-
-               spin_lock(&cic_index_lock);
-               error = ida_get_new(&cic_index_ida, &index);
-               spin_unlock(&cic_index_lock);
-               if (error && error != -EAGAIN)
-                       return error;
-       } while (error);
-
-       return index;
-}
-
 static void *cfq_init_queue(struct request_queue *q)
 {
        struct cfq_data *cfqd;
@@ -3994,23 +3973,9 @@ static void *cfq_init_queue(struct request_queue *q)
        struct cfq_group *cfqg;
        struct cfq_rb_root *st;
 
-       i = cfq_alloc_cic_index();
-       if (i < 0)
-               return NULL;
-
        cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
-       if (!cfqd) {
-               spin_lock(&cic_index_lock);
-               ida_remove(&cic_index_ida, i);
-               spin_unlock(&cic_index_lock);
+       if (!cfqd)
                return NULL;
-       }
-
-       /*
-        * Don't need take queue_lock in the routine, since we are
-        * initializing the ioscheduler, and nobody is using cfqd
-        */
-       cfqd->cic_index = i;
 
        /* Init root service tree */
        cfqd->grp_service_tree = CFQ_RB_ROOT;
@@ -4294,7 +4259,6 @@ static void __exit cfq_exit(void)
         */
        if (elv_ioc_count_read(cfq_ioc_count))
                wait_for_completion(&all_gone);
-       ida_destroy(&cic_index_ida);
        cfq_slab_kill();
 }