block: get rid of MQ scheduler ops union
author    Jens Axboe <axboe@kernel.dk>
          Thu, 1 Nov 2018 22:41:41 +0000 (16:41 -0600)
committer Jens Axboe <axboe@kernel.dk>
          Wed, 7 Nov 2018 20:42:32 +0000 (13:42 -0700)
This is a remnant of when we had ops for both SQ and MQ
schedulers. Now it's just MQ, so get rid of the union.

Reviewed-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
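
The change is purely mechanical: the single-member union is flattened into a
directly embedded struct, so every accessor loses the intermediate ".mq" step.
Below is a standalone C sketch of the same pattern; hw_ctx, demo_has_work, and
the simplified struct bodies are illustrative stand-ins, not the kernel
definitions from the diff.

/*
 * Sketch of the refactor (illustrative only, not kernel code): a
 * single-member union wrapping an ops table is flattened into a
 * directly embedded struct, and call sites drop the ".mq" step.
 */
#include <stdbool.h>
#include <stdio.h>

struct hw_ctx {
	int queued;
};

struct elevator_mq_ops {
	bool (*has_work)(struct hw_ctx *hctx);
};

/* Before the change this member was: union { struct elevator_mq_ops mq; } ops; */
struct elevator_type {
	struct elevator_mq_ops ops;
};

static bool demo_has_work(struct hw_ctx *hctx)
{
	return hctx->queued > 0;
}

int main(void)
{
	/* Initializers go from .ops.mq = { ... } to .ops = { ... }. */
	struct elevator_type et = { .ops = { .has_work = demo_has_work } };
	struct hw_ctx hctx = { .queued = 1 };

	/* Call sites go from et.ops.mq.has_work to et.ops.has_work. */
	if (et.ops.has_work && et.ops.has_work(&hctx))
		printf("scheduler has work\n");
	return 0;
}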
block/bfq-iosched.c
block/blk-ioc.c
block/blk-mq-sched.c
block/blk-mq-sched.h
block/blk-mq.c
block/elevator.c
block/kyber-iosched.c
block/mq-deadline.c
include/linux/elevator.h

block/bfq-iosched.c
index 44c7e567aa25dbed9d9f17b42a3c1f6fd4c654f2..c7636cbefc85107f2beb7ce68686f2eb9a140736 100644
@@ -5724,7 +5724,7 @@ static struct elv_fs_entry bfq_attrs[] = {
 };
 
 static struct elevator_type iosched_bfq_mq = {
-       .ops.mq = {
+       .ops = {
                .limit_depth            = bfq_limit_depth,
                .prepare_request        = bfq_prepare_request,
                .requeue_request        = bfq_finish_requeue_request,
block/blk-ioc.c
index 391128456aec5795533098e2e398223b0e009124..007aac6e6a4bad95d9dac3659fee4fb8da326c62 100644
@@ -48,8 +48,8 @@ static void ioc_exit_icq(struct io_cq *icq)
        if (icq->flags & ICQ_EXITED)
                return;
 
-       if (et->ops.mq.exit_icq)
-               et->ops.mq.exit_icq(icq);
+       if (et->ops.exit_icq)
+               et->ops.exit_icq(icq);
 
        icq->flags |= ICQ_EXITED;
 }
@@ -396,8 +396,8 @@ struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
        if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
                hlist_add_head(&icq->ioc_node, &ioc->icq_list);
                list_add(&icq->q_node, &q->icq_list);
-               if (et->ops.mq.init_icq)
-                       et->ops.mq.init_icq(icq);
+               if (et->ops.init_icq)
+                       et->ops.init_icq(icq);
        } else {
                kmem_cache_free(et->icq_cache, icq);
                icq = ioc_lookup_icq(ioc, q);
block/blk-mq-sched.c
index 29bfe8017a2d8e6cbadeab6b9d1d63d293f656a5..0feefd6c6aaab2f07331acf3892c4bd22020da0a 100644
@@ -85,14 +85,13 @@ static void blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
        do {
                struct request *rq;
 
-               if (e->type->ops.mq.has_work &&
-                               !e->type->ops.mq.has_work(hctx))
+               if (e->type->ops.has_work && !e->type->ops.has_work(hctx))
                        break;
 
                if (!blk_mq_get_dispatch_budget(hctx))
                        break;
 
-               rq = e->type->ops.mq.dispatch_request(hctx);
+               rq = e->type->ops.dispatch_request(hctx);
                if (!rq) {
                        blk_mq_put_dispatch_budget(hctx);
                        break;
@@ -163,7 +162,7 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
 {
        struct request_queue *q = hctx->queue;
        struct elevator_queue *e = q->elevator;
-       const bool has_sched_dispatch = e && e->type->ops.mq.dispatch_request;
+       const bool has_sched_dispatch = e && e->type->ops.dispatch_request;
        LIST_HEAD(rq_list);
 
        /* RCU or SRCU read lock is needed before checking quiesced flag */
@@ -314,9 +313,9 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
        struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
        bool ret = false;
 
-       if (e && e->type->ops.mq.bio_merge) {
+       if (e && e->type->ops.bio_merge) {
                blk_mq_put_ctx(ctx);
-               return e->type->ops.mq.bio_merge(hctx, bio);
+               return e->type->ops.bio_merge(hctx, bio);
        }
 
        if ((hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
@@ -380,11 +379,11 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head,
        if (blk_mq_sched_bypass_insert(hctx, !!e, rq))
                goto run;
 
-       if (e && e->type->ops.mq.insert_requests) {
+       if (e && e->type->ops.insert_requests) {
                LIST_HEAD(list);
 
                list_add(&rq->queuelist, &list);
-               e->type->ops.mq.insert_requests(hctx, &list, at_head);
+               e->type->ops.insert_requests(hctx, &list, at_head);
        } else {
                spin_lock(&ctx->lock);
                __blk_mq_insert_request(hctx, rq, at_head);
@@ -403,8 +402,8 @@ void blk_mq_sched_insert_requests(struct request_queue *q,
        struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
        struct elevator_queue *e = hctx->queue->elevator;
 
-       if (e && e->type->ops.mq.insert_requests)
-               e->type->ops.mq.insert_requests(hctx, list, false);
+       if (e && e->type->ops.insert_requests)
+               e->type->ops.insert_requests(hctx, list, false);
        else {
                /*
                 * try to issue requests directly if the hw queue isn't
@@ -489,15 +488,15 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
                        goto err;
        }
 
-       ret = e->ops.mq.init_sched(q, e);
+       ret = e->ops.init_sched(q, e);
        if (ret)
                goto err;
 
        blk_mq_debugfs_register_sched(q);
 
        queue_for_each_hw_ctx(q, hctx, i) {
-               if (e->ops.mq.init_hctx) {
-                       ret = e->ops.mq.init_hctx(hctx, i);
+               if (e->ops.init_hctx) {
+                       ret = e->ops.init_hctx(hctx, i);
                        if (ret) {
                                eq = q->elevator;
                                blk_mq_exit_sched(q, eq);
@@ -523,14 +522,14 @@ void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
 
        queue_for_each_hw_ctx(q, hctx, i) {
                blk_mq_debugfs_unregister_sched_hctx(hctx);
-               if (e->type->ops.mq.exit_hctx && hctx->sched_data) {
-                       e->type->ops.mq.exit_hctx(hctx, i);
+               if (e->type->ops.exit_hctx && hctx->sched_data) {
+                       e->type->ops.exit_hctx(hctx, i);
                        hctx->sched_data = NULL;
                }
        }
        blk_mq_debugfs_unregister_sched(q);
-       if (e->type->ops.mq.exit_sched)
-               e->type->ops.mq.exit_sched(e);
+       if (e->type->ops.exit_sched)
+               e->type->ops.exit_sched(e);
        blk_mq_sched_tags_teardown(q);
        q->elevator = NULL;
 }
block/blk-mq-sched.h
index 8a9544203173fac13a718f22e746128246dde4c7..947f236b273da2a4f65b2d60b6fa3dcb5d72a0c2 100644
@@ -43,8 +43,8 @@ blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
 {
        struct elevator_queue *e = q->elevator;
 
-       if (e && e->type->ops.mq.allow_merge)
-               return e->type->ops.mq.allow_merge(q, rq, bio);
+       if (e && e->type->ops.allow_merge)
+               return e->type->ops.allow_merge(q, rq, bio);
 
        return true;
 }
@@ -53,8 +53,8 @@ static inline void blk_mq_sched_completed_request(struct request *rq, u64 now)
 {
        struct elevator_queue *e = rq->q->elevator;
 
-       if (e && e->type->ops.mq.completed_request)
-               e->type->ops.mq.completed_request(rq, now);
+       if (e && e->type->ops.completed_request)
+               e->type->ops.completed_request(rq, now);
 }
 
 static inline void blk_mq_sched_started_request(struct request *rq)
@@ -62,8 +62,8 @@ static inline void blk_mq_sched_started_request(struct request *rq)
        struct request_queue *q = rq->q;
        struct elevator_queue *e = q->elevator;
 
-       if (e && e->type->ops.mq.started_request)
-               e->type->ops.mq.started_request(rq);
+       if (e && e->type->ops.started_request)
+               e->type->ops.started_request(rq);
 }
 
 static inline void blk_mq_sched_requeue_request(struct request *rq)
@@ -71,16 +71,16 @@ static inline void blk_mq_sched_requeue_request(struct request *rq)
        struct request_queue *q = rq->q;
        struct elevator_queue *e = q->elevator;
 
-       if (e && e->type->ops.mq.requeue_request)
-               e->type->ops.mq.requeue_request(rq);
+       if (e && e->type->ops.requeue_request)
+               e->type->ops.requeue_request(rq);
 }
 
 static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)
 {
        struct elevator_queue *e = hctx->queue->elevator;
 
-       if (e && e->type->ops.mq.has_work)
-               return e->type->ops.mq.has_work(hctx);
+       if (e && e->type->ops.has_work)
+               return e->type->ops.has_work(hctx);
 
        return false;
 }
block/blk-mq.c
index a58d2d953876755a8984b4c598561177cfd265d5..d106d7a970cc1f502ea47071c98442aea810d445 100644
@@ -363,9 +363,9 @@ static struct request *blk_mq_get_request(struct request_queue *q,
                 * dispatch list. Don't include reserved tags in the
                 * limiting, as it isn't useful.
                 */
-               if (!op_is_flush(op) && e->type->ops.mq.limit_depth &&
+               if (!op_is_flush(op) && e->type->ops.limit_depth &&
                    !(data->flags & BLK_MQ_REQ_RESERVED))
-                       e->type->ops.mq.limit_depth(op, data);
+                       e->type->ops.limit_depth(op, data);
        } else {
                blk_mq_tag_busy(data->hctx);
        }
@@ -383,11 +383,11 @@ static struct request *blk_mq_get_request(struct request_queue *q,
        rq = blk_mq_rq_ctx_init(data, tag, op);
        if (!op_is_flush(op)) {
                rq->elv.icq = NULL;
-               if (e && e->type->ops.mq.prepare_request) {
+               if (e && e->type->ops.prepare_request) {
                        if (e->type->icq_cache && rq_ioc(bio))
                                blk_mq_sched_assign_ioc(rq, bio);
 
-                       e->type->ops.mq.prepare_request(rq, bio);
+                       e->type->ops.prepare_request(rq, bio);
                        rq->rq_flags |= RQF_ELVPRIV;
                }
        }
@@ -491,8 +491,8 @@ void blk_mq_free_request(struct request *rq)
        struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
 
        if (rq->rq_flags & RQF_ELVPRIV) {
-               if (e && e->type->ops.mq.finish_request)
-                       e->type->ops.mq.finish_request(rq);
+               if (e && e->type->ops.finish_request)
+                       e->type->ops.finish_request(rq);
                if (rq->elv.icq) {
                        put_io_context(rq->elv.icq->ioc);
                        rq->elv.icq = NULL;
block/elevator.c
index 334097c54b084eb502626ed7876d3267ca990896..19351ffa56b1b7af775fdc7b78d869ae3941fd9e 100644
@@ -61,8 +61,8 @@ static int elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)
        struct request_queue *q = rq->q;
        struct elevator_queue *e = q->elevator;
 
-       if (e->type->ops.mq.allow_merge)
-               return e->type->ops.mq.allow_merge(q, rq, bio);
+       if (e->type->ops.allow_merge)
+               return e->type->ops.allow_merge(q, rq, bio);
 
        return 1;
 }
@@ -180,7 +180,7 @@ static void elevator_release(struct kobject *kobj)
 void elevator_exit(struct request_queue *q, struct elevator_queue *e)
 {
        mutex_lock(&e->sysfs_lock);
-       if (e->type->ops.mq.exit_sched)
+       if (e->type->ops.exit_sched)
                blk_mq_exit_sched(q, e);
        mutex_unlock(&e->sysfs_lock);
 
@@ -329,8 +329,8 @@ enum elv_merge elv_merge(struct request_queue *q, struct request **req,
                return ELEVATOR_BACK_MERGE;
        }
 
-       if (e->type->ops.mq.request_merge)
-               return e->type->ops.mq.request_merge(q, req, bio);
+       if (e->type->ops.request_merge)
+               return e->type->ops.request_merge(q, req, bio);
 
        return ELEVATOR_NO_MERGE;
 }
@@ -381,8 +381,8 @@ void elv_merged_request(struct request_queue *q, struct request *rq,
 {
        struct elevator_queue *e = q->elevator;
 
-       if (e->type->ops.mq.request_merged)
-               e->type->ops.mq.request_merged(q, rq, type);
+       if (e->type->ops.request_merged)
+               e->type->ops.request_merged(q, rq, type);
 
        if (type == ELEVATOR_BACK_MERGE)
                elv_rqhash_reposition(q, rq);
@@ -396,8 +396,8 @@ void elv_merge_requests(struct request_queue *q, struct request *rq,
        struct elevator_queue *e = q->elevator;
        bool next_sorted = false;
 
-       if (e->type->ops.mq.requests_merged)
-               e->type->ops.mq.requests_merged(q, rq, next);
+       if (e->type->ops.requests_merged)
+               e->type->ops.requests_merged(q, rq, next);
 
        elv_rqhash_reposition(q, rq);
 
@@ -413,8 +413,8 @@ struct request *elv_latter_request(struct request_queue *q, struct request *rq)
 {
        struct elevator_queue *e = q->elevator;
 
-       if (e->type->ops.mq.next_request)
-               return e->type->ops.mq.next_request(q, rq);
+       if (e->type->ops.next_request)
+               return e->type->ops.next_request(q, rq);
 
        return NULL;
 }
@@ -423,8 +423,8 @@ struct request *elv_former_request(struct request_queue *q, struct request *rq)
 {
        struct elevator_queue *e = q->elevator;
 
-       if (e->type->ops.mq.former_request)
-               return e->type->ops.mq.former_request(q, rq);
+       if (e->type->ops.former_request)
+               return e->type->ops.former_request(q, rq);
 
        return NULL;
 }
block/kyber-iosched.c
index 728757a34fa03ac627f6faed007a4c0613ab5e7e..1fd83a91e7499fd500fca76e3b8126058348c0ca 100644
@@ -1017,7 +1017,7 @@ static const struct blk_mq_debugfs_attr kyber_hctx_debugfs_attrs[] = {
 #endif
 
 static struct elevator_type kyber_sched = {
-       .ops.mq = {
+       .ops = {
                .init_sched = kyber_init_sched,
                .exit_sched = kyber_exit_sched,
                .init_hctx = kyber_init_hctx,
block/mq-deadline.c
index 513edefd10fd9bc753190266bce7664b6b2a6a9e..1bd06cefce575693a605c08d3cdda31ca8ad2acb 100644
@@ -761,7 +761,7 @@ static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
 #endif
 
 static struct elevator_type mq_deadline = {
-       .ops.mq = {
+       .ops = {
                .insert_requests        = dd_insert_requests,
                .dispatch_request       = dd_dispatch_request,
                .prepare_request        = dd_prepare_request,
include/linux/elevator.h
index 158004f1754db5d262f7b35f3b0bd7ee38af3012..2e9e2763bf47dbea239976034e32fd11f14826f7 100644
@@ -69,9 +69,7 @@ struct elevator_type
        struct kmem_cache *icq_cache;
 
        /* fields provided by elevator implementation */
-       union {
-               struct elevator_mq_ops mq;
-       } ops;
+       struct elevator_mq_ops ops;
 
        size_t icq_size;        /* see iocontext.h */
        size_t icq_align;       /* ditto */