blk-mq: cache request hardware queue mapping
author Jens Axboe <axboe@kernel.dk>
Mon, 29 Oct 2018 21:06:13 +0000 (15:06 -0600)
committer Jens Axboe <axboe@kernel.dk>
Wed, 7 Nov 2018 20:44:59 +0000 (13:44 -0700)
We call blk_mq_map_queue() a lot, at least two times for each
request per IO, sometimes more. Since we now also have an indirect
call in that function, cache the mapping so we don't have to
re-call blk_mq_map_queue() for the same request multiple times.
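
The idea is to resolve the hardware queue once when the request is
set up, store the result in the request, and have every later
consumer read the cached pointer instead of redoing the lookup. A
minimal standalone sketch of that pattern follows; the types and
names (hw_queue, req, map_queue, init_req) are hypothetical
stand-ins for illustration, not the real blk-mq structures:

    #include <stdio.h>

    /* Hypothetical stand-ins for the real blk-mq structures. */
    struct hw_queue { int queue_num; };

    struct req {
            int cpu;
            unsigned int flags;
            struct hw_queue *hctx;  /* cached mapping, like rq->mq_hctx */
    };

    /* Stand-in for the mapping lookup we want to avoid repeating. */
    static struct hw_queue *map_queue(struct hw_queue *queues,
                                      unsigned int flags, int cpu)
    {
            return &queues[cpu % 2];        /* pretend mapping */
    }

    static void init_req(struct req *rq, struct hw_queue *queues,
                         int cpu, unsigned int flags)
    {
            rq->cpu = cpu;
            rq->flags = flags;
            rq->hctx = map_queue(queues, flags, cpu); /* resolve once */
    }

    int main(void)
    {
            struct hw_queue queues[2] = { { 0 }, { 1 } };
            struct req rq;

            init_req(&rq, queues, 3, 0);

            /* Later users just read the cached pointer. */
            printf("queue %d\n", rq.hctx->queue_num);
            return 0;
    }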

Reviewed-by: Keith Busch <keith.busch@intel.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-flush.c
block/blk-mq-debugfs.c
block/blk-mq-sched.c
block/blk-mq-tag.c
block/blk-mq.c
block/blk-mq.h
include/linux/blkdev.h

diff --git a/block/blk-flush.c b/block/blk-flush.c
index 77e9f5b2ee05fddd05fff4a5822adf5ccfb3ef24..c53197dcdd70b0653812c7b3f4fc580ecfb92ba8 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -215,7 +215,7 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
 
        /* release the tag's ownership to the req cloned from */
        spin_lock_irqsave(&fq->mq_flush_lock, flags);
-       hctx = blk_mq_map_queue(q, flush_rq->cmd_flags, flush_rq->mq_ctx->cpu);
+       hctx = flush_rq->mq_hctx;
        if (!q->elevator) {
                blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
                flush_rq->tag = -1;
@@ -262,7 +262,6 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
        struct request *first_rq =
                list_first_entry(pending, struct request, flush.list);
        struct request *flush_rq = fq->flush_rq;
-       struct blk_mq_hw_ctx *hctx;
 
        /* C1 described at the top of this file */
        if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending))
@@ -297,13 +296,12 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
         * just for cheating put/get driver tag.
         */
        flush_rq->mq_ctx = first_rq->mq_ctx;
+       flush_rq->mq_hctx = first_rq->mq_hctx;
 
        if (!q->elevator) {
                fq->orig_rq = first_rq;
                flush_rq->tag = first_rq->tag;
-               hctx = blk_mq_map_queue(q, first_rq->cmd_flags,
-                                       first_rq->mq_ctx->cpu);
-               blk_mq_tag_set_rq(hctx, first_rq->tag, flush_rq);
+               blk_mq_tag_set_rq(flush_rq->mq_hctx, first_rq->tag, flush_rq);
        } else {
                flush_rq->internal_tag = first_rq->internal_tag;
        }
@@ -320,13 +318,11 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
 static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
 {
        struct request_queue *q = rq->q;
-       struct blk_mq_hw_ctx *hctx;
+       struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
        struct blk_mq_ctx *ctx = rq->mq_ctx;
        unsigned long flags;
        struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);
 
-       hctx = blk_mq_map_queue(q, rq->cmd_flags, ctx->cpu);
-
        if (q->elevator) {
                WARN_ON(rq->tag < 0);
                blk_mq_put_driver_tag_hctx(hctx, rq);
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index fac70c81b7de08cf06dec1382087daa9e325755d..cde19be361354673afa51b1b687e9504438aa919 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -427,10 +427,8 @@ struct show_busy_params {
 static void hctx_show_busy_rq(struct request *rq, void *data, bool reserved)
 {
        const struct show_busy_params *params = data;
-       struct blk_mq_hw_ctx *hctx;
 
-       hctx = blk_mq_map_queue(rq->q, rq->cmd_flags, rq->mq_ctx->cpu);
-       if (hctx == params->hctx)
+       if (rq->mq_hctx == params->hctx)
                __blk_mq_debugfs_rq_show(params->m,
                                         list_entry_rq(&rq->queuelist));
 }
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index bbabc3877d5acc99e718bab71028bb65881cb83b..641df3f00632b6fac92ca16e0fadeed05ab57c48 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -366,9 +366,7 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head,
        struct request_queue *q = rq->q;
        struct elevator_queue *e = q->elevator;
        struct blk_mq_ctx *ctx = rq->mq_ctx;
-       struct blk_mq_hw_ctx *hctx;
-
-       hctx = blk_mq_map_queue(q, rq->cmd_flags, ctx->cpu);
+       struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 
        /* flush rq in flush machinery need to be dispatched directly */
        if (!(rq->rq_flags & RQF_FLUSH_SEQ) && op_is_flush(rq->cmd_flags)) {
@@ -407,7 +405,7 @@ void blk_mq_sched_insert_requests(struct request_queue *q,
 
        /* For list inserts, requests better be on the same hw queue */
        rq = list_first_entry(list, struct request, queuelist);
-       hctx = blk_mq_map_queue(q, rq->cmd_flags, ctx->cpu);
+       hctx = rq->mq_hctx;
 
        e = hctx->queue->elevator;
        if (e && e->type->ops.insert_requests)
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 478a959357f567128814e9ce0b16e1c7380655a8..fb836d818b80944c489668aa37884709b9f188d8 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -527,14 +527,7 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
  */
 u32 blk_mq_unique_tag(struct request *rq)
 {
-       struct request_queue *q = rq->q;
-       struct blk_mq_hw_ctx *hctx;
-       int hwq = 0;
-
-       hctx = blk_mq_map_queue(q, rq->cmd_flags, rq->mq_ctx->cpu);
-       hwq = hctx->queue_num;
-
-       return (hwq << BLK_MQ_UNIQUE_TAG_BITS) |
+       return (rq->mq_hctx->queue_num << BLK_MQ_UNIQUE_TAG_BITS) |
                (rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
 }
 EXPORT_SYMBOL(blk_mq_unique_tag);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index ccf135cf41b0c5498a863beb0606551c2c88a171..6b2859d3ad234659ece19dc804e4c960bf6d8c87 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -300,6 +300,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
        /* csd/requeue_work/fifo_time is initialized before use */
        rq->q = data->q;
        rq->mq_ctx = data->ctx;
+       rq->mq_hctx = data->hctx;
        rq->rq_flags = rq_flags;
        rq->cmd_flags = op;
        if (data->flags & BLK_MQ_REQ_PREEMPT)
@@ -472,10 +473,11 @@ static void __blk_mq_free_request(struct request *rq)
 {
        struct request_queue *q = rq->q;
        struct blk_mq_ctx *ctx = rq->mq_ctx;
-       struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, rq->cmd_flags, ctx->cpu);
+       struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
        const int sched_tag = rq->internal_tag;
 
        blk_pm_mark_last_busy(rq);
+       rq->mq_hctx = NULL;
        if (rq->tag != -1)
                blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
        if (sched_tag != -1)
@@ -489,7 +491,7 @@ void blk_mq_free_request(struct request *rq)
        struct request_queue *q = rq->q;
        struct elevator_queue *e = q->elevator;
        struct blk_mq_ctx *ctx = rq->mq_ctx;
-       struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, rq->cmd_flags, ctx->cpu);
+       struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 
        if (rq->rq_flags & RQF_ELVPRIV) {
                if (e && e->type->ops.finish_request)
@@ -983,7 +985,7 @@ bool blk_mq_get_driver_tag(struct request *rq)
 {
        struct blk_mq_alloc_data data = {
                .q = rq->q,
-               .hctx = blk_mq_map_queue(rq->q, rq->cmd_flags, rq->mq_ctx->cpu),
+               .hctx = rq->mq_hctx,
                .flags = BLK_MQ_REQ_NOWAIT,
                .cmd_flags = rq->cmd_flags,
        };
@@ -1149,7 +1151,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
 
                rq = list_first_entry(list, struct request, queuelist);
 
-               hctx = blk_mq_map_queue(rq->q, rq->cmd_flags, rq->mq_ctx->cpu);
+               hctx = rq->mq_hctx;
                if (!got_budget && !blk_mq_get_dispatch_budget(hctx))
                        break;
 
@@ -1579,9 +1581,7 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
  */
 void blk_mq_request_bypass_insert(struct request *rq, bool run_queue)
 {
-       struct blk_mq_ctx *ctx = rq->mq_ctx;
-       struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, rq->cmd_flags,
-                                                       ctx->cpu);
+       struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 
        spin_lock(&hctx->lock);
        list_add_tail(&rq->queuelist, &hctx->dispatch);
@@ -1790,9 +1790,7 @@ blk_status_t blk_mq_request_issue_directly(struct request *rq)
        blk_status_t ret;
        int srcu_idx;
        blk_qc_t unused_cookie;
-       struct blk_mq_ctx *ctx = rq->mq_ctx;
-       struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, rq->cmd_flags,
-                                                       ctx->cpu);
+       struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 
        hctx_lock(hctx, &srcu_idx);
        ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true);
@@ -1917,9 +1915,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
                blk_mq_put_ctx(data.ctx);
 
                if (same_queue_rq) {
-                       data.hctx = blk_mq_map_queue(q,
-                                       same_queue_rq->cmd_flags,
-                                       same_queue_rq->mq_ctx->cpu);
+                       data.hctx = same_queue_rq->mq_hctx;
                        blk_mq_try_issue_directly(data.hctx, same_queue_rq,
                                        &cookie);
                }
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 0538622701258beaf41a0eac0b7a2423edccfb69..facb6e9ddce43705ca81302d9e9adc61cf76ecf6 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -223,13 +223,10 @@ static inline void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
 
 static inline void blk_mq_put_driver_tag(struct request *rq)
 {
-       struct blk_mq_hw_ctx *hctx;
-
        if (rq->tag == -1 || rq->internal_tag == -1)
                return;
 
-       hctx = blk_mq_map_queue(rq->q, rq->cmd_flags, rq->mq_ctx->cpu);
-       __blk_mq_put_driver_tag(hctx, rq);
+       __blk_mq_put_driver_tag(rq->mq_hctx, rq);
 }
 
 static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 2ae7465d68ab2b12dee487dcc736e4590bed2615..9b1f470cc784a7e5293a40e78a5a38f79cbcdb54 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -129,6 +129,7 @@ enum mq_rq_state {
 struct request {
        struct request_queue *q;
        struct blk_mq_ctx *mq_ctx;
+       struct blk_mq_hw_ctx *mq_hctx;
 
        unsigned int cmd_flags;         /* op and common flags */
        req_flags_t rq_flags;