blk-mq: rework flush sequencing logic
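
The hunks below remove the per-hctx pending_flush accounting from the allocation and free paths, drop the reserved argument from blk_mq_alloc_request() (plain callers now always take a non-reserved tag; blk_mq_alloc_reserved_request() remains for the reserved case), make blk_mq_rq_init() non-static, and preallocate a per-queue q->flush_rq sized for the driver's cmd_size in blk_mq_init_queue(), with a matching error-unwind label. As a rough, non-authoritative sketch of the caller-facing signature change only, a hypothetical driver helper might now look like the following; my_submit_cmd, cmd_data and the blk_execute_rq() usage are illustrative assumptions, not part of this commit:

/*
 * Illustrative sketch only, not part of this commit: a hypothetical driver
 * helper (my_submit_cmd and cmd_data are made-up names) issuing a
 * driver-private command through the reworked three-argument
 * blk_mq_alloc_request().
 */
#include <linux/blkdev.h>
#include <linux/blk-mq.h>

static int my_submit_cmd(struct request_queue *q, void *cmd_data)
{
	struct request *rq;
	int err;

	/*
	 * GFP_KERNEL contains __GFP_WAIT, so the allocation may block until
	 * a tag is available; the old "reserved" argument is gone and normal
	 * callers always get a regular (non-reserved) tag.
	 */
	rq = blk_mq_alloc_request(q, WRITE, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;

	rq->cmd_type = REQ_TYPE_SPECIAL;
	rq->special = cmd_data;

	/* Execute synchronously (assumes blk_execute_rq() suits this driver). */
	err = blk_execute_rq(q, NULL, rq, 0);
	blk_mq_free_request(rq);
	return err;
}
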
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 14c8f35946e18404adfc6323827520ce19106620..a59b0565e940dfd3616f0f5439794c7995d5a5ee 100644
@@ -194,27 +194,9 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
 }
 
 static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx,
-                                             gfp_t gfp, bool reserved,
-                                             int rw)
+                                             gfp_t gfp, bool reserved)
 {
-       struct request *req;
-       bool is_flush = false;
-       /*
-        * flush need allocate a request, leave at least one request for
-        * non-flush IO to avoid deadlock
-        */
-       if ((rw & REQ_FLUSH) && !(rw & REQ_FLUSH_SEQ)) {
-               if (atomic_inc_return(&hctx->pending_flush) >=
-                   hctx->queue_depth - hctx->reserved_tags - 1) {
-                       atomic_dec(&hctx->pending_flush);
-                       return NULL;
-               }
-               is_flush = true;
-       }
-       req = blk_mq_alloc_rq(hctx, gfp, reserved);
-       if (!req && is_flush)
-               atomic_dec(&hctx->pending_flush);
-       return req;
+       return blk_mq_alloc_rq(hctx, gfp, reserved);
 }
 
 static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
@@ -227,7 +209,7 @@ static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
                struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
                struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
-               rq = __blk_mq_alloc_request(hctx, gfp & ~__GFP_WAIT, reserved, rw);
+               rq = __blk_mq_alloc_request(hctx, gfp & ~__GFP_WAIT, reserved);
                if (rq) {
                        blk_mq_rq_ctx_init(q, ctx, rq, rw);
                        break;
@@ -244,15 +226,14 @@ static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
        return rq;
 }
 
-struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
-               gfp_t gfp, bool reserved)
+struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp)
 {
        struct request *rq;
 
        if (blk_mq_queue_enter(q))
                return NULL;
 
-       rq = blk_mq_alloc_request_pinned(q, rw, gfp, reserved);
+       rq = blk_mq_alloc_request_pinned(q, rw, gfp, false);
        if (rq)
                blk_mq_put_ctx(rq->mq_ctx);
        return rq;
@@ -276,7 +257,7 @@ EXPORT_SYMBOL(blk_mq_alloc_reserved_request);
 /*
  * Re-init and set pdu, if we have it
  */
-static void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq)
+void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq)
 {
        blk_rq_init(hctx->queue, rq);
 
@@ -290,9 +271,6 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
        const int tag = rq->tag;
        struct request_queue *q = rq->q;
 
-       if ((rq->cmd_flags & REQ_FLUSH) && !(rq->cmd_flags & REQ_FLUSH_SEQ))
-               atomic_dec(&hctx->pending_flush);
-
        blk_mq_rq_init(hctx, rq);
        blk_mq_put_tag(hctx->tags, tag);
 
@@ -946,14 +924,14 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
        hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
        trace_block_getrq(q, bio, rw);
-       rq = __blk_mq_alloc_request(hctx, GFP_ATOMIC, false, bio->bi_rw);
+       rq = __blk_mq_alloc_request(hctx, GFP_ATOMIC, false);
        if (likely(rq))
-               blk_mq_rq_ctx_init(q, ctx, rq, bio->bi_rw);
+               blk_mq_rq_ctx_init(q, ctx, rq, rw);
        else {
                blk_mq_put_ctx(ctx);
                trace_block_sleeprq(q, bio, rw);
-               rq = blk_mq_alloc_request_pinned(q, bio->bi_rw,
-                               __GFP_WAIT|GFP_ATOMIC, false);
+               rq = blk_mq_alloc_request_pinned(q, rw, __GFP_WAIT|GFP_ATOMIC,
+                                                       false);
                ctx = rq->mq_ctx;
                hctx = q->mq_ops->map_queue(q, ctx->cpu);
        }
@@ -1230,9 +1208,7 @@ static int blk_mq_init_hw_queues(struct request_queue *q,
                hctx->queue_num = i;
                hctx->flags = reg->flags;
                hctx->queue_depth = reg->queue_depth;
-               hctx->reserved_tags = reg->reserved_tags;
                hctx->cmd_size = reg->cmd_size;
-               atomic_set(&hctx->pending_flush, 0);
 
                blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
                                                blk_mq_hctx_notify, hctx);
@@ -1412,9 +1388,14 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg,
        blk_mq_init_flush(q);
        blk_mq_init_cpu_queues(q, reg->nr_hw_queues);
 
-       if (blk_mq_init_hw_queues(q, reg, driver_data))
+       q->flush_rq = kzalloc(round_up(sizeof(struct request) + reg->cmd_size,
+                               cache_line_size()), GFP_KERNEL);
+       if (!q->flush_rq)
                goto err_hw;
 
+       if (blk_mq_init_hw_queues(q, reg, driver_data))
+               goto err_flush_rq;
+
        blk_mq_map_swqueue(q);
 
        mutex_lock(&all_q_mutex);
@@ -1422,6 +1403,9 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg,
        mutex_unlock(&all_q_mutex);
 
        return q;
+
+err_flush_rq:
+       kfree(q->flush_rq);
 err_hw:
        kfree(q->mq_map);
 err_map: