git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/commitdiff
blk-mq-sched: decide how to handle flush rq via RQF_FLUSH_SEQ
authorMing Lei <ming.lei@redhat.com>
Thu, 2 Nov 2017 15:24:36 +0000 (23:24 +0800)
committerJens Axboe <axboe@kernel.dk>
Sat, 4 Nov 2017 18:38:50 +0000 (12:38 -0600)
In the case of an IO scheduler we always pre-allocate one driver tag
before calling blk_insert_flush(), and the flush request will be marked
as RQF_FLUSH_SEQ once it is in the flush machinery.

So if RQF_FLUSH_SEQ isn't set, we call blk_insert_flush() to handle
the request; otherwise the flush request is dispatched directly to the
->dispatch list.

This is a preparation patch for not preallocating a driver tag for flush
requests, and for not treating flush requests as a special case. This is
similar to what the legacy path does.

Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-mq-sched.c

index 13a27d4d1671df4ae32346e40b1e470e28eee413..e7094f44afafcd8c7230d0b20b15d7e4f37421f7 100644 (file)
@@ -345,21 +345,23 @@ void blk_mq_sched_request_inserted(struct request *rq)
 EXPORT_SYMBOL_GPL(blk_mq_sched_request_inserted);
 
 static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
+                                      bool has_sched,
                                       struct request *rq)
 {
-       if (rq->tag == -1) {
+       /* dispatch flush rq directly */
+       if (rq->rq_flags & RQF_FLUSH_SEQ) {
+               spin_lock(&hctx->lock);
+               list_add(&rq->queuelist, &hctx->dispatch);
+               spin_unlock(&hctx->lock);
+               return true;
+       }
+
+       if (has_sched) {
                rq->rq_flags |= RQF_SORTED;
-               return false;
+               WARN_ON(rq->tag != -1);
        }
 
-       /*
-        * If we already have a real request tag, send directly to
-        * the dispatch list.
-        */
-       spin_lock(&hctx->lock);
-       list_add(&rq->queuelist, &hctx->dispatch);
-       spin_unlock(&hctx->lock);
-       return true;
+       return false;
 }
 
 /*
@@ -385,12 +387,13 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head,
        struct blk_mq_ctx *ctx = rq->mq_ctx;
        struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
 
-       if (rq->tag == -1 && op_is_flush(rq->cmd_flags)) {
+       /* flush rq in flush machinery need to be dispatched directly */
+       if (!(rq->rq_flags & RQF_FLUSH_SEQ) && op_is_flush(rq->cmd_flags)) {
                blk_mq_sched_insert_flush(hctx, rq, can_block);
                return;
        }
 
-       if (e && blk_mq_sched_bypass_insert(hctx, rq))
+       if (blk_mq_sched_bypass_insert(hctx, !!e, rq))
                goto run;
 
        if (e && e->type->ops.mq.insert_requests) {
@@ -428,7 +431,7 @@ void blk_mq_sched_insert_requests(struct request_queue *q,
                list_for_each_entry_safe(rq, next, list, queuelist) {
                        if (WARN_ON_ONCE(rq->tag != -1)) {
                                list_del_init(&rq->queuelist);
-                               blk_mq_sched_bypass_insert(hctx, rq);
+                               blk_mq_sched_bypass_insert(hctx, true, rq);
                        }
                }
        }