blk-mq: fix plugging in blk_sq_make_request
author    Jeff Moyer <jmoyer@redhat.com>
          Fri, 8 May 2015 17:51:30 +0000 (10:51 -0700)
committer Jens Axboe <axboe@fb.com>
          Fri, 8 May 2015 20:17:17 +0000 (14:17 -0600)
The following appears in blk_sq_make_request:

/*
 * If we have multiple hardware queues, just go directly to
 * one of those for sync IO.
 */

We clearly don't have multiple hardware queues here!  This comment was
introduced by commit 07068d5b8e ("blk-mq: split make request handler
for multi and single queue"):

    We want slightly different behavior from them:

    - On single queue devices, we currently use the per-process plug
      for deferred IO and for merging.

    - On multi queue devices, we don't use the per-process plug, but
      we want to go straight to hardware for SYNC IO.

The old code had this:

        use_plug = !is_flush_fua && ((q->nr_hw_queues == 1) || !is_sync);

and that was converted to:

        use_plug = !is_flush_fua && !is_sync;

which is not equivalent.  For the single queue case, the second half of
the && expression is always true, so the old test reduces to just
!is_flush_fua.  What I think was actually intended follows (and this
more closely matches what is done in blk_queue_bio).

V2: delete the 'likely', which should not be a big deal
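
For context on that note: in the kernel, likely() is purely a
branch-prediction hint, so deleting one can affect code layout at most,
never behavior.  Paraphrasing its definition from
include/linux/compiler.h:

        /* Hints that bias the compiler's block layout; no semantic effect. */
        #define likely(x)   __builtin_expect(!!(x), 1)
        #define unlikely(x) __builtin_expect(!!(x), 0)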

Signed-off-by: Jeff Moyer <jmoyer@redhat.com>
Signed-off-by: Shaohua Li <shli@fb.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
diff --git a/block/blk-mq.c b/block/blk-mq.c
index ade8a2d1b0aa8600ad31413b59db37392628bffc..a65acffde19ae336256cc1a864a7997b8782e0f6 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1309,16 +1309,11 @@ static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
 {
        const int is_sync = rw_is_sync(bio->bi_rw);
        const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
-       unsigned int use_plug, request_count = 0;
+       struct blk_plug *plug;
+       unsigned int request_count = 0;
        struct blk_map_ctx data;
        struct request *rq;
 
-       /*
-        * If we have multiple hardware queues, just go directly to
-        * one of those for sync IO.
-        */
-       use_plug = !is_flush_fua && !is_sync;
-
        blk_queue_bounce(q, &bio);
 
        if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
@@ -1326,7 +1321,7 @@ static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
                return;
        }
 
-       if (use_plug && !blk_queue_nomerges(q) &&
+       if (!is_flush_fua && !blk_queue_nomerges(q) &&
            blk_attempt_plug_merge(q, bio, &request_count))
                return;
 
@@ -1345,21 +1340,18 @@ static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
         * utilize that to temporarily store requests until the task is
         * either done or scheduled away.
         */
-       if (use_plug) {
-               struct blk_plug *plug = current->plug;
-
-               if (plug) {
-                       blk_mq_bio_to_request(rq, bio);
-                       if (list_empty(&plug->mq_list))
-                               trace_block_plug(q);
-                       else if (request_count >= BLK_MAX_REQUEST_COUNT) {
-                               blk_flush_plug_list(plug, false);
-                               trace_block_plug(q);
-                       }
-                       list_add_tail(&rq->queuelist, &plug->mq_list);
-                       blk_mq_put_ctx(data.ctx);
-                       return;
+       plug = current->plug;
+       if (plug) {
+               blk_mq_bio_to_request(rq, bio);
+               if (list_empty(&plug->mq_list))
+                       trace_block_plug(q);
+               else if (request_count >= BLK_MAX_REQUEST_COUNT) {
+                       blk_flush_plug_list(plug, false);
+                       trace_block_plug(q);
                }
+               list_add_tail(&rq->queuelist, &plug->mq_list);
+               blk_mq_put_ctx(data.ctx);
+               return;
        }
 
        if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {