diff --git a/block/blk-merge.c b/block/blk-merge.c
index f452ccc610602b21a95154fc0ee5a51f3509550e..5a0cc8550fe6e6b74933c7598cb8d9f3fd8f8587 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -660,6 +660,31 @@ static void blk_account_io_merge(struct request *req)
                part_stat_unlock();
        }
 }
+/*
+ * Two cases of handling DISCARD merge:
+ * If max_discard_segments > 1, the driver treats each bio as a
+ * range and sends them to the controller together. The ranges
+ * need not be contiguous.
+ * Otherwise, the bios/requests are handled the same as others,
+ * which must be contiguous.
+ */
+static inline bool blk_discard_mergable(struct request *req)
+{
+       if (req_op(req) == REQ_OP_DISCARD &&
+           queue_max_discard_segments(req->q) > 1)
+               return true;
+       return false;
+}
+
+static enum elv_merge blk_try_req_merge(struct request *req, struct request *next)
+{
+       if (blk_discard_mergable(req))
+               return ELEVATOR_DISCARD_MERGE;
+       else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
+               return ELEVATOR_BACK_MERGE;
+
+       return ELEVATOR_NO_MERGE;
+}
 
 /*
  * For non-mq, this has to be called with the request spinlock acquired.
@@ -677,12 +702,6 @@ static struct request *attempt_merge(struct request_queue *q,
        if (req_op(req) != req_op(next))
                return NULL;
 
-       /*
-        * not contiguous
-        */
-       if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
-               return NULL;
-
        if (rq_data_dir(req) != rq_data_dir(next)
            || req->rq_disk != next->rq_disk
            || req_no_special_merge(next))
@@ -706,11 +725,19 @@ static struct request *attempt_merge(struct request_queue *q,
         * counts here. Handle DISCARDs separately, as they
         * have separate settings.
         */
-       if (req_op(req) == REQ_OP_DISCARD) {
+
+       switch (blk_try_req_merge(req, next)) {
+       case ELEVATOR_DISCARD_MERGE:
                if (!req_attempt_discard_merge(q, req, next))
                        return NULL;
-       } else if (!ll_merge_requests_fn(q, req, next))
+               break;
+       case ELEVATOR_BACK_MERGE:
+               if (!ll_merge_requests_fn(q, req, next))
+                       return NULL;
+               break;
+       default:
                return NULL;
+       }
 
        /*
         * If failfast settings disagree or any of the two is already
@@ -739,7 +766,7 @@ static struct request *attempt_merge(struct request_queue *q,
 
        req->__data_len += blk_rq_bytes(next);
 
-       if (req_op(req) != REQ_OP_DISCARD)
+       if (!blk_discard_mergable(req))
                elv_merge_requests(q, req, next);
 
        /*
@@ -835,8 +862,7 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
 
 enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
 {
-       if (req_op(rq) == REQ_OP_DISCARD &&
-           queue_max_discard_segments(rq->q) > 1)
+       if (blk_discard_mergable(rq))
                return ELEVATOR_DISCARD_MERGE;
        else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
                return ELEVATOR_BACK_MERGE;