block: use blk_free_flush_queue() to free hctx->fq in blk_mq_init_hctx
[mirror_ubuntu-bionic-kernel.git] / block / blk-merge.c
index f5dedd57dff6b40fb6e88faa532bb88a94fcde61..f452ccc610602b21a95154fc0ee5a51f3509550e 100644 (file)
@@ -27,7 +27,8 @@ static struct bio *blk_bio_discard_split(struct request_queue *q,
        /* Zero-sector (unknown) and one-sector granularities are the same.  */
        granularity = max(q->limits.discard_granularity >> 9, 1U);
 
-       max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
+       max_discard_sectors = min(q->limits.max_discard_sectors,
+                       bio_allowed_max_sectors(q));
        max_discard_sectors -= max_discard_sectors % granularity;
 
        if (unlikely(!max_discard_sectors)) {
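
bio_allowed_max_sectors() is not shown in this hunk; it is a small helper in block/blk.h, and assuming it matches the definition introduced alongside this change, it caps a single bio at the largest sector count that both fits the 32-bit bvec_iter.bi_size field and stays aligned to the logical block size:

static inline unsigned int bio_allowed_max_sectors(struct request_queue *q)
{
	/*
	 * A bio carries at most UINT_MAX bytes (bvec_iter.bi_size is a
	 * 32-bit byte count); round that down to the logical block size
	 * before converting to 512-byte sectors.
	 */
	return round_down(UINT_MAX, queue_logical_block_size(q)) >> 9;
}

The old UINT_MAX >> 9 cap is not necessarily a multiple of the logical block size, so a split discard bio could end on a partial logical block; the new cap closes that hole before the discard-granularity rounding in the lines above.
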
@@ -551,6 +552,24 @@ static bool req_no_special_merge(struct request *req)
        return !q->mq_ops && req->special;
 }
 
+static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
+               struct request *next)
+{
+       unsigned short segments = blk_rq_nr_discard_segments(req);
+
+       if (segments >= queue_max_discard_segments(q))
+               goto no_merge;
+       if (blk_rq_sectors(req) + bio_sectors(next->bio) >
+           blk_rq_get_max_sectors(req, blk_rq_pos(req)))
+               goto no_merge;
+
+       req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
+       return true;
+no_merge:
+       req_set_nomerge(q, req);
+       return false;
+}
+
 static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
                                struct request *next)
 {
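
req_attempt_discard_merge() counts discard ranges rather than scatter-gather segments: each bio queued on a discard request is one range, and the total is capped by the queue's max_discard_segments limit. A sketch of the helpers it relies on, assuming they match their definitions in include/linux/blkdev.h and block/blk.h at this point in the tree:

/* One discard "segment" per bio; a request always counts as at least one. */
static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
{
	return max_t(unsigned short, rq->nr_phys_segments, 1);
}

/* Per-queue cap on how many discard ranges a single request may carry. */
static inline unsigned short queue_max_discard_segments(struct request_queue *q)
{
	return q->limits.max_discard_segments;
}

/* Flag the request as unmergeable and forget it as the cached merge target. */
static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
}
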
@@ -684,9 +703,13 @@ static struct request *attempt_merge(struct request_queue *q,
         * If we are allowed to merge, then append bio list
         * from next to rq and release next. merge_requests_fn
         * will have updated segment counts, update sector
-        * counts here.
+        * counts here. Handle DISCARDs separately, as they
+        * have separate settings.
         */
-       if (!ll_merge_requests_fn(q, req, next))
+       if (req_op(req) == REQ_OP_DISCARD) {
+               if (!req_attempt_discard_merge(q, req, next))
+                       return NULL;
+       } else if (!ll_merge_requests_fn(q, req, next))
                return NULL;
 
        /*
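
The discard branch is bounded by the queue's discard limits (max_discard_sectors, max_discard_segments) rather than the data-transfer limits that ll_merge_requests_fn() enforces, which is why the two paths are kept separate. A driver opts into multi-range discard merging by advertising those limits; a hypothetical setup in a driver's probe path, with illustrative values not taken from this patch, might look like:

	/* Hypothetical queue configuration; values are illustrative only. */
	blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
	blk_queue_max_discard_segments(q, 256);	/* up to 256 ranges per request */
	q->limits.discard_granularity = 4096;
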
@@ -716,7 +739,8 @@ static struct request *attempt_merge(struct request_queue *q,
 
        req->__data_len += blk_rq_bytes(next);
 
-       elv_merge_requests(q, req, next);
+       if (req_op(req) != REQ_OP_DISCARD)
+               elv_merge_requests(q, req, next);
 
        /*
         * 'next' is going away, so update stats accordingly
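
For a discard merge the elevator merge hook is not called; the merged request simply ends up with one bio chained per range, with nr_phys_segments tracking the range count. A hypothetical driver-side consumer (setup_discard_range() is an invented placeholder for whatever per-range descriptor the hardware needs) would walk it roughly like this:

static void hypothetical_queue_discard(struct request *req)
{
	struct bio *bio;
	unsigned short range = 0;

	/*
	 * Each bio on a merged discard request describes one contiguous
	 * range: starting sector plus length in sectors.
	 */
	__rq_for_each_bio(bio, req)
		setup_discard_range(range++, bio->bi_iter.bi_sector,
				    bio_sectors(bio));

	/* range now matches blk_rq_nr_discard_segments(req). */
}
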