block: optionally merge discontiguous discard bios into a single request
diff --git a/block/blk-mq.c b/block/blk-mq.c
index da2123dd681e3e6d8e1abe8ed0719e1fcf130a93..7412191aee5762d89a0b2dcce10aefbbe89784d5 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -199,13 +199,7 @@ void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
        rq->special = NULL;
        /* tag was already set */
        rq->errors = 0;
-
-       rq->cmd = rq->__cmd;
-
        rq->extra_len = 0;
-       rq->sense_len = 0;
-       rq->resid_len = 0;
-       rq->sense = NULL;
 
        INIT_LIST_HEAD(&rq->timeout_list);
        rq->timeout = 0;
@@ -487,10 +481,6 @@ void blk_mq_start_request(struct request *rq)
 
        trace_block_rq_issue(q, rq);
 
-       rq->resid_len = blk_rq_bytes(rq);
-       if (unlikely(blk_bidi_rq(rq)))
-               rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);
-
        if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
                blk_stat_set_issue_time(&rq->issue_stat);
                rq->rq_flags |= RQF_STATS;
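The two hunks above are fallout from splitting the SCSI passthrough fields out of struct request: blk-mq no longer initializes rq->cmd, rq->sense and the sense/resid lengths, and no longer primes resid_len (or the bidi partner's resid_len) at issue time. A minimal sketch of where those fields went, assuming the layout from the upstream "block: split scsi_request out of struct request" change, where drivers that need them embed the structure at the start of their per-request payload and reach it via scsi_req(rq):

    struct scsi_request {
            unsigned char   __cmd[BLK_MAX_CDB];
            unsigned char   *cmd;           /* was rq->cmd / rq->__cmd */
            unsigned short  cmd_len;
            unsigned int    sense_len;      /* was rq->sense_len */
            unsigned int    resid_len;      /* was rq->resid_len */
            void            *sense;         /* was rq->sense */
    };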
@@ -568,13 +558,13 @@ static void blk_mq_requeue_work(struct work_struct *work)
 
                rq->rq_flags &= ~RQF_SOFTBARRIER;
                list_del_init(&rq->queuelist);
-               blk_mq_sched_insert_request(rq, true, false, false);
+               blk_mq_sched_insert_request(rq, true, false, false, true);
        }
 
        while (!list_empty(&rq_list)) {
                rq = list_entry(rq_list.next, struct request, queuelist);
                list_del_init(&rq->queuelist);
-               blk_mq_sched_insert_request(rq, false, false, false);
+               blk_mq_sched_insert_request(rq, false, false, false, true);
        }
 
        blk_mq_run_hw_queues(q, false);
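blk_mq_sched_insert_request() gains a fifth boolean here; in the upstream blk-mq-sched series it is a can_block flag saying whether the insert path may sleep, e.g. to acquire a driver tag for a flush request. The requeue worker runs in process context, so both calls pass true; the direct-issue fallback further down passes false. A sketch of the assumed prototype after this change:

    /* Assumed prototype in block/blk-mq-sched.h after this series: */
    void blk_mq_sched_insert_request(struct request *rq, bool at_head,
                                     bool run_queue, bool async,
                                     bool can_block);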
@@ -773,7 +763,7 @@ static bool blk_mq_attempt_merge(struct request_queue *q,
        int checked = 8;
 
        list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
-               int el_ret;
+               bool merged = false;
 
                if (!checked--)
                        break;
@@ -781,26 +771,25 @@ static bool blk_mq_attempt_merge(struct request_queue *q,
                if (!blk_rq_merge_ok(rq, bio))
                        continue;
 
-               el_ret = blk_try_merge(rq, bio);
-               if (el_ret == ELEVATOR_NO_MERGE)
-                       continue;
-
-               if (!blk_mq_sched_allow_merge(q, rq, bio))
+               switch (blk_try_merge(rq, bio)) {
+               case ELEVATOR_BACK_MERGE:
+                       if (blk_mq_sched_allow_merge(q, rq, bio))
+                               merged = bio_attempt_back_merge(q, rq, bio);
                        break;
-
-               if (el_ret == ELEVATOR_BACK_MERGE) {
-                       if (bio_attempt_back_merge(q, rq, bio)) {
-                               ctx->rq_merged++;
-                               return true;
-                       }
+               case ELEVATOR_FRONT_MERGE:
+                       if (blk_mq_sched_allow_merge(q, rq, bio))
+                               merged = bio_attempt_front_merge(q, rq, bio);
                        break;
-               } else if (el_ret == ELEVATOR_FRONT_MERGE) {
-                       if (bio_attempt_front_merge(q, rq, bio)) {
-                               ctx->rq_merged++;
-                               return true;
-                       }
+               case ELEVATOR_DISCARD_MERGE:
+                       merged = bio_attempt_discard_merge(q, rq, bio);
                        break;
+               default:
+                       continue;
                }
+
+               if (merged)
+                       ctx->rq_merged++;
+               return merged;
        }
 
        return false;
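This hunk carries the commit subject: the open-coded el_ret if/else chain becomes a switch on blk_try_merge(), with a new ELEVATOR_DISCARD_MERGE case that bypasses the scheduler's allow_merge hook and calls bio_attempt_discard_merge() to chain a discontiguous discard bio onto an existing discard request, bounded by queue_max_discard_segments(). For that case to trigger, blk_try_merge() has to report a discard request as a merge candidate regardless of contiguity; a sketch of the matching check in block/blk-merge.c, assumed from the upstream commit and the enum elv_merge return type introduced in the same series:

    enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
    {
            /* Multi-range discards: any discard bio is a merge candidate. */
            if (req_op(rq) == REQ_OP_DISCARD &&
                queue_max_discard_segments(rq->q) > 1)
                    return ELEVATOR_DISCARD_MERGE;
            else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
                    return ELEVATOR_BACK_MERGE;
            else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
                    return ELEVATOR_FRONT_MERGE;
            return ELEVATOR_NO_MERGE;
    }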
@@ -847,12 +836,11 @@ static inline unsigned int queued_to_index(unsigned int queued)
        return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
 }
 
-static bool blk_mq_get_driver_tag(struct request *rq,
-                                 struct blk_mq_hw_ctx **hctx, bool wait)
+bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
+                          bool wait)
 {
        struct blk_mq_alloc_data data = {
                .q = rq->q,
-               .ctx = rq->mq_ctx,
                .hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu),
                .flags = wait ? 0 : BLK_MQ_REQ_NOWAIT,
        };
@@ -1395,7 +1383,7 @@ static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
        }
 
 insert:
-       blk_mq_sched_insert_request(rq, false, true, true);
+       blk_mq_sched_insert_request(rq, false, true, true, false);
 }
 
 /*
@@ -1446,10 +1434,12 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
        cookie = request_to_qc_t(data.hctx, rq);
 
        if (unlikely(is_flush_fua)) {
+               blk_mq_put_ctx(data.ctx);
                blk_mq_bio_to_request(rq, bio);
                blk_mq_get_driver_tag(rq, NULL, true);
                blk_insert_flush(rq);
-               goto run_queue;
+               blk_mq_run_hw_queue(data.hctx, true);
+               goto done;
        }
 
        plug = current->plug;
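In the flush/FUA fast path above, the software-queue context is now released up front: blk_mq_get_driver_tag(rq, NULL, true) may block waiting for tag space, and blk_mq_put_ctx() pairs with the get_cpu() taken when the ctx was looked up, so sleeping while it is held would be a bug. With the queue run done inline, the shared run_queue label below goes away. An annotated sketch of the resulting path; the same reordering is applied to blk_sq_make_request() further down:

    if (unlikely(is_flush_fua)) {
            blk_mq_put_ctx(data.ctx);               /* can't sleep while held */
            blk_mq_bio_to_request(rq, bio);
            blk_mq_get_driver_tag(rq, NULL, true);  /* wait == true: may block */
            blk_insert_flush(rq);
            blk_mq_run_hw_queue(data.hctx, true);   /* async queue run */
            goto done;
    }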
@@ -1502,7 +1492,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
                blk_mq_put_ctx(data.ctx);
                blk_mq_bio_to_request(rq, bio);
                blk_mq_sched_insert_request(rq, false, true,
-                                               !is_sync || is_flush_fua);
+                                               !is_sync || is_flush_fua, true);
                goto done;
        }
        if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
@@ -1512,7 +1502,6 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
                 * latter allows for merging opportunities and more efficient
                 * dispatching.
                 */
-run_queue:
                blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
        }
        blk_mq_put_ctx(data.ctx);
@@ -1568,10 +1557,12 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
        cookie = request_to_qc_t(data.hctx, rq);
 
        if (unlikely(is_flush_fua)) {
+               blk_mq_put_ctx(data.ctx);
                blk_mq_bio_to_request(rq, bio);
                blk_mq_get_driver_tag(rq, NULL, true);
                blk_insert_flush(rq);
-               goto run_queue;
+               blk_mq_run_hw_queue(data.hctx, true);
+               goto done;
        }
 
        /*
@@ -1612,7 +1603,7 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
                blk_mq_put_ctx(data.ctx);
                blk_mq_bio_to_request(rq, bio);
                blk_mq_sched_insert_request(rq, false, true,
-                                               !is_sync || is_flush_fua);
+                                               !is_sync || is_flush_fua, true);
                goto done;
        }
        if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
@@ -1622,7 +1613,6 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
                 * latter allows for merging opportunities and more efficient
                 * dispatching.
                 */
-run_queue:
                blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
        }
 
@@ -2826,8 +2816,6 @@ void blk_mq_enable_hotplug(void)
 
 static int __init blk_mq_init(void)
 {
-       blk_mq_debugfs_init();
-
        cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
                                blk_mq_hctx_notify_dead);