block: optionally merge discontiguous discard bios into a single request
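
This blobdiff spans several commits; the one named above contributes the
largest hunk below, which rewrites blk_mq_attempt_merge() as a switch over
blk_try_merge() and adds an ELEVATOR_DISCARD_MERGE case that calls
bio_attempt_discard_merge(). The point, per the subject line, is that
discard ranges need not be contiguous with the request they join: a
discard merge appends another range to the request instead of extending
it at either end the way back and front merges do. A minimal userspace
sketch of that distinction (standalone C with illustrative types and a
made-up segment cap; not the kernel implementation):

#include <stdbool.h>
#include <stdio.h>

#define MAX_DISCARD_SEGMENTS 4  /* made-up cap; the kernel uses queue limits */

struct range {
        unsigned long long start;
        unsigned long long len;
};

struct discard_req {
        struct range seg[MAX_DISCARD_SEGMENTS];
        int nr_seg;
};

/* Back-merge analogue: the new range must start where the request ends. */
static bool try_back_merge(struct discard_req *req, const struct range *r)
{
        struct range *last = &req->seg[req->nr_seg - 1];

        if (last->start + last->len != r->start)
                return false;
        last->len += r->len;
        return true;
}

/* Discard-merge analogue: any range may join until the segment cap is hit. */
static bool try_discard_merge(struct discard_req *req, const struct range *r)
{
        if (req->nr_seg >= MAX_DISCARD_SEGMENTS)
                return false;
        req->seg[req->nr_seg++] = *r;
        return true;
}

int main(void)
{
        const struct range bios[] = {
                { 0, 8 }, { 8, 8 },     /* contiguous with each other  */
                { 100, 8 }, { 300, 8 }, /* discontiguous with the rest */
        };
        struct discard_req req = { .seg[0] = bios[0], .nr_seg = 1 };

        for (int i = 1; i < 4; i++) {
                if (try_back_merge(&req, &bios[i]))
                        printf("bio %d: back merge (range extended)\n", i);
                else if (try_discard_merge(&req, &bios[i]))
                        printf("bio %d: discard merge (new segment)\n", i);
                else
                        printf("bio %d: would need a new request\n", i);
        }
        printf("one request, %d segment(s)\n", req.nr_seg);
        return 0;
}

In the kernel the per-request range cap comes from queue limits set by the
driver (not visible in this diff) rather than a constant, but the decision
has the same shape: contiguity gates back and front merges, while the
discard case only asks whether the request can take one more range.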
diff --git a/block/blk-mq.c b/block/blk-mq.c
index fd80101c7591a2ae4642e2ce6f1dd979de3a3d91..7412191aee5762d89a0b2dcce10aefbbe89784d5 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -40,7 +40,7 @@ static LIST_HEAD(all_q_list);
 /*
  * Check if any of the ctx's have pending work in this hardware queue
  */
-static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
+bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
 {
        return sbitmap_any_bit_set(&hctx->ctx_map) ||
                        !list_empty_careful(&hctx->dispatch) ||
@@ -199,13 +199,7 @@ void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
        rq->special = NULL;
        /* tag was already set */
        rq->errors = 0;
-
-       rq->cmd = rq->__cmd;
-
        rq->extra_len = 0;
-       rq->sense_len = 0;
-       rq->resid_len = 0;
-       rq->sense = NULL;
 
        INIT_LIST_HEAD(&rq->timeout_list);
        rq->timeout = 0;
@@ -345,6 +339,7 @@ void __blk_mq_finish_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
                blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
        if (sched_tag != -1)
                blk_mq_sched_completed_request(hctx, rq);
+       blk_mq_sched_restart_queues(hctx);
        blk_queue_exit(q);
 }
 
@@ -486,10 +481,6 @@ void blk_mq_start_request(struct request *rq)
 
        trace_block_rq_issue(q, rq);
 
-       rq->resid_len = blk_rq_bytes(rq);
-       if (unlikely(blk_bidi_rq(rq)))
-               rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);
-
        if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
                blk_stat_set_issue_time(&rq->issue_stat);
                rq->rq_flags |= RQF_STATS;
@@ -567,13 +558,13 @@ static void blk_mq_requeue_work(struct work_struct *work)
 
                rq->rq_flags &= ~RQF_SOFTBARRIER;
                list_del_init(&rq->queuelist);
-               blk_mq_sched_insert_request(rq, true, false, false);
+               blk_mq_sched_insert_request(rq, true, false, false, true);
        }
 
        while (!list_empty(&rq_list)) {
                rq = list_entry(rq_list.next, struct request, queuelist);
                list_del_init(&rq->queuelist);
-               blk_mq_sched_insert_request(rq, false, false, false);
+               blk_mq_sched_insert_request(rq, false, false, false, true);
        }
 
        blk_mq_run_hw_queues(q, false);
@@ -772,7 +763,7 @@ static bool blk_mq_attempt_merge(struct request_queue *q,
        int checked = 8;
 
        list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
-               int el_ret;
+               bool merged = false;
 
                if (!checked--)
                        break;
@@ -780,26 +771,25 @@ static bool blk_mq_attempt_merge(struct request_queue *q,
                if (!blk_rq_merge_ok(rq, bio))
                        continue;
 
-               el_ret = blk_try_merge(rq, bio);
-               if (el_ret == ELEVATOR_NO_MERGE)
-                       continue;
-
-               if (!blk_mq_sched_allow_merge(q, rq, bio))
+               switch (blk_try_merge(rq, bio)) {
+               case ELEVATOR_BACK_MERGE:
+                       if (blk_mq_sched_allow_merge(q, rq, bio))
+                               merged = bio_attempt_back_merge(q, rq, bio);
                        break;
-
-               if (el_ret == ELEVATOR_BACK_MERGE) {
-                       if (bio_attempt_back_merge(q, rq, bio)) {
-                               ctx->rq_merged++;
-                               return true;
-                       }
+               case ELEVATOR_FRONT_MERGE:
+                       if (blk_mq_sched_allow_merge(q, rq, bio))
+                               merged = bio_attempt_front_merge(q, rq, bio);
                        break;
-               } else if (el_ret == ELEVATOR_FRONT_MERGE) {
-                       if (bio_attempt_front_merge(q, rq, bio)) {
-                               ctx->rq_merged++;
-                               return true;
-                       }
+               case ELEVATOR_DISCARD_MERGE:
+                       merged = bio_attempt_discard_merge(q, rq, bio);
                        break;
+               default:
+                       continue;
                }
+
+               if (merged)
+                       ctx->rq_merged++;
+               return merged;
        }
 
        return false;
@@ -846,12 +836,11 @@ static inline unsigned int queued_to_index(unsigned int queued)
        return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
 }
 
-static bool blk_mq_get_driver_tag(struct request *rq,
-                                 struct blk_mq_hw_ctx **hctx, bool wait)
+bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
+                          bool wait)
 {
        struct blk_mq_alloc_data data = {
                .q = rq->q,
-               .ctx = rq->mq_ctx,
                .hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu),
                .flags = wait ? 0 : BLK_MQ_REQ_NOWAIT,
        };
@@ -879,6 +868,21 @@ done:
        return false;
 }
 
+static void blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
+                                 struct request *rq)
+{
+       if (rq->tag == -1 || rq->internal_tag == -1)
+               return;
+
+       blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag);
+       rq->tag = -1;
+
+       if (rq->rq_flags & RQF_MQ_INFLIGHT) {
+               rq->rq_flags &= ~RQF_MQ_INFLIGHT;
+               atomic_dec(&hctx->nr_active);
+       }
+}
+
 /*
  * If we fail getting a driver tag because all the driver tags are already
  * assigned and on the dispatch list, BUT the first entry does not have a
@@ -951,6 +955,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
                        queued++;
                        break;
                case BLK_MQ_RQ_QUEUE_BUSY:
+                       blk_mq_put_driver_tag(hctx, rq);
                        list_add(&rq->queuelist, list);
                        __blk_mq_requeue_request(rq);
                        break;
@@ -981,7 +986,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
         */
        if (!list_empty(list)) {
                spin_lock(&hctx->lock);
-               list_splice(list, &hctx->dispatch);
+               list_splice_init(list, &hctx->dispatch);
                spin_unlock(&hctx->lock);
 
                /*
@@ -1378,7 +1383,7 @@ static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
        }
 
 insert:
-       blk_mq_sched_insert_request(rq, false, true, true);
+       blk_mq_sched_insert_request(rq, false, true, true, false);
 }
 
 /*
@@ -1389,7 +1394,7 @@ insert:
 static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 {
        const int is_sync = op_is_sync(bio->bi_opf);
-       const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
+       const int is_flush_fua = op_is_flush(bio->bi_opf);
        struct blk_mq_alloc_data data = { .flags = 0 };
        struct request *rq;
        unsigned int request_count = 0, srcu_idx;
@@ -1429,10 +1434,12 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
        cookie = request_to_qc_t(data.hctx, rq);
 
        if (unlikely(is_flush_fua)) {
+               blk_mq_put_ctx(data.ctx);
                blk_mq_bio_to_request(rq, bio);
                blk_mq_get_driver_tag(rq, NULL, true);
                blk_insert_flush(rq);
-               goto run_queue;
+               blk_mq_run_hw_queue(data.hctx, true);
+               goto done;
        }
 
        plug = current->plug;
@@ -1485,7 +1492,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
                blk_mq_put_ctx(data.ctx);
                blk_mq_bio_to_request(rq, bio);
                blk_mq_sched_insert_request(rq, false, true,
-                                               !is_sync || is_flush_fua);
+                                               !is_sync || is_flush_fua, true);
                goto done;
        }
        if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
@@ -1495,7 +1502,6 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
                 * latter allows for merging opportunities and more efficient
                 * dispatching.
                 */
-run_queue:
                blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
        }
        blk_mq_put_ctx(data.ctx);
@@ -1510,7 +1516,7 @@ done:
 static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
 {
        const int is_sync = op_is_sync(bio->bi_opf);
-       const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
+       const int is_flush_fua = op_is_flush(bio->bi_opf);
        struct blk_plug *plug;
        unsigned int request_count = 0;
        struct blk_mq_alloc_data data = { .flags = 0 };
@@ -1551,10 +1557,12 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
        cookie = request_to_qc_t(data.hctx, rq);
 
        if (unlikely(is_flush_fua)) {
+               blk_mq_put_ctx(data.ctx);
                blk_mq_bio_to_request(rq, bio);
                blk_mq_get_driver_tag(rq, NULL, true);
                blk_insert_flush(rq);
-               goto run_queue;
+               blk_mq_run_hw_queue(data.hctx, true);
+               goto done;
        }
 
        /*
@@ -1595,7 +1603,7 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
                blk_mq_put_ctx(data.ctx);
                blk_mq_bio_to_request(rq, bio);
                blk_mq_sched_insert_request(rq, false, true,
-                                               !is_sync || is_flush_fua);
+                                               !is_sync || is_flush_fua, true);
                goto done;
        }
        if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
@@ -1605,7 +1613,6 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
                 * latter allows for merging opportunities and more efficient
                 * dispatching.
                 */
-run_queue:
                blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
        }
 
@@ -2809,8 +2816,6 @@ void blk_mq_enable_hotplug(void)
 
 static int __init blk_mq_init(void)
 {
-       blk_mq_debugfs_init();
-
        cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
                                blk_mq_hctx_notify_dead);