git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blobdiff - block/blk-mq.c
blk-mq: mark blk_mq_rq_ctx_init static
[mirror_ubuntu-bionic-kernel.git] / block / blk-mq.c
index f2224ffd225da8acb9b4775a19125f015cc6ab0a..e1d650804c8eee0dcad4483fed62bb4955a48a5b 100644 (file)
@@ -204,8 +204,8 @@ bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
 }
 EXPORT_SYMBOL(blk_mq_can_queue);
 
-void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
-                       struct request *rq, unsigned int op)
+static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
+               struct request *rq, unsigned int op)
 {
        INIT_LIST_HEAD(&rq->queuelist);
        /* csd/requeue_work/fifo_time is initialized before use */
@@ -243,7 +243,6 @@ void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
 
        ctx->rq_dispatched[op_is_sync(op)]++;
 }
-EXPORT_SYMBOL_GPL(blk_mq_rq_ctx_init);
 
 struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data,
                                       unsigned int op)
@@ -394,7 +393,7 @@ void blk_mq_free_request(struct request *rq)
 }
 EXPORT_SYMBOL_GPL(blk_mq_free_request);
 
-inline void __blk_mq_end_request(struct request *rq, int error)
+inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
 {
        blk_account_io_done(rq);
 
@@ -409,7 +408,7 @@ inline void __blk_mq_end_request(struct request *rq, int error)
 }
 EXPORT_SYMBOL(__blk_mq_end_request);
 
-void blk_mq_end_request(struct request *rq, int error)
+void blk_mq_end_request(struct request *rq, blk_status_t error)
 {
        if (blk_update_request(rq, error, blk_rq_bytes(rq)))
                BUG();
@@ -753,50 +752,6 @@ static void blk_mq_timeout_work(struct work_struct *work)
        blk_queue_exit(q);
 }
 
-/*
- * Reverse check our software queue for entries that we could potentially
- * merge with. Currently includes a hand-wavy stop count of 8, to not spend
- * too much time checking for merges.
- */
-static bool blk_mq_attempt_merge(struct request_queue *q,
-                                struct blk_mq_ctx *ctx, struct bio *bio)
-{
-       struct request *rq;
-       int checked = 8;
-
-       list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
-               bool merged = false;
-
-               if (!checked--)
-                       break;
-
-               if (!blk_rq_merge_ok(rq, bio))
-                       continue;
-
-               switch (blk_try_merge(rq, bio)) {
-               case ELEVATOR_BACK_MERGE:
-                       if (blk_mq_sched_allow_merge(q, rq, bio))
-                               merged = bio_attempt_back_merge(q, rq, bio);
-                       break;
-               case ELEVATOR_FRONT_MERGE:
-                       if (blk_mq_sched_allow_merge(q, rq, bio))
-                               merged = bio_attempt_front_merge(q, rq, bio);
-                       break;
-               case ELEVATOR_DISCARD_MERGE:
-                       merged = bio_attempt_discard_merge(q, rq, bio);
-                       break;
-               default:
-                       continue;
-               }
-
-               if (merged)
-                       ctx->rq_merged++;
-               return merged;
-       }
-
-       return false;
-}
-
 struct flush_busy_ctx_data {
        struct blk_mq_hw_ctx *hctx;
        struct list_head *list;
@@ -968,7 +923,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
 {
        struct blk_mq_hw_ctx *hctx;
        struct request *rq;
-       int errors, queued, ret = BLK_MQ_RQ_QUEUE_OK;
+       int errors, queued;
 
        if (list_empty(list))
                return false;
@@ -979,6 +934,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
        errors = queued = 0;
        do {
                struct blk_mq_queue_data bd;
+               blk_status_t ret;
 
                rq = list_first_entry(list, struct request, queuelist);
                if (!blk_mq_get_driver_tag(rq, &hctx, false)) {
@@ -1019,25 +975,20 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
                }
 
                ret = q->mq_ops->queue_rq(hctx, &bd);
-               switch (ret) {
-               case BLK_MQ_RQ_QUEUE_OK:
-                       queued++;
-                       break;
-               case BLK_MQ_RQ_QUEUE_BUSY:
+               if (ret == BLK_STS_RESOURCE) {
                        blk_mq_put_driver_tag_hctx(hctx, rq);
                        list_add(&rq->queuelist, list);
                        __blk_mq_requeue_request(rq);
                        break;
-               default:
-                       pr_err("blk-mq: bad return on queue: %d\n", ret);
-               case BLK_MQ_RQ_QUEUE_ERROR:
+               }
+
+               if (unlikely(ret != BLK_STS_OK)) {
                        errors++;
-                       blk_mq_end_request(rq, -EIO);
-                       break;
+                       blk_mq_end_request(rq, BLK_STS_IOERR);
+                       continue;
                }
 
-               if (ret == BLK_MQ_RQ_QUEUE_BUSY)
-                       break;
+               queued++;
        } while (!list_empty(list));
 
        hctx->dispatched[queued_to_index(queued)]++;
@@ -1075,7 +1026,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
                 * - blk_mq_run_hw_queue() checks whether or not a queue has
                 *   been stopped before rerunning a queue.
                 * - Some but not all block drivers stop a queue before
-                *   returning BLK_MQ_RQ_QUEUE_BUSY. Two exceptions are scsi-mq
+                *   returning BLK_STS_RESOURCE. Two exceptions are scsi-mq
                 *   and dm-rq.
                 */
                if (!blk_mq_sched_needs_restart(hctx) &&
@@ -1427,30 +1378,13 @@ static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
                !blk_queue_nomerges(hctx->queue);
 }
 
-static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
-                                        struct blk_mq_ctx *ctx,
-                                        struct request *rq, struct bio *bio)
+static inline void blk_mq_queue_io(struct blk_mq_hw_ctx *hctx,
+                                  struct blk_mq_ctx *ctx,
+                                  struct request *rq)
 {
-       if (!hctx_allow_merges(hctx) || !bio_mergeable(bio)) {
-               blk_mq_bio_to_request(rq, bio);
-               spin_lock(&ctx->lock);
-insert_rq:
-               __blk_mq_insert_request(hctx, rq, false);
-               spin_unlock(&ctx->lock);
-               return false;
-       } else {
-               struct request_queue *q = hctx->queue;
-
-               spin_lock(&ctx->lock);
-               if (!blk_mq_attempt_merge(q, ctx, bio)) {
-                       blk_mq_bio_to_request(rq, bio);
-                       goto insert_rq;
-               }
-
-               spin_unlock(&ctx->lock);
-               __blk_mq_finish_request(hctx, ctx, rq);
-               return true;
-       }
+       spin_lock(&ctx->lock);
+       __blk_mq_insert_request(hctx, rq, false);
+       spin_unlock(&ctx->lock);
 }
 
 static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
@@ -1461,22 +1395,28 @@ static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
        return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
 }
 
-static void __blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie,
-                                     bool may_sleep)
+static void __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
+                                       struct request *rq,
+                                       blk_qc_t *cookie, bool may_sleep)
 {
        struct request_queue *q = rq->q;
        struct blk_mq_queue_data bd = {
                .rq = rq,
                .last = true,
        };
-       struct blk_mq_hw_ctx *hctx;
        blk_qc_t new_cookie;
-       int ret;
+       blk_status_t ret;
+       bool run_queue = true;
+
+       if (blk_mq_hctx_stopped(hctx)) {
+               run_queue = false;
+               goto insert;
+       }
 
        if (q->elevator)
                goto insert;
 
-       if (!blk_mq_get_driver_tag(rq, &hctx, false))
+       if (!blk_mq_get_driver_tag(rq, NULL, false))
                goto insert;
 
        new_cookie = request_to_qc_t(hctx, rq);
@@ -1487,20 +1427,21 @@ static void __blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie,
         * would have done
         */
        ret = q->mq_ops->queue_rq(hctx, &bd);
-       if (ret == BLK_MQ_RQ_QUEUE_OK) {
+       switch (ret) {
+       case BLK_STS_OK:
                *cookie = new_cookie;
                return;
-       }
-
-       if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
+       case BLK_STS_RESOURCE:
+               __blk_mq_requeue_request(rq);
+               goto insert;
+       default:
                *cookie = BLK_QC_T_NONE;
-               blk_mq_end_request(rq, -EIO);
+               blk_mq_end_request(rq, ret);
                return;
        }
 
-       __blk_mq_requeue_request(rq);
 insert:
-       blk_mq_sched_insert_request(rq, false, true, false, may_sleep);
+       blk_mq_sched_insert_request(rq, false, run_queue, false, may_sleep);
 }
 
 static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
@@ -1508,7 +1449,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 {
        if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
                rcu_read_lock();
-               __blk_mq_try_issue_directly(rq, cookie, false);
+               __blk_mq_try_issue_directly(hctx, rq, cookie, false);
                rcu_read_unlock();
        } else {
                unsigned int srcu_idx;
@@ -1516,7 +1457,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
                might_sleep();
 
                srcu_idx = srcu_read_lock(&hctx->queue_rq_srcu);
-               __blk_mq_try_issue_directly(rq, cookie, true);
+               __blk_mq_try_issue_directly(hctx, rq, cookie, true);
                srcu_read_unlock(&hctx->queue_rq_srcu, srcu_idx);
        }
 }
@@ -1619,9 +1560,12 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
                blk_mq_put_ctx(data.ctx);
 
-               if (same_queue_rq)
+               if (same_queue_rq) {
+                       data.hctx = blk_mq_map_queue(q,
+                                       same_queue_rq->mq_ctx->cpu);
                        blk_mq_try_issue_directly(data.hctx, same_queue_rq,
                                        &cookie);
+               }
        } else if (q->nr_hw_queues > 1 && is_sync) {
                blk_mq_put_ctx(data.ctx);
                blk_mq_bio_to_request(rq, bio);
@@ -1630,11 +1574,12 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
                blk_mq_put_ctx(data.ctx);
                blk_mq_bio_to_request(rq, bio);
                blk_mq_sched_insert_request(rq, false, true, true, true);
-       } else if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
+       } else {
                blk_mq_put_ctx(data.ctx);
+               blk_mq_bio_to_request(rq, bio);
+               blk_mq_queue_io(data.hctx, data.ctx, rq);
                blk_mq_run_hw_queue(data.hctx, true);
-       } else
-               blk_mq_put_ctx(data.ctx);
+       }
 
        return cookie;
 }
@@ -2641,7 +2586,8 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
        return ret;
 }
 
-void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
+static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
+                                                       int nr_hw_queues)
 {
        struct request_queue *q;
 
@@ -2665,6 +2611,13 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
        list_for_each_entry(q, &set->tag_list, tag_set_list)
                blk_mq_unfreeze_queue(q);
 }
+
+void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
+{
+       mutex_lock(&set->tag_list_lock);
+       __blk_mq_update_nr_hw_queues(set, nr_hw_queues);
+       mutex_unlock(&set->tag_list_lock);
+}
 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
 
 /* Enable polling stats and return whether they were already enabled. */