git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git / commitdiff
blk-mq-sched: remove unused 'can_block' arg from blk_mq_sched_insert_request
author    Mike Snitzer <snitzer@redhat.com>
          Wed, 27 Nov 2019 20:18:11 +0000 (17:18 -0300)
committer Marcelo Henrique Cerri <marcelo.cerri@canonical.com>
          Fri, 17 Jan 2020 17:23:12 +0000 (14:23 -0300)
BugLink: https://bugs.launchpad.net/bugs/1848739
After commit:

923218f6166a ("blk-mq: don't allocate driver tag upfront for flush rq")

we no longer use the 'can_block' argument in
blk_mq_sched_insert_request(). Kill it.

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Added actual commit message as to why it's being removed.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
(cherry picked from commit 9e97d2951a7e6ee6e204f87f6bda4ff754a8cede)
[marcelo.cerri@canonical.com: fixed conflict in blk_mq_requeue_work()
 because the commit aef1897cd36d ("blk-mq: insert rq with DONTPREP to
 hctx dispatch list when requeue") was already applied]
Signed-off-by: Marcelo Henrique Cerri <marcelo.cerri@canonical.com>
Acked-by: Stefan Bader <stefan.bader@canonical.com>
Acked-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
Signed-off-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
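
For context, a minimal sketch of the API after this patch (not itself part of the
diff below): blk_mq_sched_insert_request() loses its trailing 'can_block' flag and
every caller simply drops the last argument. No behavioural change is intended,
since the flag has been ignored since 923218f6166a.

    /* Pruned prototype as declared in block/blk-mq-sched.h after this patch */
    void blk_mq_sched_insert_request(struct request *rq, bool at_head,
                                     bool run_queue, bool async);

    /* Example caller, blk_execute_rq_nowait() in block/blk-exec.c */
    if (q->mq_ops) {
            blk_mq_sched_insert_request(rq, at_head, true, false);
            return;
    }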
block/blk-exec.c
block/blk-mq-sched.c
block/blk-mq-sched.h
block/blk-mq.c

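The bracketed backport note above concerns the requeue path: with aef1897cd36d
already applied, requests flagged RQF_DONTPREP go straight onto the hctx dispatch
list instead of back through the I/O scheduler, so the conflicting hunk in
blk_mq_requeue_work() resolves to the shape sketched here (matching the
block/blk-mq.c hunk below):

    /* Resolved requeue logic in blk_mq_requeue_work() (sketch) */
    if (rq->rq_flags & RQF_DONTPREP)
            /* already prepared once: bypass the scheduler entirely */
            blk_mq_request_bypass_insert(rq, false);
    else
            blk_mq_sched_insert_request(rq, true, false, false);
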
diff --git a/block/blk-exec.c b/block/blk-exec.c
index 5c0f3dc446dc7caa1c0cef086740744c1eaafea9..f7b292f1244960b8ed74f199e9150d12517c1b7b 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -61,7 +61,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
         * be reused after dying flag is set
         */
        if (q->mq_ops) {
-               blk_mq_sched_insert_request(rq, at_head, true, false, false);
+               blk_mq_sched_insert_request(rq, at_head, true, false);
                return;
        }
 
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index fc64558241c90eb636866b744421514f4d5af8f7..f3380331e5f3b697bebc1c3e957ee0fa32ee77ed 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -429,7 +429,7 @@ done:
 }
 
 void blk_mq_sched_insert_request(struct request *rq, bool at_head,
-                                bool run_queue, bool async, bool can_block)
+                                bool run_queue, bool async)
 {
        struct request_queue *q = rq->q;
        struct elevator_queue *e = q->elevator;
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index ba1d1418a96dbbd1a394e07d06f7847b4df2c8fe..1e9c9018ace127977acdb67e41fefc20955b1991 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -18,7 +18,7 @@ bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq);
 void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
 
 void blk_mq_sched_insert_request(struct request *rq, bool at_head,
-                                bool run_queue, bool async, bool can_block);
+                                bool run_queue, bool async);
 void blk_mq_sched_insert_requests(struct request_queue *q,
                                  struct blk_mq_ctx *ctx,
                                  struct list_head *list, bool run_queue_async);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index d417804b7f8ffa4959331c8c12c9a07b43188ec1..9a4c6049bbca4ded34774a3f91be46c4f4cfdbb1 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -736,13 +736,13 @@ static void blk_mq_requeue_work(struct work_struct *work)
                if (rq->rq_flags & RQF_DONTPREP)
                        blk_mq_request_bypass_insert(rq, false);
                else
-                       blk_mq_sched_insert_request(rq, true, false, false, true);
+                       blk_mq_sched_insert_request(rq, true, false, false);
        }
 
        while (!list_empty(&rq_list)) {
                rq = list_entry(rq_list.next, struct request, queuelist);
                list_del_init(&rq->queuelist);
-               blk_mq_sched_insert_request(rq, false, false, false, true);
+               blk_mq_sched_insert_request(rq, false, false, false);
        }
 
        blk_mq_run_hw_queues(q, false);
@@ -1732,13 +1732,11 @@ static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
        return ret;
 }
 
-static void __blk_mq_fallback_to_insert(struct blk_mq_hw_ctx *hctx,
-                                       struct request *rq,
+static void __blk_mq_fallback_to_insert(struct request *rq,
                                        bool run_queue, bool bypass_insert)
 {
        if (!bypass_insert)
-               blk_mq_sched_insert_request(rq, false, run_queue, false,
-                                           hctx->flags & BLK_MQ_F_BLOCKING);
+               blk_mq_sched_insert_request(rq, false, run_queue, false);
        else
                blk_mq_request_bypass_insert(rq, run_queue);
 }
@@ -1770,7 +1768,7 @@ static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 
        return __blk_mq_issue_directly(hctx, rq, cookie);
 insert:
-       __blk_mq_fallback_to_insert(hctx, rq, run_queue, bypass_insert);
+       __blk_mq_fallback_to_insert(rq, run_queue, bypass_insert);
        if (bypass_insert)
                return BLK_STS_RESOURCE;
 
@@ -1789,7 +1787,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 
        ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false);
        if (ret == BLK_STS_RESOURCE)
-               __blk_mq_fallback_to_insert(hctx, rq, true, false);
+               __blk_mq_fallback_to_insert(rq, true, false);
        else if (ret != BLK_STS_OK)
                blk_mq_end_request(rq, ret);
 
@@ -1919,7 +1917,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
        } else if (q->elevator) {
                blk_mq_put_ctx(data.ctx);
                blk_mq_bio_to_request(rq, bio);
-               blk_mq_sched_insert_request(rq, false, true, true, true);
+               blk_mq_sched_insert_request(rq, false, true, true);
        } else {
                blk_mq_put_ctx(data.ctx);
                blk_mq_bio_to_request(rq, bio);