block: Make q_usage_counter also track legacy requests

Author:     Ming Lei <ming.lei@redhat.com>
AuthorDate: Thu, 9 Nov 2017 18:49:53 +0000 (10:49 -0800)
Commit:     Jens Axboe <axboe@kernel.dk>
CommitDate: Sat, 11 Nov 2017 02:53:25 +0000 (19:53 -0700)

This patch makes it possible to pause request allocation for
the legacy block layer by calling blk_mq_freeze_queue() and
blk_mq_unfreeze_queue().
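
A minimal usage sketch of what this enables (not part of the patch; the
helper function below is hypothetical, while blk_mq_freeze_queue() and
blk_mq_unfreeze_queue() are the existing APIs that now also cover legacy
request queues):

    #include <linux/blkdev.h>
    #include <linux/blk-mq.h>

    /* Hypothetical helper, shown only to illustrate the new guarantee. */
    static void example_quiesce_and_reconfigure(struct request_queue *q)
    {
            /*
             * Kills and drains q_usage_counter; after this patch the
             * counter is also taken by the legacy blk_old_get_request()
             * and blk_queue_bio() paths, so legacy request allocation
             * pauses here as well.
             */
            blk_mq_freeze_queue(q);

            /* ... safely update queue/device state with no requests
             * allocated or in flight ... */

            blk_mq_unfreeze_queue(q);       /* resume request allocation */
    }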

Signed-off-by: Ming Lei <ming.lei@redhat.com>
[ bvanassche: Combined two patches into one, edited a comment and made sure
  REQ_NOWAIT is handled properly in blk_old_get_request() ]
Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Tested-by: Martin Steigerwald <martin@lichtvoll.de>
Tested-by: Oleksandr Natalenko <oleksandr@natalenko.name>
Cc: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
 block/blk-core.c | 12 ++++++++++++
 block/blk-mq.c   | 10 ++--------
 2 files changed, 14 insertions(+), 8 deletions(-)

diff --git a/block/blk-core.c b/block/blk-core.c
index 5e81dcf4690a6b964a57e6a693dbb65f0ac120da..a4362849059afdd0e726900965ae64dc6c3b4296 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -612,6 +612,9 @@ void blk_set_queue_dying(struct request_queue *q)
                }
                spin_unlock_irq(q->queue_lock);
        }
+
+       /* Make blk_queue_enter() reexamine the DYING flag. */
+       wake_up_all(&q->mq_freeze_wq);
 }
 EXPORT_SYMBOL_GPL(blk_set_queue_dying);
 
@@ -1398,16 +1401,22 @@ static struct request *blk_old_get_request(struct request_queue *q,
                                           unsigned int op, gfp_t gfp_mask)
 {
 {
        struct request *rq;
+       int ret = 0;
 
        WARN_ON_ONCE(q->mq_ops);
 
        /* create ioc upfront */
        create_io_context(gfp_mask, q->node);
 
+       ret = blk_queue_enter(q, !(gfp_mask & __GFP_DIRECT_RECLAIM) ||
+                             (op & REQ_NOWAIT));
+       if (ret)
+               return ERR_PTR(ret);
        spin_lock_irq(q->queue_lock);
        rq = get_request(q, op, NULL, gfp_mask);
        if (IS_ERR(rq)) {
                spin_unlock_irq(q->queue_lock);
+               blk_queue_exit(q);
                return rq;
        }
 
@@ -1579,6 +1588,7 @@ void __blk_put_request(struct request_queue *q, struct request *req)
                blk_free_request(rl, req);
                freed_request(rl, sync, rq_flags);
                blk_put_rl(rl);
+               blk_queue_exit(q);
        }
 }
 EXPORT_SYMBOL_GPL(__blk_put_request);
@@ -1860,8 +1870,10 @@ get_rq:
         * Grab a free request. This is might sleep but can not fail.
         * Returns with the queue unlocked.
         */
+       blk_queue_enter_live(q);
        req = get_request(q, bio->bi_opf, bio, GFP_NOIO);
        if (IS_ERR(req)) {
+               blk_queue_exit(q);
                __wbt_done(q->rq_wb, wb_acct);
                if (PTR_ERR(req) == -ENOMEM)
                        bio->bi_status = BLK_STS_RESOURCE;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index fed8165973a38aacda6daea889a31efa3c261424..7173d4bd64afdbab45d8256ac22c1c85b2085ab4 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -126,7 +126,8 @@ void blk_freeze_queue_start(struct request_queue *q)
        freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
        if (freeze_depth == 1) {
                percpu_ref_kill(&q->q_usage_counter);
-               blk_mq_run_hw_queues(q, false);
+               if (q->mq_ops)
+                       blk_mq_run_hw_queues(q, false);
        }
 }
 EXPORT_SYMBOL_GPL(blk_freeze_queue_start);
@@ -256,13 +257,6 @@ void blk_mq_wake_waiters(struct request_queue *q)
        queue_for_each_hw_ctx(q, hctx, i)
                if (blk_mq_hw_queue_mapped(hctx))
                        blk_mq_tag_wakeup_all(hctx->tags, true);
-
-       /*
-        * If we are called because the queue has now been marked as
-        * dying, we need to ensure that processes currently waiting on
-        * the queue are notified as well.
-        */
-       wake_up_all(&q->mq_freeze_wq);
 }
 
 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)