blk-cgroup: don't quiesce the queue on policy activate/deactivate
author		Jens Axboe <axboe@fb.com>
		Wed, 18 Jan 2017 22:37:27 +0000 (15:37 -0700)
committer	Jens Axboe <axboe@fb.com>
		Wed, 18 Jan 2017 22:37:27 +0000 (15:37 -0700)
There's no potential harm in quiescing the queue, but it also doesn't
buy us anything. And we can't run the queue async for policy
deactivate, since we could be in the path of tearing the queue down.
If we schedule an async run of the queue at that time, we're racing
with queue teardown after we've already torn most of it down.

Reported-by: Omar Sandoval <osandov@fb.com>
Fixes: 4d199c6f1c84 ("blk-cgroup: ensure that we clear the stop bit on quiesced queues")
Tested-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
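
For reference, the change leaves the same pattern on both the activate and
deactivate paths: a plain freeze (blk-mq) or bypass (legacy) around the
policy update, with no quiesce and no async restart of the hardware queues.
A condensed sketch, assembled from the diff below with the policy
bookkeeping elided:

	if (q->mq_ops)
		blk_mq_freeze_queue(q);		/* drains in-flight requests */
	else
		blk_queue_bypass_start(q);

	/* ... update blkcg policy state under q->queue_lock ... */

	if (q->mq_ops)
		blk_mq_unfreeze_queue(q);
	else
		blk_queue_bypass_end(q);

Since blk_mq_freeze_queue() already waits for in-flight requests to drain,
the quiesce it replaces added no extra guarantee, and dropping the
blk_mq_start_stopped_hw_queues() call removes the async queue run that
could race with teardown.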
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index efb97ec37eee58022c106e04e318b4eae0457c17..fb59a3edc7784052e1c92a84826b6e23ad6efc23 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1223,10 +1223,9 @@ int blkcg_activate_policy(struct request_queue *q,
        if (blkcg_policy_enabled(q, pol))
                return 0;
 
-       if (q->mq_ops) {
+       if (q->mq_ops)
                blk_mq_freeze_queue(q);
-               blk_mq_quiesce_queue(q);
-       } else
+       else
                blk_queue_bypass_start(q);
 pd_prealloc:
        if (!pd_prealloc) {
@@ -1265,10 +1264,9 @@ pd_prealloc:
 
        spin_unlock_irq(q->queue_lock);
 out_bypass_end:
-       if (q->mq_ops) {
+       if (q->mq_ops)
                blk_mq_unfreeze_queue(q);
-               blk_mq_start_stopped_hw_queues(q, true);
-       } else
+       else
                blk_queue_bypass_end(q);
        if (pd_prealloc)
                pol->pd_free_fn(pd_prealloc);
@@ -1292,10 +1290,9 @@ void blkcg_deactivate_policy(struct request_queue *q,
        if (!blkcg_policy_enabled(q, pol))
                return;
 
-       if (q->mq_ops) {
+       if (q->mq_ops)
                blk_mq_freeze_queue(q);
-               blk_mq_quiesce_queue(q);
-       } else
+       else
                blk_queue_bypass_start(q);
 
        spin_lock_irq(q->queue_lock);
@@ -1318,10 +1315,9 @@ void blkcg_deactivate_policy(struct request_queue *q,
 
        spin_unlock_irq(q->queue_lock);
 
-       if (q->mq_ops) {
+       if (q->mq_ops)
                blk_mq_unfreeze_queue(q);
-               blk_mq_start_stopped_hw_queues(q, true);
-       } else
+       else
                blk_queue_bypass_end(q);
 }
 EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);