blk-mq: only apply active queue tag throttling for driver tags
author     Jens Axboe <axboe@fb.com>
           Wed, 25 Jan 2017 15:11:38 +0000 (08:11 -0700)
committer  Jens Axboe <axboe@fb.com>
           Wed, 25 Jan 2017 15:11:38 +0000 (08:11 -0700)
If we have a scheduler attached, we have two sets of tags. We don't
want to apply our active queue throttling to the scheduler side of
tags; it only applies to driver tags, since those are the resource
we need to dispatch an IO.

Signed-off-by: Jens Axboe <axboe@fb.com>
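
For context, the throttling in question lives in hctx_may_queue(), which caps how
many requests each queue may have in flight when a tag set is shared between
several request queues. A simplified sketch of that check, assuming the blk-mq
code of this era (not verbatim kernel source):

static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct sbitmap_queue *bt)
{
	unsigned int depth, users;

	/* Throttling only matters when the tag set is shared between queues. */
	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
		return true;
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return true;

	/* Give each active queue a roughly equal share of the driver tags. */
	users = atomic_read(&hctx->tags->active_queues);
	if (!users)
		return true;

	depth = max((bt->sb.depth + users - 1) / users, 4U);
	return atomic_read(&hctx->nr_active) < depth;
}

Since this fair share is sized against the driver tag depth, it should only gate
driver-tag allocation; the BLK_MQ_REQ_INTERNAL check added below enforces exactly
that.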
block/blk-mq-tag.c
block/blk-mq.c

diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index a49ec77c415a9dcd92b4c471675fa2bc9ea2658c..1b156ca79af62f2f66d77f4611b4de23a619b89b 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -90,9 +90,11 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
        return atomic_read(&hctx->nr_active) < depth;
 }
 
-static int __blk_mq_get_tag(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt)
+static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
+                           struct sbitmap_queue *bt)
 {
-       if (!hctx_may_queue(hctx, bt))
+       if (!(data->flags & BLK_MQ_REQ_INTERNAL) &&
+           !hctx_may_queue(data->hctx, bt))
                return -1;
        return __sbitmap_queue_get(bt);
 }
@@ -118,7 +120,7 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
                tag_offset = tags->nr_reserved_tags;
        }
 
-       tag = __blk_mq_get_tag(data->hctx, bt);
+       tag = __blk_mq_get_tag(data, bt);
        if (tag != -1)
                goto found_tag;
 
@@ -129,7 +131,7 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
        do {
                prepare_to_wait(&ws->wait, &wait, TASK_UNINTERRUPTIBLE);
 
-               tag = __blk_mq_get_tag(data->hctx, bt);
+               tag = __blk_mq_get_tag(data, bt);
                if (tag != -1)
                        break;
 
@@ -144,7 +146,7 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
                 * Retry tag allocation after running the hardware queue,
                 * as running the queue may also have found completions.
                 */
-               tag = __blk_mq_get_tag(data->hctx, bt);
+               tag = __blk_mq_get_tag(data, bt);
                if (tag != -1)
                        break;
 
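The same BLK_MQ_REQ_INTERNAL flag already decides which tag set an allocation
draws from. A hedged sketch of that helper as it looks in this series
(blk_mq_tags_from_data(), simplified):

static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	/* Scheduler-internal allocations come from the per-hctx sched_tags set. */
	if (data->flags & BLK_MQ_REQ_INTERNAL)
		return data->hctx->sched_tags;

	/* Everything else needs a real driver tag, now subject to hctx_may_queue(). */
	return data->hctx->tags;
}

After this patch the throttle and the tag-set selection agree: only allocations
that end up in hctx->tags pass through hctx_may_queue().
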
diff --git a/block/blk-mq.c b/block/blk-mq.c
index ee69e5e8976954958a93190b7b1b83d13d52413c..dcb567642db72c0ab01831ee23ba915c665ac82a 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -230,15 +230,14 @@ struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data,
 
                rq = tags->static_rqs[tag];
 
-               if (blk_mq_tag_busy(data->hctx)) {
-                       rq->rq_flags = RQF_MQ_INFLIGHT;
-                       atomic_inc(&data->hctx->nr_active);
-               }
-
                if (data->flags & BLK_MQ_REQ_INTERNAL) {
                        rq->tag = -1;
                        rq->internal_tag = tag;
                } else {
+                       if (blk_mq_tag_busy(data->hctx)) {
+                               rq->rq_flags = RQF_MQ_INFLIGHT;
+                               atomic_inc(&data->hctx->nr_active);
+                       }
                        rq->tag = tag;
                        rq->internal_tag = -1;
                }
@@ -869,6 +868,10 @@ done:
 
        rq->tag = blk_mq_get_tag(&data);
        if (rq->tag >= 0) {
+               if (blk_mq_tag_busy(data.hctx)) {
+                       rq->rq_flags |= RQF_MQ_INFLIGHT;
+                       atomic_inc(&data.hctx->nr_active);
+               }
                data.hctx->tags->rqs[rq->tag] = rq;
                goto done;
        }
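
The second hunk sits in the dispatch-time path where a request that was allocated
with a scheduler tag later acquires a real driver tag (blk_mq_get_driver_tag() in
this series). A condensed sketch of that path after the patch, with the wait and
error handling omitted and a hypothetical function name:

static bool get_driver_tag_sketch(struct request *rq, struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_alloc_data data = {
		.q    = rq->q,
		.hctx = hctx,
		.ctx  = rq->mq_ctx,
		/* no BLK_MQ_REQ_INTERNAL: this allocation must be a driver tag */
	};

	if (rq->tag != -1)
		return true;		/* already holds a driver tag */

	rq->tag = blk_mq_get_tag(&data);
	if (rq->tag < 0)
		return false;

	/* Shared-tag accounting now happens only once a driver tag is taken. */
	if (blk_mq_tag_busy(data.hctx)) {
		rq->rq_flags |= RQF_MQ_INFLIGHT;
		atomic_inc(&data.hctx->nr_active);
	}
	data.hctx->tags->rqs[rq->tag] = rq;
	return true;
}

Together with the __blk_mq_alloc_request() change above, RQF_MQ_INFLIGHT and
hctx->nr_active now track driver tags exclusively, matching the resource that
hctx_may_queue() throttles.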