+/*
+ * Block multiqueue core code
+ *
+ * Copyright (C) 2013-2014 Jens Axboe
+ * Copyright (C) 2013-2014 Christoph Hellwig
+ */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
}
-static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx,
- struct blk_mq_ctx *ctx,
- gfp_t gfp, bool reserved)
-{
- struct request *rq;
- unsigned int tag;
-
- tag = blk_mq_get_tag(hctx, &ctx->last_tag, gfp, reserved);
- if (tag != BLK_MQ_TAG_FAIL) {
- rq = hctx->tags->rqs[tag];
-
- rq->cmd_flags = 0;
- if (blk_mq_tag_busy(hctx)) {
- rq->cmd_flags = REQ_MQ_INFLIGHT;
- atomic_inc(&hctx->nr_active);
- }
-
- rq->tag = tag;
- return rq;
- }
-
- return NULL;
-}
-
static int blk_mq_queue_enter(struct request_queue *q)
{
int ret;
ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
}
-static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
- int rw, gfp_t gfp,
- bool reserved)
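+/*
+ * Grab a tag for @ctx on @hctx and map it to the matching
+ * pre-allocated request. Returns NULL if no tag is available;
+ * retrying is left to the caller.
+ */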
+static struct request *
+__blk_mq_alloc_request(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
+ struct blk_mq_ctx *ctx, int rw, gfp_t gfp, bool reserved)
{
struct request *rq;
+ unsigned int tag;
- do {
- struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
- struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu);
-
- rq = __blk_mq_alloc_request(hctx, ctx, gfp & ~__GFP_WAIT,
- reserved);
- if (rq) {
- blk_mq_rq_ctx_init(q, ctx, rq, rw);
- break;
- }
+ tag = blk_mq_get_tag(hctx, &ctx->last_tag, gfp, reserved);
+ if (tag != BLK_MQ_TAG_FAIL) {
+ rq = hctx->tags->rqs[tag];
- if (gfp & __GFP_WAIT) {
- __blk_mq_run_hw_queue(hctx);
- blk_mq_put_ctx(ctx);
- } else {
- blk_mq_put_ctx(ctx);
- break;
+ rq->cmd_flags = 0;
+ if (blk_mq_tag_busy(hctx)) {
+ rq->cmd_flags = REQ_MQ_INFLIGHT;
+ atomic_inc(&hctx->nr_active);
}
- blk_mq_wait_for_tags(hctx, reserved);
- } while (1);
+ rq->tag = tag;
+ blk_mq_rq_ctx_init(q, ctx, rq, rw);
+ return rq;
+ }
- return rq;
+ return NULL;
}
-struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp)
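+/*
+ * Allocate a request for @q. When @reserved is true the tag comes
+ * from the reserved tag pool. May sleep if __GFP_WAIT is set in @gfp.
+ */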
+struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
+ bool reserved)
{
+ struct blk_mq_ctx *ctx;
+ struct blk_mq_hw_ctx *hctx;
struct request *rq;
if (blk_mq_queue_enter(q))
return NULL;
- rq = blk_mq_alloc_request_pinned(q, rw, gfp, false);
- if (rq)
- blk_mq_put_ctx(rq->mq_ctx);
- return rq;
-}
-EXPORT_SYMBOL(blk_mq_alloc_request);
-
-struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw,
- gfp_t gfp)
-{
- struct request *rq;
+ ctx = blk_mq_get_ctx(q);
+ hctx = q->mq_ops->map_queue(q, ctx->cpu);
- if (blk_mq_queue_enter(q))
- return NULL;
+ rq = __blk_mq_alloc_request(q, hctx, ctx, rw, gfp & ~__GFP_WAIT,
+ reserved);
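+ /*
+ * The first attempt strips __GFP_WAIT and so never blocks. If no
+ * tag was available and the caller can sleep, run the hardware
+ * queue to free up tags, then retry on a freshly mapped ctx with
+ * the full gfp mask, which may wait for a tag.
+ */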
+ if (!rq && (gfp & __GFP_WAIT)) {
+ __blk_mq_run_hw_queue(hctx);
+ blk_mq_put_ctx(ctx);
- rq = blk_mq_alloc_request_pinned(q, rw, gfp, true);
- if (rq)
- blk_mq_put_ctx(rq->mq_ctx);
+ ctx = blk_mq_get_ctx(q);
+ hctx = q->mq_ops->map_queue(q, ctx->cpu);
+ rq = __blk_mq_alloc_request(q, hctx, ctx, rw, gfp, reserved);
+ }
+ blk_mq_put_ctx(ctx);
return rq;
}
-EXPORT_SYMBOL(blk_mq_alloc_reserved_request);
+EXPORT_SYMBOL(blk_mq_alloc_request);
static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
struct blk_mq_ctx *ctx, struct request *rq)
**/
void blk_mq_complete_request(struct request *rq)
{
- if (unlikely(blk_should_fake_timeout(rq->q)))
+ struct request_queue *q = rq->q;
+
+ if (unlikely(blk_should_fake_timeout(q)))
return;
- if (!blk_mark_rq_complete(rq))
- __blk_mq_complete_request(rq);
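+ /*
+ * If the driver did not register a softirq completion handler,
+ * end the I/O directly rather than deferring the completion.
+ */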
+ if (!blk_mark_rq_complete(rq)) {
+ if (q->softirq_done_fn)
+ __blk_mq_complete_request(rq);
+ else
+ blk_mq_end_io(rq, rq->errors);
+ }
}
EXPORT_SYMBOL(blk_mq_complete_request);
blk_clear_rq_complete(rq);
BUG_ON(blk_queued_rq(rq));
- blk_mq_insert_request(rq, true, true, false);
+ blk_mq_add_to_requeue_list(rq, true);
}
EXPORT_SYMBOL(blk_mq_requeue_request);
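+/*
+ * Drain the requeue list: requests marked REQ_SOFTBARRIER asked for
+ * head insertion and are reinserted first, the remainder are appended
+ * at the tail, and the hardware queues are then rerun.
+ */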
+static void blk_mq_requeue_work(struct work_struct *work)
+{
+ struct request_queue *q =
+ container_of(work, struct request_queue, requeue_work);
+ LIST_HEAD(rq_list);
+ struct request *rq, *next;
+ unsigned long flags;
+
+ spin_lock_irqsave(&q->requeue_lock, flags);
+ list_splice_init(&q->requeue_list, &rq_list);
+ spin_unlock_irqrestore(&q->requeue_lock, flags);
+
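+ /* first pass: reinsert the requests flagged for head insertion */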
+ list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
+ if (!(rq->cmd_flags & REQ_SOFTBARRIER))
+ continue;
+
+ rq->cmd_flags &= ~REQ_SOFTBARRIER;
+ list_del_init(&rq->queuelist);
+ blk_mq_insert_request(rq, true, false, false);
+ }
+
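+ /* second pass: everything left is inserted at the tail */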
+ while (!list_empty(&rq_list)) {
+ rq = list_entry(rq_list.next, struct request, queuelist);
+ list_del_init(&rq->queuelist);
+ blk_mq_insert_request(rq, false, false, false);
+ }
+
+ blk_mq_run_queues(q, false);
+}
+
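+/*
+ * Put a request on the requeue list for later reinsertion. The list
+ * is drained from blk_mq_requeue_work(), which presumably gets kicked
+ * via blk_mq_kick_requeue_list() once the caller is done queueing.
+ */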
+void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
+{
+ struct request_queue *q = rq->q;
+ unsigned long flags;
+
+ /*
+ * We abuse this flag that is otherwise used by the I/O scheduler to
+ * request head insertion from the workqueue.
+ */
+ BUG_ON(rq->cmd_flags & REQ_SOFTBARRIER);
+
+ spin_lock_irqsave(&q->requeue_lock, flags);
+ if (at_head) {
+ rq->cmd_flags |= REQ_SOFTBARRIER;
+ list_add(&rq->queuelist, &q->requeue_list);
+ } else {
+ list_add_tail(&rq->queuelist, &q->requeue_list);
+ }
+ spin_unlock_irqrestore(&q->requeue_lock, flags);
+}
+EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
+
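+/*
+ * Schedule the requeue work from kblockd to process requests that were
+ * added with blk_mq_add_to_requeue_list().
+ */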
+void blk_mq_kick_requeue_list(struct request_queue *q)
+{
+ kblockd_schedule_work(&q->requeue_work);
+}
+EXPORT_SYMBOL(blk_mq_kick_requeue_list);
+
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
{
return tags->rqs[tag];
rw |= REQ_SYNC;
trace_block_getrq(q, bio, rw);
- rq = __blk_mq_alloc_request(hctx, ctx, GFP_ATOMIC, false);
- if (likely(rq))
- blk_mq_rq_ctx_init(q, ctx, rq, rw);
- else {
+ rq = __blk_mq_alloc_request(q, hctx, ctx, rw, GFP_ATOMIC, false);
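+ /*
+ * No tag was available without waiting: run the hardware queue to
+ * free up tags and retry in blocking mode on a fresh ctx mapping.
+ */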
+ if (unlikely(!rq)) {
+ __blk_mq_run_hw_queue(hctx);
blk_mq_put_ctx(ctx);
trace_block_sleeprq(q, bio, rw);
- rq = blk_mq_alloc_request_pinned(q, rw, __GFP_WAIT|GFP_ATOMIC,
- false);
- ctx = rq->mq_ctx;
+
+ ctx = blk_mq_get_ctx(q);
hctx = q->mq_ops->map_queue(q, ctx->cpu);
+ rq = __blk_mq_alloc_request(q, hctx, ctx, rw,
+ __GFP_WAIT|GFP_ATOMIC, false);
}
hctx->queued++;
}
EXPORT_SYMBOL(blk_mq_map_queue);
-struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *set,
- unsigned int hctx_index)
-{
- return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL,
- set->numa_node);
-}
-EXPORT_SYMBOL(blk_mq_alloc_single_hw_queue);
-
-void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *hctx,
- unsigned int hctx_index)
-{
- kfree(hctx);
-}
-EXPORT_SYMBOL(blk_mq_free_single_hw_queue);
-
static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
struct blk_mq_tags *tags, unsigned int hctx_idx)
{
queue_for_each_hw_ctx(q, hctx, i) {
free_cpumask_var(hctx->cpumask);
- set->ops->free_hctx(hctx, i);
+ kfree(hctx);
}
}
struct blk_mq_hw_ctx **hctxs;
struct blk_mq_ctx *ctx;
struct request_queue *q;
+ unsigned int *map;
int i;
ctx = alloc_percpu(struct blk_mq_ctx);
if (!hctxs)
goto err_percpu;
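+ /*
+ * Build the CPU to hardware queue map first so that each hctx can
+ * be allocated on the NUMA node that will be driving it.
+ */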
+ map = blk_mq_make_queue_map(set);
+ if (!map)
+ goto err_map;
+
for (i = 0; i < set->nr_hw_queues; i++) {
- hctxs[i] = set->ops->alloc_hctx(set, i);
+ int node = blk_mq_hw_queue_to_node(map, i);
+
+ hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx),
+ GFP_KERNEL, node);
if (!hctxs[i])
goto err_hctxs;
goto err_hctxs;
atomic_set(&hctxs[i]->nr_active, 0);
- hctxs[i]->numa_node = NUMA_NO_NODE;
+ hctxs[i]->numa_node = node;
hctxs[i]->queue_num = i;
}
if (percpu_counter_init(&q->mq_usage_counter, 0))
goto err_map;
- q->mq_map = blk_mq_make_queue_map(set);
- if (!q->mq_map)
- goto err_map;
-
setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
blk_queue_rq_timeout(q, 30000);
q->nr_queues = nr_cpu_ids;
q->nr_hw_queues = set->nr_hw_queues;
+ q->mq_map = map;
q->queue_ctx = ctx;
q->queue_hw_ctx = hctxs;
q->sg_reserved_size = INT_MAX;
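+ /* set up the requeue list, its lock, and the work item that drains it */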
+ INIT_WORK(&q->requeue_work, blk_mq_requeue_work);
+ INIT_LIST_HEAD(&q->requeue_list);
+ spin_lock_init(&q->requeue_lock);
+
if (q->nr_hw_queues > 1)
blk_queue_make_request(q, blk_mq_make_request);
else
err_flush_rq:
kfree(q->flush_rq);
err_hw:
- kfree(q->mq_map);
-err_map:
blk_cleanup_queue(q);
err_hctxs:
+ kfree(map);
for (i = 0; i < set->nr_hw_queues; i++) {
if (!hctxs[i])
break;
free_cpumask_var(hctxs[i]->cpumask);
- set->ops->free_hctx(hctxs[i], i);
+ kfree(hctxs[i]);
}
+err_map:
kfree(hctxs);
err_percpu:
free_percpu(ctx);
if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
return -EINVAL;
- if (!set->nr_hw_queues ||
- !set->ops->queue_rq || !set->ops->map_queue ||
- !set->ops->alloc_hctx || !set->ops->free_hctx)
+ if (!set->nr_hw_queues || !set->ops->queue_rq || !set->ops->map_queue)
return -EINVAL;