static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
-static struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
- unsigned int cpu)
-{
- return per_cpu_ptr(q->queue_ctx, cpu);
-}
-
-/*
- * This assumes per-cpu software queueing queues. They could be per-node
- * as well, for instance. For now this is hardcoded as-is. Note that we don't
- * care about preemption, since we know the ctx's are persistent. This does
- * mean that we can't rely on ctx always matching the currently running CPU.
- */
-static struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
-{
- return __blk_mq_get_ctx(q, get_cpu());
-}
-
-static void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
-{
- put_cpu();
-}
-
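The per-cpu ctx helpers are dropped from blk-mq.c here; the natural reading is that they become shared static inlines in the blk-mq private header so the other blk-mq files can use them. A minimal sketch of that form, assuming block/blk-mq.h as the destination (this excerpt does not show where they actually land):

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
                                                  unsigned int cpu)
{
        return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * get_cpu() pins the caller until blk_mq_put_ctx(); the ctx itself is
 * persistent, so it need not match the running CPU after that.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
        return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
        put_cpu();
}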
/*
* Check if any of the ctx's have pending work in this hardware queue
*/
rq->q->softirq_done_fn(rq);
}
-void __blk_mq_complete_request(struct request *rq)
+static void blk_mq_ipi_complete_request(struct request *rq)
{
struct blk_mq_ctx *ctx = rq->mq_ctx;
bool shared = false;
put_cpu();
}
+void __blk_mq_complete_request(struct request *rq)
+{
+ struct request_queue *q = rq->q;
+
+ if (!q->softirq_done_fn)
+ blk_mq_end_io(rq, rq->errors);
+ else
+ blk_mq_ipi_complete_request(rq);
+}
+
/**
* blk_mq_complete_request - end I/O on a request
* @rq: the request being processed
if (unlikely(blk_should_fake_timeout(q)))
return;
- if (!blk_mark_rq_complete(rq)) {
- if (q->softirq_done_fn)
- __blk_mq_complete_request(rq);
- else
- blk_mq_end_io(rq, rq->errors);
- }
+ if (!blk_mark_rq_complete(rq))
+ __blk_mq_complete_request(rq);
}
EXPORT_SYMBOL(blk_mq_complete_request);
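With this rework, blk_mq_complete_request() always funnels into __blk_mq_complete_request(), which ends the I/O directly via blk_mq_end_io() when no softirq completion handler is registered and otherwise defers to the IPI/softirq path. For context, a driver-side sketch of that contract; the mydrv_* names are hypothetical, while blk_queue_softirq_done() is the existing helper that sets q->softirq_done_fn:

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/interrupt.h>

/* Heavy-weight completion work, run in softirq (or local) context. */
static void mydrv_softirq_done(struct request *rq)
{
        blk_mq_end_io(rq, rq->errors);
}

static irqreturn_t mydrv_irq(int irq, void *data)
{
        struct request *rq = data;      /* hypothetical: one outstanding rq */

        /*
         * Hard-IRQ context: only mark the request complete. Because a
         * softirq_done_fn is registered below, __blk_mq_complete_request()
         * takes the IPI/softirq path; without one, it would end the I/O
         * right here via blk_mq_end_io().
         */
        blk_mq_complete_request(rq);
        return IRQ_HANDLED;
}

static void mydrv_init_queue(struct request_queue *q)
{
        /* Registers q->softirq_done_fn for the deferred completion path. */
        blk_queue_softirq_done(q, mydrv_softirq_done);
}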
blk_mq_bio_to_request(rq, bio);
blk_mq_start_request(rq, true);
+ blk_add_timer(rq);
/*
* For OK queue, we are done. For error, kill it. Any other
{
blk_mq_freeze_queue(q);
+ blk_mq_sysfs_unregister(q);
+
blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues);
/*
blk_mq_map_swqueue(q);
+ blk_mq_sysfs_register(q);
+
blk_mq_unfreeze_queue(q);
}
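The final hunk tightens the remap path used when the CPU topology changes: the per-ctx sysfs entries are now torn down before the software-queue map is rebuilt, and re-registered afterwards, all while the queue is frozen. The surrounding function is not named in this excerpt; a sketch of the resulting ordering, assuming it is the blk-mq queue reinit helper of this period:

/* Sketch of the post-patch remap ordering; the function name is assumed. */
static void blk_mq_queue_reinit(struct request_queue *q)
{
        blk_mq_freeze_queue(q);         /* quiesce: no new I/O enters      */
        blk_mq_sysfs_unregister(q);     /* drop stale per-ctx sysfs entries */

        blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues);
        blk_mq_map_swqueue(q);          /* rebuild ctx -> hctx mapping      */

        blk_mq_sysfs_register(q);       /* re-create sysfs for the new map  */
        blk_mq_unfreeze_queue(q);
}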