/*
 * blk-mq scheduling framework
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blk-mq.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"
#include "blk-wbt.h"

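/*
 * Free any per-hctx data the I/O scheduler attached to the hardware queues
 * of @q, invoking the scheduler's @exit callback first when one is given.
 */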
void blk_mq_sched_free_hctx_data(struct request_queue *q,
				 void (*exit)(struct blk_mq_hw_ctx *))
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (exit && hctx->sched_data)
			exit(hctx);
		kfree(hctx->sched_data);
		hctx->sched_data = NULL;
	}
}
EXPORT_SYMBOL_GPL(blk_mq_sched_free_hctx_data);

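/*
 * Associate @rq with the io_context of the task that submitted @bio: look
 * up the icq under the queue lock, create one if none exists yet, and take
 * a reference on the io_context before attaching the icq to the request.
 */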
void blk_mq_sched_assign_ioc(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;
	struct io_context *ioc = rq_ioc(bio);
	struct io_cq *icq;

	spin_lock_irq(q->queue_lock);
	icq = ioc_lookup_icq(ioc, q);
	spin_unlock_irq(q->queue_lock);

	if (!icq) {
		icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
		if (!icq)
			return;
	}
	get_io_context(icq->ioc);
	rq->elv.icq = icq;
}

/*
 * Mark a hardware queue as needing a restart. For shared queues, maintain
 * a count of how many hardware queues are marked for restart.
 */
static void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		return;

	if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
		struct request_queue *q = hctx->queue;

		if (!test_and_set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
			atomic_inc(&q->shared_hctx_restart);
	} else
		set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}

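/*
 * Kick a hardware queue that was previously marked as needing a restart:
 * clear the restart flag and re-run the queue if it still has pending work.
 */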
void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
{
	if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		return;

	clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);

	if (blk_mq_hctx_has_pending(hctx)) {
		blk_mq_run_hw_queue(hctx, true);
	}
}

/* return true if hctx need to run again */
static bool blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct elevator_queue *e = q->elevator;
	LIST_HEAD(rq_list);

	do {
		struct request *rq;
		blk_status_t ret;

		if (e->type->ops.mq.has_work &&
				!e->type->ops.mq.has_work(hctx))
			break;

		ret = blk_mq_get_dispatch_budget(hctx);
		if (ret == BLK_STS_RESOURCE)
			return true;

		rq = e->type->ops.mq.dispatch_request(hctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(hctx);
			break;
		} else if (ret != BLK_STS_OK) {
			blk_mq_end_request(rq, ret);
			continue;
		}

		/*
		 * Now this rq owns the budget which has to be released
		 * if this rq won't be queued to driver via .queue_rq()
		 * in blk_mq_dispatch_rq_list().
		 */
		list_add(&rq->queuelist, &rq_list);
	} while (blk_mq_dispatch_rq_list(q, &rq_list, true));

	return false;
}

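/*
 * Return the software queue that follows @ctx on @hctx, wrapping back to
 * the first one; used to round-robin dispatch across the software queues.
 */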
static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx,
					  struct blk_mq_ctx *ctx)
{
	unsigned idx = ctx->index_hw;

	if (++idx == hctx->nr_ctx)
		idx = 0;

	return hctx->ctxs[idx];
}

/* return true if hctx need to run again */
static bool blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	LIST_HEAD(rq_list);
	struct blk_mq_ctx *ctx = READ_ONCE(hctx->dispatch_from);

	do {
		struct request *rq;
		blk_status_t ret;

		if (!sbitmap_any_bit_set(&hctx->ctx_map))
			break;

		ret = blk_mq_get_dispatch_budget(hctx);
		if (ret == BLK_STS_RESOURCE)
			return true;

		rq = blk_mq_dequeue_from_ctx(hctx, ctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(hctx);
			break;
		} else if (ret != BLK_STS_OK) {
			blk_mq_end_request(rq, ret);
			continue;
		}

		/*
		 * Now this rq owns the budget which has to be released
		 * if this rq won't be queued to driver via .queue_rq()
		 * in blk_mq_dispatch_rq_list().
		 */
		list_add(&rq->queuelist, &rq_list);

		/* round robin for fair dispatch */
		ctx = blk_mq_next_ctx(hctx, rq->mq_ctx);

	} while (blk_mq_dispatch_rq_list(q, &rq_list, true));

	WRITE_ONCE(hctx->dispatch_from, ctx);

	return false;
}

/* return true if hw queue need to be run again */
bool blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct elevator_queue *e = q->elevator;
	const bool has_sched_dispatch = e && e->type->ops.mq.dispatch_request;
	LIST_HEAD(rq_list);
	bool run_queue = false;

	/* RCU or SRCU read lock is needed before checking quiesced flag */
	if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
		return false;

	hctx->run++;

	/*
	 * If we have previous entries on our dispatch list, grab them first for
	 * more fair dispatch.
	 */
	if (!list_empty_careful(&hctx->dispatch)) {
		spin_lock(&hctx->lock);
		if (!list_empty(&hctx->dispatch))
			list_splice_init(&hctx->dispatch, &rq_list);
		spin_unlock(&hctx->lock);
	}

	/*
	 * Only ask the scheduler for requests, if we didn't have residual
	 * requests from the dispatch list. This is to avoid the case where
	 * we only ever dispatch a fraction of the requests available because
	 * of low device queue depth. Once we pull requests out of the IO
	 * scheduler, we can no longer merge or sort them. So it's best to
	 * leave them there for as long as we can. Mark the hw queue as
	 * needing a restart in that case.
	 *
	 * We want to dispatch from the scheduler if there was nothing
	 * on the dispatch list or we were able to dispatch from the
	 * dispatch list.
	 */
	if (!list_empty(&rq_list)) {
		blk_mq_sched_mark_restart_hctx(hctx);
		if (blk_mq_dispatch_rq_list(q, &rq_list, false)) {
			if (has_sched_dispatch)
				run_queue = blk_mq_do_dispatch_sched(hctx);
			else
				run_queue = blk_mq_do_dispatch_ctx(hctx);
		}
	} else if (has_sched_dispatch) {
		run_queue = blk_mq_do_dispatch_sched(hctx);
	} else if (q->mq_ops->get_budget) {
		/*
		 * If we need to get budget before queuing request, we
		 * dequeue request one by one from sw queue for avoiding
		 * to mess up I/O merge when dispatch runs out of resource.
		 *
		 * TODO: get more budgets, and dequeue more requests in
		 * one time.
		 */
		run_queue = blk_mq_do_dispatch_ctx(hctx);
	} else {
		blk_mq_flush_busy_ctxs(hctx, &rq_list);
		blk_mq_dispatch_rq_list(q, &rq_list, false);
	}

	if (run_queue && !blk_mq_sched_needs_restart(hctx) &&
	    !test_bit(BLK_MQ_S_TAG_WAITING, &hctx->state)) {
		blk_mq_sched_mark_restart_hctx(hctx);
		return true;
	}

	return false;
}

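/*
 * Ask the elevator whether @bio can be merged into an existing request, and
 * carry out the merge if so. If the newly grown request could in turn be
 * merged with a neighbouring request, the now-redundant request is returned
 * through @merged_request for the caller to free. Returns true if the bio
 * was merged.
 */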
bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
			    struct request **merged_request)
{
	struct request *rq;

	switch (elv_merge(q, &rq, bio)) {
	case ELEVATOR_BACK_MERGE:
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (!bio_attempt_back_merge(q, rq, bio))
			return false;
		*merged_request = attempt_back_merge(q, rq);
		if (!*merged_request)
			elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);
		return true;
	case ELEVATOR_FRONT_MERGE:
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (!bio_attempt_front_merge(q, rq, bio))
			return false;
		*merged_request = attempt_front_merge(q, rq);
		if (!*merged_request)
			elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
		return true;
	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);

/*
 * Reverse check our software queue for entries that we could potentially
 * merge with. Currently includes a hand-wavy stop count of 8, to not spend
 * too much time checking for merges.
 */
static bool blk_mq_attempt_merge(struct request_queue *q,
				 struct blk_mq_ctx *ctx, struct bio *bio)
{
	struct request *rq;
	int checked = 8;

	lockdep_assert_held(&ctx->lock);

	list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
		bool merged = false;

		if (!checked--)
			break;

		if (!blk_rq_merge_ok(rq, bio))
			continue;

		switch (blk_try_merge(rq, bio)) {
		case ELEVATOR_BACK_MERGE:
			if (blk_mq_sched_allow_merge(q, rq, bio))
				merged = bio_attempt_back_merge(q, rq, bio);
			break;
		case ELEVATOR_FRONT_MERGE:
			if (blk_mq_sched_allow_merge(q, rq, bio))
				merged = bio_attempt_front_merge(q, rq, bio);
			break;
		case ELEVATOR_DISCARD_MERGE:
			merged = bio_attempt_discard_merge(q, rq, bio);
			break;
		default:
			continue;
		}

		if (merged)
			ctx->rq_merged++;
		return merged;
	}

	return false;
}

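/*
 * Try to merge @bio before allocating a new request: hand it to the
 * elevator's ->bio_merge() hook if one is registered, otherwise fall back
 * to the default per-sw-queue merge when the driver allows merging.
 */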
bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
{
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
	bool ret = false;

	if (e && e->type->ops.mq.bio_merge) {
		blk_mq_put_ctx(ctx);
		return e->type->ops.mq.bio_merge(hctx, bio);
	}

	if (hctx->flags & BLK_MQ_F_SHOULD_MERGE) {
		/* default per sw-queue merge */
		spin_lock(&ctx->lock);
		ret = blk_mq_attempt_merge(q, ctx, bio);
		spin_unlock(&ctx->lock);
	}

	blk_mq_put_ctx(ctx);
	return ret;
}

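/*
 * Attempt to merge @rq with a request already queued in the elevator at
 * insertion time; only mergeable requests are considered.
 */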
bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq)
{
	return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);

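/*
 * Helper for I/O schedulers to emit the blktrace insert event once they
 * have queued @rq internally.
 */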
void blk_mq_sched_request_inserted(struct request *rq)
{
	trace_block_rq_insert(rq->q, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_request_inserted);

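/*
 * Decide whether @rq should bypass the I/O scheduler on insert: requests
 * without a driver tag are marked RQF_SORTED and go through the scheduler,
 * while requests that already own a tag are sent straight to the hctx
 * dispatch list.
 */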
static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
				       struct request *rq)
{
	if (rq->tag == -1) {
		rq->rq_flags |= RQF_SORTED;
		return false;
	}

	/*
	 * If we already have a real request tag, send directly to
	 * the dispatch list.
	 */
	spin_lock(&hctx->lock);
	list_add(&rq->queuelist, &hctx->dispatch);
	spin_unlock(&hctx->lock);
	return true;
}

/*
 * Add flush/fua to the queue. If we fail getting a driver tag, then
 * punt to the requeue list. Requeue will re-invoke us from a context
 * that's safe to block from.
 */
static void blk_mq_sched_insert_flush(struct blk_mq_hw_ctx *hctx,
				      struct request *rq, bool can_block)
{
	if (blk_mq_get_driver_tag(rq, &hctx, can_block)) {
		blk_insert_flush(rq);
		blk_mq_run_hw_queue(hctx, true);
	} else
		blk_mq_add_to_requeue_list(rq, false, true);
}

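/*
 * Insert a single request: flush/fua requests without a driver tag take the
 * flush path, requests that bypass the scheduler go straight to the dispatch
 * list, and everything else is handed to the elevator (or to the software
 * queue when no elevator is attached) before optionally running the queue.
 */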
void blk_mq_sched_insert_request(struct request *rq, bool at_head,
				 bool run_queue, bool async, bool can_block)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);

	if (rq->tag == -1 && op_is_flush(rq->cmd_flags)) {
		blk_mq_sched_insert_flush(hctx, rq, can_block);
		return;
	}

	if (e && blk_mq_sched_bypass_insert(hctx, rq))
		goto run;

	if (e && e->type->ops.mq.insert_requests) {
		LIST_HEAD(list);

		list_add(&rq->queuelist, &list);
		e->type->ops.mq.insert_requests(hctx, &list, at_head);
	} else {
		spin_lock(&ctx->lock);
		__blk_mq_insert_request(hctx, rq, at_head);
		spin_unlock(&ctx->lock);
	}

run:
	if (run_queue)
		blk_mq_run_hw_queue(hctx, async);
}

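/*
 * Insert a list of requests from a plug or software queue context. Requests
 * that already carry a driver tag bypass the scheduler; the rest are handed
 * to the elevator's ->insert_requests() or to the software queue.
 */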
void blk_mq_sched_insert_requests(struct request_queue *q,
				  struct blk_mq_ctx *ctx,
				  struct list_head *list, bool run_queue_async)
{
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
	struct elevator_queue *e = hctx->queue->elevator;

	if (e) {
		struct request *rq, *next;

		/*
		 * We bypass requests that already have a driver tag assigned,
		 * which should only be flushes. Flushes are only ever inserted
		 * as single requests, so we shouldn't ever hit the
		 * WARN_ON_ONCE() below (but let's handle it just in case).
		 */
		list_for_each_entry_safe(rq, next, list, queuelist) {
			if (WARN_ON_ONCE(rq->tag != -1)) {
				list_del_init(&rq->queuelist);
				blk_mq_sched_bypass_insert(hctx, rq);
			}
		}
	}

	if (e && e->type->ops.mq.insert_requests)
		e->type->ops.mq.insert_requests(hctx, list, false);
	else
		blk_mq_insert_requests(hctx, ctx, list);

	blk_mq_run_hw_queue(hctx, run_queue_async);
}

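/*
 * Release the scheduler tags of one hardware queue, freeing the requests
 * allocated against them and the tag map itself.
 */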
static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
				   struct blk_mq_hw_ctx *hctx,
				   unsigned int hctx_idx)
{
	if (hctx->sched_tags) {
		blk_mq_free_rqs(set, hctx->sched_tags, hctx_idx);
		blk_mq_free_rq_map(hctx->sched_tags);
		hctx->sched_tags = NULL;
	}
}

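/*
 * Allocate the scheduler tag map and requests for one hardware queue, sized
 * by q->nr_requests; on failure everything allocated so far is freed again.
 */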
static int blk_mq_sched_alloc_tags(struct request_queue *q,
				   struct blk_mq_hw_ctx *hctx,
				   unsigned int hctx_idx)
{
	struct blk_mq_tag_set *set = q->tag_set;
	int ret;

	hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests,
					       set->reserved_tags);
	if (!hctx->sched_tags)
		return -ENOMEM;

	ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests);
	if (ret)
		blk_mq_sched_free_tags(set, hctx, hctx_idx);

	return ret;
}

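/*
 * Free the scheduler tags of every hardware queue of @q.
 */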
static void blk_mq_sched_tags_teardown(struct request_queue *q)
{
	struct blk_mq_tag_set *set = q->tag_set;
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_sched_free_tags(set, hctx, i);
}

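/*
 * Set up scheduler state for a newly created hardware queue: allocate its
 * scheduler tags and call the elevator's ->init_hctx() if it has one. Does
 * nothing when the queue has no elevator attached.
 */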
int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
			   unsigned int hctx_idx)
{
	struct elevator_queue *e = q->elevator;
	int ret;

	if (!e)
		return 0;

	ret = blk_mq_sched_alloc_tags(q, hctx, hctx_idx);
	if (ret)
		return ret;

	if (e->type->ops.mq.init_hctx) {
		ret = e->type->ops.mq.init_hctx(hctx, hctx_idx);
		if (ret) {
			blk_mq_sched_free_tags(q->tag_set, hctx, hctx_idx);
			return ret;
		}
	}

	blk_mq_debugfs_register_sched_hctx(q, hctx);

	return 0;
}

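/*
 * Tear down scheduler state for a hardware queue that is going away: let the
 * elevator release its per-hctx data, then free the scheduler tags.
 */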
void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
			    unsigned int hctx_idx)
{
	struct elevator_queue *e = q->elevator;

	if (!e)
		return;

	blk_mq_debugfs_unregister_sched_hctx(hctx);

	if (e->type->ops.mq.exit_hctx && hctx->sched_data) {
		e->type->ops.mq.exit_hctx(hctx, hctx_idx);
		hctx->sched_data = NULL;
	}

	blk_mq_sched_free_tags(q->tag_set, hctx, hctx_idx);
}

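/*
 * Attach elevator type @e to @q: size the scheduler tag space, allocate
 * per-hctx scheduler tags, and run the elevator's ->init_sched() and
 * ->init_hctx() callbacks. Passing a NULL elevator switches the queue to
 * "none".
 */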
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
{
	struct blk_mq_hw_ctx *hctx;
	struct elevator_queue *eq;
	unsigned int i;
	int ret;

	if (!e) {
		q->elevator = NULL;
		return 0;
	}

	/*
	 * Default to double of smaller one between hw queue_depth and 128,
	 * since we don't split into sync/async like the old code did.
	 * Additionally, this is a per-hw queue depth.
	 */
	q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
				   BLKDEV_MAX_RQ);

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_sched_alloc_tags(q, hctx, i);
		if (ret)
			goto err;
	}

	ret = e->ops.mq.init_sched(q, e);
	if (ret)
		goto err;

	blk_mq_debugfs_register_sched(q);

	queue_for_each_hw_ctx(q, hctx, i) {
		if (e->ops.mq.init_hctx) {
			ret = e->ops.mq.init_hctx(hctx, i);
			if (ret) {
				eq = q->elevator;
				blk_mq_exit_sched(q, eq);
				kobject_put(&eq->kobj);
				return ret;
			}
		}
		blk_mq_debugfs_register_sched_hctx(q, hctx);
	}

	return 0;

err:
	blk_mq_sched_tags_teardown(q);
	q->elevator = NULL;
	return ret;
}

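/*
 * Detach elevator @e from @q: run the per-hctx and queue-wide exit
 * callbacks, drop the debugfs entries, and free the scheduler tags.
 */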
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		blk_mq_debugfs_unregister_sched_hctx(hctx);
		if (e->type->ops.mq.exit_hctx && hctx->sched_data) {
			e->type->ops.mq.exit_hctx(hctx, i);
			hctx->sched_data = NULL;
		}
	}
	blk_mq_debugfs_unregister_sched(q);
	if (e->type->ops.mq.exit_sched)
		e->type->ops.mq.exit_sched(e);
	blk_mq_sched_tags_teardown(q);
	q->elevator = NULL;
}

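/*
 * Pick the default I/O scheduler for a new blk-mq queue, serialised against
 * sysfs changes via q->sysfs_lock.
 */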
int blk_mq_sched_init(struct request_queue *q)
{
	int ret;

	mutex_lock(&q->sysfs_lock);
	ret = elevator_init(q, NULL);
	mutex_unlock(&q->sysfs_lock);

	return ret;
}