// SPDX-License-Identifier: GPL-2.0
/*
 * blk-mq scheduling framework
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blk-mq.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"
#include "blk-wbt.h"

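/*
 * Free the per-hctx scheduler data of every hardware queue, invoking the
 * caller's exit hook first when one is provided and sched_data is set.
 */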
void blk_mq_sched_free_hctx_data(struct request_queue *q,
				 void (*exit)(struct blk_mq_hw_ctx *))
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (exit && hctx->sched_data)
			exit(hctx);
		kfree(hctx->sched_data);
		hctx->sched_data = NULL;
	}
}
EXPORT_SYMBOL_GPL(blk_mq_sched_free_hctx_data);

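/*
 * Look up (or create) the io_cq between the current task's io_context and
 * this request's queue, and attach it to the request's elevator data.
 */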
void blk_mq_sched_assign_ioc(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct io_context *ioc;
	struct io_cq *icq;

	/*
	 * May not have an IO context if it's a passthrough request
	 */
	ioc = current->io_context;
	if (!ioc)
		return;

	spin_lock_irq(&q->queue_lock);
	icq = ioc_lookup_icq(ioc, q);
	spin_unlock_irq(&q->queue_lock);

	if (!icq) {
		icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
		if (!icq)
			return;
	}
	get_io_context(icq->ioc);
	rq->elv.icq = icq;
}

/*
 * Mark a hardware queue as needing a restart. For shared queues, maintain
 * a count of how many hardware queues are marked for restart.
 */
void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		return;

	set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_mark_restart_hctx);

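/*
 * If this hardware queue was marked as needing a restart, clear the flag
 * and kick the queue again asynchronously.
 */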
void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
{
	if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		return;
	clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);

	blk_mq_run_hw_queue(hctx, true);
}

#define BLK_MQ_BUDGET_DELAY	3		/* ms units */

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart queue if .get_budget() returns BLK_STS_NO_RESOURCE.
 *
 * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
 * be run again. This is necessary to avoid starving flushes.
 */
static int blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct elevator_queue *e = q->elevator;
	LIST_HEAD(rq_list);
	int ret = 0;

	do {
		struct request *rq;

		if (e->type->ops.has_work && !e->type->ops.has_work(hctx))
			break;

		if (!list_empty_careful(&hctx->dispatch)) {
			ret = -EAGAIN;
			break;
		}

		if (!blk_mq_get_dispatch_budget(hctx))
			break;

		rq = e->type->ops.dispatch_request(hctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(hctx);
			/*
			 * We're releasing without dispatching. Holding the
			 * budget could have blocked any "hctx"s with the
			 * same queue and if we didn't dispatch then there's
			 * no guarantee anyone will kick the queue. Kick it
			 * ourselves.
			 */
			blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
			break;
		}

		/*
		 * Now this rq owns the budget which has to be released
		 * if this rq won't be queued to driver via .queue_rq()
		 * in blk_mq_dispatch_rq_list().
		 */
		list_add(&rq->queuelist, &rq_list);
	} while (blk_mq_dispatch_rq_list(q, &rq_list, true));

	return ret;
}

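/*
 * Return the software queue that follows @ctx on @hctx, wrapping around to
 * the first one; used to round-robin dispatch across software queues.
 */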
static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx,
					  struct blk_mq_ctx *ctx)
{
	unsigned short idx = ctx->index_hw[hctx->type];

	if (++idx == hctx->nr_ctx)
		idx = 0;

	return hctx->ctxs[idx];
}

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart queue if .get_budget() returns BLK_STS_NO_RESOURCE.
 *
 * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
 * be run again. This is necessary to avoid starving flushes.
 */
static int blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	LIST_HEAD(rq_list);
	struct blk_mq_ctx *ctx = READ_ONCE(hctx->dispatch_from);
	int ret = 0;

	do {
		struct request *rq;

		if (!list_empty_careful(&hctx->dispatch)) {
			ret = -EAGAIN;
			break;
		}

		if (!sbitmap_any_bit_set(&hctx->ctx_map))
			break;

		if (!blk_mq_get_dispatch_budget(hctx))
			break;

		rq = blk_mq_dequeue_from_ctx(hctx, ctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(hctx);
			/*
			 * We're releasing without dispatching. Holding the
			 * budget could have blocked any "hctx"s with the
			 * same queue and if we didn't dispatch then there's
			 * no guarantee anyone will kick the queue. Kick it
			 * ourselves.
			 */
			blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
			break;
		}

		/*
		 * Now this rq owns the budget which has to be released
		 * if this rq won't be queued to driver via .queue_rq()
		 * in blk_mq_dispatch_rq_list().
		 */
		list_add(&rq->queuelist, &rq_list);

		/* round robin for fair dispatch */
		ctx = blk_mq_next_ctx(hctx, rq->mq_ctx);

	} while (blk_mq_dispatch_rq_list(q, &rq_list, true));

	WRITE_ONCE(hctx->dispatch_from, ctx);
	return ret;
}

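/*
 * Dispatch pending requests for this hardware queue: leftover requests on
 * hctx->dispatch are issued first, then requests are pulled from the I/O
 * scheduler (or from the software queues when no scheduler is attached).
 * Returns -EAGAIN when hctx->dispatch was found non-empty, so the queue
 * has to be run again.
 */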
static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct elevator_queue *e = q->elevator;
	const bool has_sched_dispatch = e && e->type->ops.dispatch_request;
	int ret = 0;
	LIST_HEAD(rq_list);

	/*
	 * If we have previous entries on our dispatch list, grab them first for
	 * more fair dispatch.
	 */
	if (!list_empty_careful(&hctx->dispatch)) {
		spin_lock(&hctx->lock);
		if (!list_empty(&hctx->dispatch))
			list_splice_init(&hctx->dispatch, &rq_list);
		spin_unlock(&hctx->lock);
	}

	/*
	 * Only ask the scheduler for requests, if we didn't have residual
	 * requests from the dispatch list. This is to avoid the case where
	 * we only ever dispatch a fraction of the requests available because
	 * of low device queue depth. Once we pull requests out of the IO
	 * scheduler, we can no longer merge or sort them. So it's best to
	 * leave them there for as long as we can. Mark the hw queue as
	 * needing a restart in that case.
	 *
	 * We want to dispatch from the scheduler if there was nothing
	 * on the dispatch list or we were able to dispatch from the
	 * dispatch list.
	 */
	if (!list_empty(&rq_list)) {
		blk_mq_sched_mark_restart_hctx(hctx);
		if (blk_mq_dispatch_rq_list(q, &rq_list, false)) {
			if (has_sched_dispatch)
				ret = blk_mq_do_dispatch_sched(hctx);
			else
				ret = blk_mq_do_dispatch_ctx(hctx);
		}
	} else if (has_sched_dispatch) {
		ret = blk_mq_do_dispatch_sched(hctx);
	} else if (hctx->dispatch_busy) {
		/* dequeue request one by one from sw queue if queue is busy */
		ret = blk_mq_do_dispatch_ctx(hctx);
	} else {
		blk_mq_flush_busy_ctxs(hctx, &rq_list);
		blk_mq_dispatch_rq_list(q, &rq_list, false);
	}

	return ret;
}

void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	/* RCU or SRCU read lock is needed before checking quiesced flag */
	if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
		return;

	hctx->run++;

	/*
	 * A return of -EAGAIN is an indication that hctx->dispatch is not
	 * empty and we must run again in order to avoid starving flushes.
	 */
	if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN) {
		if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN)
			blk_mq_run_hw_queue(hctx, true);
	}
}

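/*
 * Ask the elevator for a bio merge candidate and try a back, front or
 * discard merge. On a successful back/front merge, any request that was
 * merged away as a result is returned in *merged_request so the caller
 * can free it.
 */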
bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs, struct request **merged_request)
{
	struct request *rq;

	switch (elv_merge(q, &rq, bio)) {
	case ELEVATOR_BACK_MERGE:
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (!bio_attempt_back_merge(rq, bio, nr_segs))
			return false;
		*merged_request = attempt_back_merge(q, rq);
		if (!*merged_request)
			elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);
		return true;
	case ELEVATOR_FRONT_MERGE:
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (!bio_attempt_front_merge(rq, bio, nr_segs))
			return false;
		*merged_request = attempt_front_merge(q, rq);
		if (!*merged_request)
			elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
		return true;
	case ELEVATOR_DISCARD_MERGE:
		return bio_attempt_discard_merge(q, rq, bio);
	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);

/*
 * Iterate list of requests and see if we can merge this bio with any
 * of them.
 */
bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list,
			   struct bio *bio, unsigned int nr_segs)
{
	struct request *rq;
	int checked = 8;

	list_for_each_entry_reverse(rq, list, queuelist) {
		bool merged = false;

		if (!checked--)
			break;

		if (!blk_rq_merge_ok(rq, bio))
			continue;

		switch (blk_try_merge(rq, bio)) {
		case ELEVATOR_BACK_MERGE:
			if (blk_mq_sched_allow_merge(q, rq, bio))
				merged = bio_attempt_back_merge(rq, bio,
						nr_segs);
			break;
		case ELEVATOR_FRONT_MERGE:
			if (blk_mq_sched_allow_merge(q, rq, bio))
				merged = bio_attempt_front_merge(rq, bio,
						nr_segs);
			break;
		case ELEVATOR_DISCARD_MERGE:
			merged = bio_attempt_discard_merge(q, rq, bio);
			break;
		default:
			continue;
		}

		return merged;
	}

	return false;
}
EXPORT_SYMBOL_GPL(blk_mq_bio_list_merge);

/*
 * Reverse check our software queue for entries that we could potentially
 * merge with. Currently includes a hand-wavy stop count of 8, to not spend
 * too much time checking for merges.
 */
static bool blk_mq_attempt_merge(struct request_queue *q,
				 struct blk_mq_hw_ctx *hctx,
				 struct blk_mq_ctx *ctx, struct bio *bio,
				 unsigned int nr_segs)
{
	enum hctx_type type = hctx->type;

	lockdep_assert_held(&ctx->lock);

	if (blk_mq_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs)) {
		ctx->rq_merged++;
		return true;
	}

	return false;
}

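/*
 * Try to merge @bio before allocating a new request: delegate to the
 * scheduler's ->bio_merge() hook when one exists, otherwise fall back to
 * merging against the current CPU's software queue.
 */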
bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs)
{
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
	bool ret = false;
	enum hctx_type type;

	if (e && e->type->ops.bio_merge)
		return e->type->ops.bio_merge(hctx, bio, nr_segs);

	type = hctx->type;
	if ((hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
			!list_empty_careful(&ctx->rq_lists[type])) {
		/* default per sw-queue merge */
		spin_lock(&ctx->lock);
		ret = blk_mq_attempt_merge(q, hctx, ctx, bio, nr_segs);
		spin_unlock(&ctx->lock);
	}

	return ret;
}

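/*
 * Try to merge @rq into a request that is already queued in the elevator,
 * rather than inserting it as a new request.
 */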
bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq)
{
	return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);

void blk_mq_sched_request_inserted(struct request *rq)
{
	trace_block_rq_insert(rq->q, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_request_inserted);

static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
				       bool has_sched,
				       struct request *rq)
{
	/*
	 * dispatch flush and passthrough rq directly
	 *
	 * passthrough request has to be added to hctx->dispatch directly.
	 * For some reason, device may be in one situation which can't
	 * handle FS request, so STS_RESOURCE is always returned and the
	 * FS request will be added to hctx->dispatch. However passthrough
	 * request may be required at that time for fixing the problem. If
	 * passthrough request is added to scheduler queue, there isn't any
	 * chance to dispatch it given we prioritize requests in hctx->dispatch.
	 */
	if ((rq->rq_flags & RQF_FLUSH_SEQ) || blk_rq_is_passthrough(rq))
		return true;

	if (has_sched)
		rq->rq_flags |= RQF_SORTED;

	return false;
}

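/*
 * Insert a single request, either into the scheduler, the software queue,
 * or directly onto hctx->dispatch for flush and passthrough requests, and
 * optionally run the hardware queue afterwards.
 */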
void blk_mq_sched_insert_request(struct request *rq, bool at_head,
				 bool run_queue, bool async)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	/* flush rq in flush machinery need to be dispatched directly */
	if (!(rq->rq_flags & RQF_FLUSH_SEQ) && op_is_flush(rq->cmd_flags)) {
		blk_insert_flush(rq);
		goto run;
	}

	WARN_ON(e && (rq->tag != -1));

	if (blk_mq_sched_bypass_insert(hctx, !!e, rq)) {
		/*
		 * Firstly normal IO request is inserted to scheduler queue or
		 * sw queue, meantime we add flush request to dispatch queue(
		 * hctx->dispatch) directly and there is at most one in-flight
		 * flush request for each hw queue, so it doesn't matter to add
		 * flush request to tail or front of the dispatch queue.
		 *
		 * Secondly in case of NCQ, flush request belongs to non-NCQ
		 * command, and queueing it will fail when there is any
		 * in-flight normal IO request(NCQ command). When adding flush
		 * rq to the front of hctx->dispatch, it is easier to introduce
		 * extra time to flush rq's latency because of S_SCHED_RESTART
		 * compared with adding to the tail of dispatch queue, then
		 * chance of flush merge is increased, and less flush requests
		 * will be issued to controller. It is observed that ~10% time
		 * is saved in blktests block/004 on disk attached to AHCI/NCQ
		 * drive when adding flush rq to the front of hctx->dispatch.
		 *
		 * Simply queue flush rq to the front of hctx->dispatch so that
		 * intensive flush workloads can benefit in case of NCQ HW.
		 */
		at_head = (rq->rq_flags & RQF_FLUSH_SEQ) ? true : at_head;
		blk_mq_request_bypass_insert(rq, at_head, false);
		goto run;
	}

	if (e && e->type->ops.insert_requests) {
		LIST_HEAD(list);

		list_add(&rq->queuelist, &list);
		e->type->ops.insert_requests(hctx, &list, at_head);
	} else {
		spin_lock(&ctx->lock);
		__blk_mq_insert_request(hctx, rq, at_head);
		spin_unlock(&ctx->lock);
	}

run:
	if (run_queue)
		blk_mq_run_hw_queue(hctx, async);
}

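/*
 * Insert a list of plugged requests that all map to the same software and
 * hardware queue, preferring the scheduler's batch insert hook, or issuing
 * directly when there is no scheduler and the hardware queue isn't busy.
 */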
void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
				  struct blk_mq_ctx *ctx,
				  struct list_head *list, bool run_queue_async)
{
	struct elevator_queue *e;
	struct request_queue *q = hctx->queue;

	/*
	 * blk_mq_sched_insert_requests() is called from flush plug
	 * context only, and hold one usage counter to prevent queue
	 * from being released.
	 */
	percpu_ref_get(&q->q_usage_counter);

	e = hctx->queue->elevator;
	if (e && e->type->ops.insert_requests)
		e->type->ops.insert_requests(hctx, list, false);
	else {
		/*
		 * try to issue requests directly if the hw queue isn't
		 * busy in case of 'none' scheduler, and this way may save
		 * us one extra enqueue & dequeue to sw queue.
		 */
		if (!hctx->dispatch_busy && !e && !run_queue_async) {
			blk_mq_try_issue_list_directly(hctx, list);
			if (list_empty(list))
				goto out;
		}
		blk_mq_insert_requests(hctx, ctx, list);
	}

	blk_mq_run_hw_queue(hctx, run_queue_async);
 out:
	percpu_ref_put(&q->q_usage_counter);
}

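/*
 * Per-hctx scheduler tag handling: each hardware queue gets its own
 * sched_tags map sized to q->nr_requests, allocated when a scheduler is
 * attached and freed again when it is torn down.
 */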
static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
				   struct blk_mq_hw_ctx *hctx,
				   unsigned int hctx_idx)
{
	if (hctx->sched_tags) {
		blk_mq_free_rqs(set, hctx->sched_tags, hctx_idx);
		blk_mq_free_rq_map(hctx->sched_tags);
		hctx->sched_tags = NULL;
	}
}

static int blk_mq_sched_alloc_tags(struct request_queue *q,
				   struct blk_mq_hw_ctx *hctx,
				   unsigned int hctx_idx)
{
	struct blk_mq_tag_set *set = q->tag_set;
	int ret;

	hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests,
					       set->reserved_tags);
	if (!hctx->sched_tags)
		return -ENOMEM;

	ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests);
	if (ret)
		blk_mq_sched_free_tags(set, hctx, hctx_idx);

	return ret;
}

/* called in queue's release handler, tagset has gone away */
static void blk_mq_sched_tags_teardown(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (hctx->sched_tags) {
			blk_mq_free_rq_map(hctx->sched_tags);
			hctx->sched_tags = NULL;
		}
	}
}

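/*
 * Attach elevator @e to @q: size and allocate the per-hctx scheduler tags,
 * then call the elevator's init_sched() and per-hctx init_hctx() hooks,
 * unwinding on failure. With no elevator, the queue simply runs at the
 * tag set depth.
 */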
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
{
	struct blk_mq_hw_ctx *hctx;
	struct elevator_queue *eq;
	unsigned int i;
	int ret;

	if (!e) {
		q->elevator = NULL;
		q->nr_requests = q->tag_set->queue_depth;
		return 0;
	}

	/*
	 * Default to double of smaller one between hw queue_depth and 128,
	 * since we don't split into sync/async like the old code did.
	 * Additionally, this is a per-hw queue depth.
	 */
	q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
				   BLKDEV_MAX_RQ);

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_sched_alloc_tags(q, hctx, i);
		if (ret)
			goto err;
	}

	ret = e->ops.init_sched(q, e);
	if (ret)
		goto err;

	blk_mq_debugfs_register_sched(q);

	queue_for_each_hw_ctx(q, hctx, i) {
		if (e->ops.init_hctx) {
			ret = e->ops.init_hctx(hctx, i);
			if (ret) {
				eq = q->elevator;
				blk_mq_sched_free_requests(q);
				blk_mq_exit_sched(q, eq);
				kobject_put(&eq->kobj);
				return ret;
			}
		}
		blk_mq_debugfs_register_sched_hctx(q, hctx);
	}

	return 0;

err:
	blk_mq_sched_free_requests(q);
	blk_mq_sched_tags_teardown(q);
	q->elevator = NULL;
	return ret;
}

/*
 * called in either blk_queue_cleanup or elevator_switch, tagset
 * is required for freeing requests
 */
void blk_mq_sched_free_requests(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (hctx->sched_tags)
			blk_mq_free_rqs(q->tag_set, hctx->sched_tags, i);
	}
}

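/*
 * Detach elevator @e from @q: call the per-hctx exit_hctx() hooks, then
 * exit_sched(), and finally free the scheduler tag maps.
 */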
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		blk_mq_debugfs_unregister_sched_hctx(hctx);
		if (e->type->ops.exit_hctx && hctx->sched_data) {
			e->type->ops.exit_hctx(hctx, i);
			hctx->sched_data = NULL;
		}
	}
	blk_mq_debugfs_unregister_sched(q);
	if (e->type->ops.exit_sched)
		e->type->ops.exit_sched(e);
	blk_mq_sched_tags_teardown(q);
	q->elevator = NULL;
}