/*
 * blk-mq scheduling framework
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blk-mq.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"
#include "blk-wbt.h"

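/*
 * Free the per-hctx scheduler data for all hardware queues, calling the
 * scheduler's exit hook first where data was actually allocated.
 */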
void blk_mq_sched_free_hctx_data(struct request_queue *q,
				 void (*exit)(struct blk_mq_hw_ctx *))
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (exit && hctx->sched_data)
			exit(hctx);
		kfree(hctx->sched_data);
		hctx->sched_data = NULL;
	}
}
EXPORT_SYMBOL_GPL(blk_mq_sched_free_hctx_data);

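/*
 * Allocate per-hctx scheduler data of the given size for every hardware
 * queue and run the scheduler's init hook on it. On failure, anything that
 * was already set up is torn down again via blk_mq_sched_free_hctx_data().
 */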
int blk_mq_sched_init_hctx_data(struct request_queue *q, size_t size,
				int (*init)(struct blk_mq_hw_ctx *),
				void (*exit)(struct blk_mq_hw_ctx *))
{
	struct blk_mq_hw_ctx *hctx;
	int ret;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		hctx->sched_data = kmalloc_node(size, GFP_KERNEL, hctx->numa_node);
		if (!hctx->sched_data) {
			ret = -ENOMEM;
			goto error;
		}

		if (init) {
			ret = init(hctx);
			if (ret) {
				/*
				 * We don't want to give exit() a partially
				 * initialized sched_data. init() must clean up
				 * if it fails.
				 */
				kfree(hctx->sched_data);
				hctx->sched_data = NULL;
				goto error;
			}
		}
	}

	return 0;
error:
	blk_mq_sched_free_hctx_data(q, exit);
	return ret;
}
EXPORT_SYMBOL_GPL(blk_mq_sched_init_hctx_data);

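/*
 * Look up (or create) the io_cq for this io_context and queue and attach it
 * to the request. The request is only marked RQF_ELVPRIV, with a reference
 * taken on the io_context, if the scheduler's rq_priv hook succeeds;
 * otherwise the icq is detached again.
 */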
static void __blk_mq_sched_assign_ioc(struct request_queue *q,
				      struct request *rq,
				      struct io_context *ioc)
{
	struct io_cq *icq;

	spin_lock_irq(q->queue_lock);
	icq = ioc_lookup_icq(ioc, q);
	spin_unlock_irq(q->queue_lock);

	if (!icq) {
		icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
		if (!icq)
			return;
	}

	rq->elv.icq = icq;
	if (!blk_mq_sched_get_rq_priv(q, rq)) {
		rq->rq_flags |= RQF_ELVPRIV;
		get_io_context(icq->ioc);
		return;
	}

	rq->elv.icq = NULL;
}

static void blk_mq_sched_assign_ioc(struct request_queue *q,
				    struct request *rq, struct bio *bio)
{
	struct io_context *ioc;

	ioc = rq_ioc(bio);
	if (ioc)
		__blk_mq_sched_assign_ioc(q, rq, ioc);
}

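/*
 * Allocate a request for the given op. With an elevator attached, the
 * allocation comes from the scheduler (internal) tags; flush requests skip
 * the scheduler's get_request hook since they go directly to the dispatch
 * list.
 */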
struct request *blk_mq_sched_get_request(struct request_queue *q,
					 struct bio *bio,
					 unsigned int op,
					 struct blk_mq_alloc_data *data)
{
	struct elevator_queue *e = q->elevator;
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	struct request *rq;

	blk_queue_enter_live(q);
	ctx = blk_mq_get_ctx(q);
	hctx = blk_mq_map_queue(q, ctx->cpu);

	blk_mq_set_alloc_data(data, q, data->flags, ctx, hctx);

	if (e) {
		data->flags |= BLK_MQ_REQ_INTERNAL;

		/*
		 * Flush requests are special and go directly to the
		 * dispatch list.
		 */
		if (!op_is_flush(op) && e->type->ops.mq.get_request) {
			rq = e->type->ops.mq.get_request(q, op, data);
			if (rq)
				rq->rq_flags |= RQF_QUEUED;
		} else
			rq = __blk_mq_alloc_request(data, op);
	} else {
		rq = __blk_mq_alloc_request(data, op);
		if (rq)
			data->hctx->tags->rqs[rq->tag] = rq;
	}

	if (rq) {
		if (!op_is_flush(op)) {
			rq->elv.icq = NULL;
			if (e && e->type->icq_cache)
				blk_mq_sched_assign_ioc(q, rq, bio);
		}
		data->hctx->queued++;
		return rq;
	}

	blk_queue_exit(q);
	return NULL;
}

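/*
 * Release a request obtained through blk_mq_sched_get_request(): drop the
 * scheduler's per-request data and io_context reference, then free the
 * request via the scheduler's put_request hook or the normal finish path.
 */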
void blk_mq_sched_put_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

	if (rq->rq_flags & RQF_ELVPRIV) {
		blk_mq_sched_put_rq_priv(rq->q, rq);
		if (rq->elv.icq) {
			put_io_context(rq->elv.icq->ioc);
			rq->elv.icq = NULL;
		}
	}

	if ((rq->rq_flags & RQF_QUEUED) && e && e->type->ops.mq.put_request)
		e->type->ops.mq.put_request(rq);
	else
		blk_mq_finish_request(rq);
}

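/*
 * Run a hardware queue. Requests left over on hctx->dispatch are issued
 * first; only if that list is empty do we pull new work from the software
 * queues or from the IO scheduler.
 */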
void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
	struct elevator_queue *e = hctx->queue->elevator;
	LIST_HEAD(rq_list);

	if (unlikely(blk_mq_hctx_stopped(hctx)))
		return;

	hctx->run++;

	/*
	 * If we have previous entries on our dispatch list, grab them first for
	 * more fair dispatch.
	 */
	if (!list_empty_careful(&hctx->dispatch)) {
		spin_lock(&hctx->lock);
		if (!list_empty(&hctx->dispatch))
			list_splice_init(&hctx->dispatch, &rq_list);
		spin_unlock(&hctx->lock);
	}

	/*
	 * Only ask the scheduler for requests, if we didn't have residual
	 * requests from the dispatch list. This is to avoid the case where
	 * we only ever dispatch a fraction of the requests available because
	 * of low device queue depth. Once we pull requests out of the IO
	 * scheduler, we can no longer merge or sort them. So it's best to
	 * leave them there for as long as we can. Mark the hw queue as
	 * needing a restart in that case.
	 */
	if (!list_empty(&rq_list)) {
		blk_mq_sched_mark_restart(hctx);
		blk_mq_dispatch_rq_list(hctx, &rq_list);
	} else if (!e || !e->type->ops.mq.dispatch_request) {
		blk_mq_flush_busy_ctxs(hctx, &rq_list);
		blk_mq_dispatch_rq_list(hctx, &rq_list);
	} else {
		do {
			struct request *rq;

			rq = e->type->ops.mq.dispatch_request(hctx);
			if (!rq)
				break;
			list_add(&rq->queuelist, &rq_list);
		} while (blk_mq_dispatch_rq_list(hctx, &rq_list));
	}
}

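/*
 * Helper for schedulers: repeatedly call get_rq() and move every returned
 * request to the tail of the given dispatch list, until it runs dry.
 */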
void blk_mq_sched_move_to_dispatch(struct blk_mq_hw_ctx *hctx,
				   struct list_head *rq_list,
				   struct request *(*get_rq)(struct blk_mq_hw_ctx *))
{
	do {
		struct request *rq;

		rq = get_rq(hctx);
		if (!rq)
			break;

		list_add_tail(&rq->queuelist, rq_list);
	} while (1);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_move_to_dispatch);

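/*
 * Ask the elevator for a back or front merge candidate for this bio and, if
 * the scheduler allows it, attempt the merge. Returns true if the bio was
 * merged into an existing request.
 */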
bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio)
{
	struct request *rq;
	int ret;

	ret = elv_merge(q, &rq, bio);
	if (ret == ELEVATOR_BACK_MERGE) {
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (bio_attempt_back_merge(q, rq, bio)) {
			if (!attempt_back_merge(q, rq))
				elv_merged_request(q, rq, ret);
			return true;
		}
	} else if (ret == ELEVATOR_FRONT_MERGE) {
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (bio_attempt_front_merge(q, rq, bio)) {
			if (!attempt_front_merge(q, rq))
				elv_merged_request(q, rq, ret);
			return true;
		}
	}

	return false;
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);

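/*
 * Let the scheduler's bio_merge hook try to merge the bio, using the
 * hardware queue mapped to the current CPU's software queue.
 */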
bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.mq.bio_merge) {
		struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
		struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);

		blk_mq_put_ctx(ctx);
		return e->type->ops.mq.bio_merge(hctx, bio);
	}

	return false;
}

bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq)
{
	return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);

void blk_mq_sched_request_inserted(struct request *rq)
{
	trace_block_rq_insert(rq->q, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_request_inserted);

static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
				       struct request *rq)
{
	if (rq->tag == -1) {
		rq->rq_flags |= RQF_SORTED;
		return false;
	}

	/*
	 * If we already have a real request tag, send directly to
	 * the dispatch list.
	 */
	spin_lock(&hctx->lock);
	list_add(&rq->queuelist, &hctx->dispatch);
	spin_unlock(&hctx->lock);
	return true;
}

static void blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) {
		clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
		if (blk_mq_hctx_has_pending(hctx))
			blk_mq_run_hw_queue(hctx, true);
	}
}

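/*
 * Re-run hardware queues that were marked as needing a restart. With a
 * shared tag set, a freed tag may unblock any hardware queue of the request
 * queue, so in that case the queue-wide restart flag is checked and all
 * hardware queues are restarted.
 */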
void blk_mq_sched_restart_queues(struct blk_mq_hw_ctx *hctx)
{
	unsigned int i;

	if (!(hctx->flags & BLK_MQ_F_TAG_SHARED))
		blk_mq_sched_restart_hctx(hctx);
	else {
		struct request_queue *q = hctx->queue;

		if (!test_bit(QUEUE_FLAG_RESTART, &q->queue_flags))
			return;

		clear_bit(QUEUE_FLAG_RESTART, &q->queue_flags);

		queue_for_each_hw_ctx(q, hctx, i)
			blk_mq_sched_restart_hctx(hctx);
	}
}

/*
 * Add flush/fua to the queue. If we fail getting a driver tag, then
 * punt to the requeue list. Requeue will re-invoke us from a context
 * that's safe to block from.
 */
static void blk_mq_sched_insert_flush(struct blk_mq_hw_ctx *hctx,
				      struct request *rq, bool can_block)
{
	if (blk_mq_get_driver_tag(rq, &hctx, can_block)) {
		blk_insert_flush(rq);
		blk_mq_run_hw_queue(hctx, true);
	} else
		blk_mq_add_to_requeue_list(rq, true, true);
}

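/*
 * Insert a single request. Flush/FUA requests take the dedicated flush
 * path; with an elevator, requests that already hold a driver tag are sent
 * straight to the dispatch list, and everything else goes to the scheduler
 * or to the software queue.
 */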
void blk_mq_sched_insert_request(struct request *rq, bool at_head,
				 bool run_queue, bool async, bool can_block)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);

	if (rq->tag == -1 && op_is_flush(rq->cmd_flags)) {
		blk_mq_sched_insert_flush(hctx, rq, can_block);
		return;
	}

	if (e && blk_mq_sched_bypass_insert(hctx, rq))
		goto run;

	if (e && e->type->ops.mq.insert_requests) {
		LIST_HEAD(list);

		list_add(&rq->queuelist, &list);
		e->type->ops.mq.insert_requests(hctx, &list, at_head);
	} else {
		spin_lock(&ctx->lock);
		__blk_mq_insert_request(hctx, rq, at_head);
		spin_unlock(&ctx->lock);
	}

run:
	if (run_queue)
		blk_mq_run_hw_queue(hctx, async);
}

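/*
 * Insert a list of requests targeted at one software queue. With an
 * elevator, any request that unexpectedly already carries a driver tag is
 * diverted to the dispatch list before the rest is handed to the scheduler.
 */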
void blk_mq_sched_insert_requests(struct request_queue *q,
				  struct blk_mq_ctx *ctx,
				  struct list_head *list, bool run_queue_async)
{
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
	struct elevator_queue *e = hctx->queue->elevator;

	if (e) {
		struct request *rq, *next;

		/*
		 * We bypass requests that already have a driver tag assigned,
		 * which should only be flushes. Flushes are only ever inserted
		 * as single requests, so we shouldn't ever hit the
		 * WARN_ON_ONCE() below (but let's handle it just in case).
		 */
		list_for_each_entry_safe(rq, next, list, queuelist) {
			if (WARN_ON_ONCE(rq->tag != -1)) {
				list_del_init(&rq->queuelist);
				blk_mq_sched_bypass_insert(hctx, rq);
			}
		}
	}

	if (e && e->type->ops.mq.insert_requests)
		e->type->ops.mq.insert_requests(hctx, list, false);
	else
		blk_mq_insert_requests(hctx, ctx, list);

	blk_mq_run_hw_queue(hctx, run_queue_async);
}

static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
				   struct blk_mq_hw_ctx *hctx,
				   unsigned int hctx_idx)
{
	if (hctx->sched_tags) {
		blk_mq_free_rqs(set, hctx->sched_tags, hctx_idx);
		blk_mq_free_rq_map(hctx->sched_tags);
		hctx->sched_tags = NULL;
	}
}

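/*
 * Allocate scheduler tags and requests for every hardware queue in
 * preparation for switching to an IO scheduler. On allocation failure,
 * everything allocated so far is freed again and the error is returned.
 */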
int blk_mq_sched_setup(struct request_queue *q)
{
	struct blk_mq_tag_set *set = q->tag_set;
	struct blk_mq_hw_ctx *hctx;
	int ret, i;

	/*
	 * Default to 256, since we don't split into sync/async like the
	 * old code did. Additionally, this is a per-hw queue depth.
	 */
	q->nr_requests = 2 * BLKDEV_MAX_RQ;

	/*
	 * We're switching to using an IO scheduler, so setup the hctx
	 * scheduler tags and switch the request map from the regular
	 * tags to scheduler tags. First allocate what we need, so we
	 * can safely fail and fallback, if needed.
	 */
	ret = 0;
	queue_for_each_hw_ctx(q, hctx, i) {
		hctx->sched_tags = blk_mq_alloc_rq_map(set, i, q->nr_requests, 0);
		if (!hctx->sched_tags) {
			ret = -ENOMEM;
			break;
		}
		ret = blk_mq_alloc_rqs(set, hctx->sched_tags, i, q->nr_requests);
		if (ret)
			break;
	}

	/*
	 * If we failed, free what we did allocate
	 */
	if (ret) {
		queue_for_each_hw_ctx(q, hctx, i) {
			if (!hctx->sched_tags)
				continue;
			blk_mq_sched_free_tags(set, hctx, i);
		}

		return ret;
	}

	return 0;
}

void blk_mq_sched_teardown(struct request_queue *q)
{
	struct blk_mq_tag_set *set = q->tag_set;
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_sched_free_tags(set, hctx, i);
}

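/*
 * Pick the default IO scheduler at queue initialization time, unless the
 * kernel configuration selects "none" for single-queue or multi-queue
 * devices.
 */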
int blk_mq_sched_init(struct request_queue *q)
{
	int ret;

#if defined(CONFIG_DEFAULT_SQ_NONE)
	if (q->nr_hw_queues == 1)
		return 0;
#endif
#if defined(CONFIG_DEFAULT_MQ_NONE)
	if (q->nr_hw_queues > 1)
		return 0;
#endif

	mutex_lock(&q->sysfs_lock);
	ret = elevator_init(q, NULL);
	mutex_unlock(&q->sysfs_lock);

	return ret;
}