/*
 * blk-mq scheduling framework
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blk-mq.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"
#include "blk-wbt.h"

void blk_mq_sched_free_hctx_data(struct request_queue *q,
				 void (*exit)(struct blk_mq_hw_ctx *))
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (exit && hctx->sched_data)
			exit(hctx);
		kfree(hctx->sched_data);
		hctx->sched_data = NULL;
	}
}
EXPORT_SYMBOL_GPL(blk_mq_sched_free_hctx_data);

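/*
 * Associate @rq with the io_context of the task that submitted @bio,
 * creating an io_cq entry for this queue if one does not exist yet.
 */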
void blk_mq_sched_assign_ioc(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;
	struct io_context *ioc = rq_ioc(bio);
	struct io_cq *icq;

	spin_lock_irq(q->queue_lock);
	icq = ioc_lookup_icq(ioc, q);
	spin_unlock_irq(q->queue_lock);

	if (!icq) {
		icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
		if (!icq)
			return;
	}
	get_io_context(icq->ioc);
	rq->elv.icq = icq;
}

/*
 * Mark a hardware queue as needing a restart. For shared queues, maintain
 * a count of how many hardware queues are marked for restart.
 */
static void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		return;

	if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
		struct request_queue *q = hctx->queue;

		if (!test_and_set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
			atomic_inc(&q->shared_hctx_restart);
	} else
		set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}

static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
	if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		return false;

	if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
		struct request_queue *q = hctx->queue;

		if (test_and_clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
			atomic_dec(&q->shared_hctx_restart);
	} else
		clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);

	return blk_mq_run_hw_queue(hctx, true);
}

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart queue if .get_budget() returns BLK_STS_NO_RESOURCE.
 */
static void blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct elevator_queue *e = q->elevator;
	LIST_HEAD(rq_list);

	do {
		struct request *rq;

		if (e->type->ops.mq.has_work &&
				!e->type->ops.mq.has_work(hctx))
			break;

		if (!blk_mq_get_dispatch_budget(hctx))
			break;

		rq = e->type->ops.mq.dispatch_request(hctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(hctx);
			break;
		}

		/*
		 * Now this rq owns the budget which has to be released
		 * if this rq won't be queued to driver via .queue_rq()
		 * in blk_mq_dispatch_rq_list().
		 */
		list_add(&rq->queuelist, &rq_list);
	} while (blk_mq_dispatch_rq_list(q, &rq_list, true));
}

static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx,
					  struct blk_mq_ctx *ctx)
{
	unsigned idx = ctx->index_hw;

	if (++idx == hctx->nr_ctx)
		idx = 0;

	return hctx->ctxs[idx];
}

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart queue if .get_budget() returns BLK_STS_NO_RESOURCE.
 */
static void blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	LIST_HEAD(rq_list);
	struct blk_mq_ctx *ctx = READ_ONCE(hctx->dispatch_from);

	do {
		struct request *rq;

		if (!sbitmap_any_bit_set(&hctx->ctx_map))
			break;

		if (!blk_mq_get_dispatch_budget(hctx))
			break;

		rq = blk_mq_dequeue_from_ctx(hctx, ctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(hctx);
			break;
		}

		/*
		 * Now this rq owns the budget which has to be released
		 * if this rq won't be queued to driver via .queue_rq()
		 * in blk_mq_dispatch_rq_list().
		 */
		list_add(&rq->queuelist, &rq_list);

		/* round robin for fair dispatch */
		ctx = blk_mq_next_ctx(hctx, rq->mq_ctx);

	} while (blk_mq_dispatch_rq_list(q, &rq_list, true));

	WRITE_ONCE(hctx->dispatch_from, ctx);
}

/*
 * Dispatch requests to the driver: first any requests left over on
 * hctx->dispatch, then requests from the I/O scheduler or the software queues.
 */
void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct elevator_queue *e = q->elevator;
	const bool has_sched_dispatch = e && e->type->ops.mq.dispatch_request;
	LIST_HEAD(rq_list);

	/* RCU or SRCU read lock is needed before checking quiesced flag */
	if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
		return;

	hctx->run++;

	/*
	 * If we have previous entries on our dispatch list, grab them first for
	 * more fair dispatch.
	 */
	if (!list_empty_careful(&hctx->dispatch)) {
		spin_lock(&hctx->lock);
		if (!list_empty(&hctx->dispatch))
			list_splice_init(&hctx->dispatch, &rq_list);
		spin_unlock(&hctx->lock);
	}

	/*
	 * Only ask the scheduler for requests, if we didn't have residual
	 * requests from the dispatch list. This is to avoid the case where
	 * we only ever dispatch a fraction of the requests available because
	 * of low device queue depth. Once we pull requests out of the IO
	 * scheduler, we can no longer merge or sort them. So it's best to
	 * leave them there for as long as we can. Mark the hw queue as
	 * needing a restart in that case.
	 *
	 * We want to dispatch from the scheduler if there was nothing
	 * on the dispatch list or we were able to dispatch from the
	 * dispatch list.
	 */
	if (!list_empty(&rq_list)) {
		blk_mq_sched_mark_restart_hctx(hctx);
		if (blk_mq_dispatch_rq_list(q, &rq_list, false)) {
			if (has_sched_dispatch)
				blk_mq_do_dispatch_sched(hctx);
			else
				blk_mq_do_dispatch_ctx(hctx);
		}
	} else if (has_sched_dispatch) {
		blk_mq_do_dispatch_sched(hctx);
	} else if (hctx->dispatch_busy) {
		/* dequeue requests one by one from the sw queue if the queue is busy */
		blk_mq_do_dispatch_ctx(hctx);
	} else {
		blk_mq_flush_busy_ctxs(hctx, &rq_list);
		blk_mq_dispatch_rq_list(q, &rq_list, false);
	}
}

bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
			    struct request **merged_request)
{
	struct request *rq;

	switch (elv_merge(q, &rq, bio)) {
	case ELEVATOR_BACK_MERGE:
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (!bio_attempt_back_merge(q, rq, bio))
			return false;
		*merged_request = attempt_back_merge(q, rq);
		if (!*merged_request)
			elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);
		return true;
	case ELEVATOR_FRONT_MERGE:
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (!bio_attempt_front_merge(q, rq, bio))
			return false;
		*merged_request = attempt_front_merge(q, rq);
		if (!*merged_request)
			elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
		return true;
	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);

/*
 * Reverse check our software queue for entries that we could potentially
 * merge with. Currently includes a hand-wavy stop count of 8, to not spend
 * too much time checking for merges.
 */
static bool blk_mq_attempt_merge(struct request_queue *q,
				 struct blk_mq_ctx *ctx, struct bio *bio)
{
	struct request *rq;
	int checked = 8;

	lockdep_assert_held(&ctx->lock);

	list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
		bool merged = false;

		if (!checked--)
			break;

		if (!blk_rq_merge_ok(rq, bio))
			continue;

		switch (blk_try_merge(rq, bio)) {
		case ELEVATOR_BACK_MERGE:
			if (blk_mq_sched_allow_merge(q, rq, bio))
				merged = bio_attempt_back_merge(q, rq, bio);
			break;
		case ELEVATOR_FRONT_MERGE:
			if (blk_mq_sched_allow_merge(q, rq, bio))
				merged = bio_attempt_front_merge(q, rq, bio);
			break;
		case ELEVATOR_DISCARD_MERGE:
			merged = bio_attempt_discard_merge(q, rq, bio);
			break;
		default:
			continue;
		}

		if (merged)
			ctx->rq_merged++;
		return merged;
	}

	return false;
}

bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
{
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
	bool ret = false;

	if (e && e->type->ops.mq.bio_merge) {
		blk_mq_put_ctx(ctx);
		return e->type->ops.mq.bio_merge(hctx, bio);
	}

	if ((hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
			!list_empty_careful(&ctx->rq_list)) {
		/* default per sw-queue merge */
		spin_lock(&ctx->lock);
		ret = blk_mq_attempt_merge(q, ctx, bio);
		spin_unlock(&ctx->lock);
	}

	blk_mq_put_ctx(ctx);
	return ret;
}

bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq)
{
	return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);

void blk_mq_sched_request_inserted(struct request *rq)
{
	trace_block_rq_insert(rq->q, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_request_inserted);

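/*
 * Decide whether @rq has to bypass the I/O scheduler: requests that are part
 * of a flush sequence are put on hctx->dispatch directly and true is
 * returned; otherwise the request is marked RQF_SORTED (when a scheduler is
 * attached) and false is returned.
 */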
static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
				       bool has_sched,
				       struct request *rq)
{
	/* dispatch flush rq directly */
	if (rq->rq_flags & RQF_FLUSH_SEQ) {
		spin_lock(&hctx->lock);
		list_add(&rq->queuelist, &hctx->dispatch);
		spin_unlock(&hctx->lock);
		return true;
	}

	if (has_sched)
		rq->rq_flags |= RQF_SORTED;

	return false;
}

/**
 * list_for_each_entry_rcu_rr - iterate in a round-robin fashion over rcu list
 * @pos: loop cursor.
 * @skip: the list element that will not be examined. Iteration starts at
 *	  @skip->next.
 * @head: head of the list to examine. This list must have at least one
 *	  element, namely @skip.
 * @member: name of the list_head structure within typeof(*pos).
 */
#define list_for_each_entry_rcu_rr(pos, skip, head, member)		\
	for ((pos) = (skip);						\
	     (pos = (pos)->member.next != (head) ? list_entry_rcu(	\
			(pos)->member.next, typeof(*pos), member) :	\
			list_entry_rcu((pos)->member.next->next, typeof(*pos), member)), \
	     (pos) != (skip); )
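/*
 * Illustrative note: for a list head -> A -> B -> C with @skip == B, the loop
 * above visits C, then wraps past the list head to visit A, and terminates
 * when it comes back around to B; @skip itself is never visited.
 */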

/*
 * Called after a driver tag has been freed to check whether a hctx needs to
 * be restarted. Restarts @hctx if its tag set is not shared. Restarts hardware
 * queues in a round-robin fashion if the tag set of @hctx is shared with other
 * hardware queues.
 */
void blk_mq_sched_restart(struct blk_mq_hw_ctx *const hctx)
{
	struct blk_mq_tags *const tags = hctx->tags;
	struct blk_mq_tag_set *const set = hctx->queue->tag_set;
	struct request_queue *const queue = hctx->queue, *q;
	struct blk_mq_hw_ctx *hctx2;
	unsigned int i, j;

	if (set->flags & BLK_MQ_F_TAG_SHARED) {
		/*
		 * If this is 0, then we know that no hardware queues
		 * have RESTART marked. We're done.
		 */
		if (!atomic_read(&queue->shared_hctx_restart))
			return;

		rcu_read_lock();
		list_for_each_entry_rcu_rr(q, queue, &set->tag_list,
					   tag_set_list) {
			queue_for_each_hw_ctx(q, hctx2, i)
				if (hctx2->tags == tags &&
				    blk_mq_sched_restart_hctx(hctx2))
					goto done;
		}
		j = hctx->queue_num + 1;
		for (i = 0; i < queue->nr_hw_queues; i++, j++) {
			if (j == queue->nr_hw_queues)
				j = 0;
			hctx2 = queue->queue_hw_ctx[j];
			if (hctx2->tags == tags &&
			    blk_mq_sched_restart_hctx(hctx2))
				break;
		}
done:
		rcu_read_unlock();
	} else {
		blk_mq_sched_restart_hctx(hctx);
	}
}

void blk_mq_sched_insert_request(struct request *rq, bool at_head,
				 bool run_queue, bool async)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);

	/* a flush rq in the flush machinery needs to be dispatched directly */
	if (!(rq->rq_flags & RQF_FLUSH_SEQ) && op_is_flush(rq->cmd_flags)) {
		blk_insert_flush(rq);
		goto run;
	}

	WARN_ON(e && (rq->tag != -1));

	if (blk_mq_sched_bypass_insert(hctx, !!e, rq))
		goto run;

	if (e && e->type->ops.mq.insert_requests) {
		LIST_HEAD(list);

		list_add(&rq->queuelist, &list);
		e->type->ops.mq.insert_requests(hctx, &list, at_head);
	} else {
		spin_lock(&ctx->lock);
		__blk_mq_insert_request(hctx, rq, at_head);
		spin_unlock(&ctx->lock);
	}

run:
	if (run_queue)
		blk_mq_run_hw_queue(hctx, async);
}

void blk_mq_sched_insert_requests(struct request_queue *q,
				  struct blk_mq_ctx *ctx,
				  struct list_head *list, bool run_queue_async)
{
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
	struct elevator_queue *e = hctx->queue->elevator;

	if (e && e->type->ops.mq.insert_requests)
		e->type->ops.mq.insert_requests(hctx, list, false);
	else {
		/*
		 * With no scheduler ('none'), try to issue requests directly
		 * if the hw queue isn't busy; this may save us an extra
		 * enqueue and dequeue on the sw queue.
		 */
		if (!hctx->dispatch_busy && !e && !run_queue_async) {
			blk_mq_try_issue_list_directly(hctx, list);
			if (list_empty(list))
				return;
		}
		blk_mq_insert_requests(hctx, ctx, list);
	}

	blk_mq_run_hw_queue(hctx, run_queue_async);
}

static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
				   struct blk_mq_hw_ctx *hctx,
				   unsigned int hctx_idx)
{
	if (hctx->sched_tags) {
		blk_mq_free_rqs(set, hctx->sched_tags, hctx_idx);
		blk_mq_free_rq_map(hctx->sched_tags);
		hctx->sched_tags = NULL;
	}
}

static int blk_mq_sched_alloc_tags(struct request_queue *q,
				   struct blk_mq_hw_ctx *hctx,
				   unsigned int hctx_idx)
{
	struct blk_mq_tag_set *set = q->tag_set;
	int ret;

	hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests,
					       set->reserved_tags);
	if (!hctx->sched_tags)
		return -ENOMEM;

	ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests);
	if (ret)
		blk_mq_sched_free_tags(set, hctx, hctx_idx);

	return ret;
}

static void blk_mq_sched_tags_teardown(struct request_queue *q)
{
	struct blk_mq_tag_set *set = q->tag_set;
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_sched_free_tags(set, hctx, i);
}

int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
			   unsigned int hctx_idx)
{
	struct elevator_queue *e = q->elevator;
	int ret;

	if (!e)
		return 0;

	ret = blk_mq_sched_alloc_tags(q, hctx, hctx_idx);
	if (ret)
		return ret;

	if (e->type->ops.mq.init_hctx) {
		ret = e->type->ops.mq.init_hctx(hctx, hctx_idx);
		if (ret) {
			blk_mq_sched_free_tags(q->tag_set, hctx, hctx_idx);
			return ret;
		}
	}

	blk_mq_debugfs_register_sched_hctx(q, hctx);

	return 0;
}

void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
			    unsigned int hctx_idx)
{
	struct elevator_queue *e = q->elevator;

	if (!e)
		return;

	blk_mq_debugfs_unregister_sched_hctx(hctx);

	if (e->type->ops.mq.exit_hctx && hctx->sched_data) {
		e->type->ops.mq.exit_hctx(hctx, hctx_idx);
		hctx->sched_data = NULL;
	}

	blk_mq_sched_free_tags(q->tag_set, hctx, hctx_idx);
}

int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
{
	struct blk_mq_hw_ctx *hctx;
	struct elevator_queue *eq;
	unsigned int i;
	int ret;

	if (!e) {
		q->elevator = NULL;
		return 0;
	}

	/*
	 * Default to twice the smaller of the hw queue depth and 128, since
	 * we don't split into sync/async like the old code did. Additionally,
	 * this is a per-hw-queue depth.
	 */
	q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
				   BLKDEV_MAX_RQ);
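	/*
	 * Worked example (assuming BLKDEV_MAX_RQ is 128): a hw queue depth of
	 * 64 gives nr_requests = 128, while a device with a queue depth of
	 * 1024 is capped at nr_requests = 256.
	 */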

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_sched_alloc_tags(q, hctx, i);
		if (ret)
			goto err;
	}

	ret = e->ops.mq.init_sched(q, e);
	if (ret)
		goto err;

	blk_mq_debugfs_register_sched(q);

	queue_for_each_hw_ctx(q, hctx, i) {
		if (e->ops.mq.init_hctx) {
			ret = e->ops.mq.init_hctx(hctx, i);
			if (ret) {
				eq = q->elevator;
				blk_mq_exit_sched(q, eq);
				kobject_put(&eq->kobj);
				return ret;
			}
		}
		blk_mq_debugfs_register_sched_hctx(q, hctx);
	}

	return 0;

err:
	blk_mq_sched_tags_teardown(q);
	q->elevator = NULL;
	return ret;
}

void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		blk_mq_debugfs_unregister_sched_hctx(hctx);
		if (e->type->ops.mq.exit_hctx && hctx->sched_data) {
			e->type->ops.mq.exit_hctx(hctx, i);
			hctx->sched_data = NULL;
		}
	}
	blk_mq_debugfs_unregister_sched(q);
	if (e->type->ops.mq.exit_sched)
		e->type->ops.mq.exit_sched(e);
	blk_mq_sched_tags_teardown(q);
	q->elevator = NULL;
}

int blk_mq_sched_init(struct request_queue *q)
{
	int ret;

	mutex_lock(&q->sysfs_lock);
	ret = elevator_init(q, NULL);
	mutex_unlock(&q->sysfs_lock);

	return ret;
}