/*
 * Block multiqueue core code
 *
 * Copyright (C) 2013-2014 Jens Axboe
 * Copyright (C) 2013-2014 Christoph Hellwig
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/kmemleak.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/llist.h>
#include <linux/list_sort.h>
#include <linux/cpu.h>
#include <linux/cache.h>
#include <linux/sched/sysctl.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>
#include <linux/prefetch.h>

#include <trace/events/block.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
#include "blk-stat.h"
#include "blk-wbt.h"
#include "blk-mq-sched.h"

static DEFINE_MUTEX(all_q_mutex);
static LIST_HEAD(all_q_list);

/*
 * Check if any of the ctx's have pending work in this hardware queue
 */
bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
	return sbitmap_any_bit_set(&hctx->ctx_map) ||
		!list_empty_careful(&hctx->dispatch) ||
		blk_mq_sched_has_work(hctx);
}

/*
 * Mark this ctx as having pending work in this hardware queue
 */
static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
				     struct blk_mq_ctx *ctx)
{
	if (!sbitmap_test_bit(&hctx->ctx_map, ctx->index_hw))
		sbitmap_set_bit(&hctx->ctx_map, ctx->index_hw);
}

static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
				      struct blk_mq_ctx *ctx)
{
	sbitmap_clear_bit(&hctx->ctx_map, ctx->index_hw);
}

void blk_mq_freeze_queue_start(struct request_queue *q)
{
	int freeze_depth;

	freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
	if (freeze_depth == 1) {
		percpu_ref_kill(&q->q_usage_counter);
		blk_mq_run_hw_queues(q, false);
	}
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);

static void blk_mq_freeze_queue_wait(struct request_queue *q)
{
	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
}

/*
 * Guarantee no request is in use, so we can change any data structure of
 * the queue afterward.
 */
void blk_freeze_queue(struct request_queue *q)
{
	/*
	 * In the !blk_mq case we are only calling this to kill the
	 * q_usage_counter, otherwise this increases the freeze depth
	 * and waits for it to return to zero. For this reason there is
	 * no blk_unfreeze_queue(), and blk_freeze_queue() is not
	 * exported to drivers as the only user for unfreeze is blk_mq.
	 */
	blk_mq_freeze_queue_start(q);
	blk_mq_freeze_queue_wait(q);
}

void blk_mq_freeze_queue(struct request_queue *q)
{
	/*
	 * ...just an alias to keep freeze and unfreeze actions balanced
	 * in the blk_mq_* namespace
	 */
	blk_freeze_queue(q);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);

void blk_mq_unfreeze_queue(struct request_queue *q)
{
	int freeze_depth;

	freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
	WARN_ON_ONCE(freeze_depth < 0);
	if (!freeze_depth) {
		percpu_ref_reinit(&q->q_usage_counter);
		wake_up_all(&q->mq_freeze_wq);
	}
}
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);

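/*
 * Illustrative sketch (not part of this file): the freeze/unfreeze pair
 * above is the usual way to drain all in-flight requests before changing
 * queue or tag-set state. The helper name my_driver_reconfigure() and the
 * reconfiguration step are assumptions for the example; the
 * blk_mq_freeze_queue()/blk_mq_unfreeze_queue() calls are the real API.
 *
 *	static void my_driver_reconfigure(struct request_queue *q)
 *	{
 *		blk_mq_freeze_queue(q);		(waits for q_usage_counter to drop to zero)
 *		... change queue state while nothing is in flight ...
 *		blk_mq_unfreeze_queue(q);	(revives the percpu ref, wakes waiters)
 *	}
 */
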
/**
 * blk_mq_quiesce_queue() - wait until all ongoing queue_rq calls have finished
 * @q: request queue.
 *
 * Note: this function does not prevent the struct request end_io()
 * callback from being invoked. Additionally, new queue_rq() calls are not
 * prevented unless the queue has been stopped first.
 */
void blk_mq_quiesce_queue(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;
	bool rcu = false;

	blk_mq_stop_hw_queues(q);

	queue_for_each_hw_ctx(q, hctx, i) {
		if (hctx->flags & BLK_MQ_F_BLOCKING)
			synchronize_srcu(&hctx->queue_rq_srcu);
		else
			rcu = true;
	}
	if (rcu)
		synchronize_rcu();
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);

void blk_mq_wake_waiters(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i)
		if (blk_mq_hw_queue_mapped(hctx))
			blk_mq_tag_wakeup_all(hctx->tags, true);

	/*
	 * If we are called because the queue has now been marked as
	 * dying, we need to ensure that processes currently waiting on
	 * the queue are notified as well.
	 */
	wake_up_all(&q->mq_freeze_wq);
}

bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
{
	return blk_mq_has_free_tags(hctx->tags);
}
EXPORT_SYMBOL(blk_mq_can_queue);

void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
			struct request *rq, unsigned int op)
{
	INIT_LIST_HEAD(&rq->queuelist);
	/* csd/requeue_work/fifo_time is initialized before use */
	rq->q = q;
	rq->mq_ctx = ctx;
	rq->cmd_flags = op;
	if (blk_queue_io_stat(q))
		rq->rq_flags |= RQF_IO_STAT;
	/* do not touch atomic flags, it needs atomic ops against the timer */
	rq->cpu = -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->rq_disk = NULL;
	rq->part = NULL;
	rq->start_time = jiffies;
#ifdef CONFIG_BLK_CGROUP
	rq->rl = NULL;
	set_start_time_ns(rq);
	rq->io_start_time_ns = 0;
#endif
	rq->nr_phys_segments = 0;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	rq->nr_integrity_segments = 0;
#endif
	rq->special = NULL;
	/* tag was already set */
	rq->errors = 0;
	rq->extra_len = 0;

	INIT_LIST_HEAD(&rq->timeout_list);
	rq->timeout = 0;

	rq->end_io = NULL;
	rq->end_io_data = NULL;
	rq->next_rq = NULL;

	ctx->rq_dispatched[op_is_sync(op)]++;
}
EXPORT_SYMBOL_GPL(blk_mq_rq_ctx_init);

struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data,
				       unsigned int op)
{
	struct request *rq;
	unsigned int tag;

	tag = blk_mq_get_tag(data);
	if (tag != BLK_MQ_TAG_FAIL) {
		struct blk_mq_tags *tags = blk_mq_tags_from_data(data);

		rq = tags->static_rqs[tag];

		if (data->flags & BLK_MQ_REQ_INTERNAL) {
			rq->tag = -1;
			rq->internal_tag = tag;
		} else {
			if (blk_mq_tag_busy(data->hctx)) {
				rq->rq_flags = RQF_MQ_INFLIGHT;
				atomic_inc(&data->hctx->nr_active);
			}
			rq->tag = tag;
			rq->internal_tag = -1;
		}

		blk_mq_rq_ctx_init(data->q, data->ctx, rq, op);
		return rq;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(__blk_mq_alloc_request);

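/*
 * Note on the two tag spaces used above: with an I/O scheduler attached,
 * requests are allocated with BLK_MQ_REQ_INTERNAL and carry only a
 * scheduler tag in rq->internal_tag (rq->tag stays -1); a driver tag is
 * assigned later, at dispatch time, by blk_mq_get_driver_tag(). Without a
 * scheduler, the tag returned here is the driver tag and rq->internal_tag
 * is -1.
 */
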
struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
		unsigned int flags)
{
	struct blk_mq_alloc_data alloc_data = { .flags = flags };
	struct request *rq;
	int ret;

	ret = blk_queue_enter(q, flags & BLK_MQ_REQ_NOWAIT);
	if (ret)
		return ERR_PTR(ret);

	rq = blk_mq_sched_get_request(q, NULL, rw, &alloc_data);

	blk_mq_put_ctx(alloc_data.ctx);
	blk_queue_exit(q);

	if (!rq)
		return ERR_PTR(-EWOULDBLOCK);

	rq->__data_len = 0;
	rq->__sector = (sector_t) -1;
	rq->bio = rq->biotail = NULL;
	return rq;
}
EXPORT_SYMBOL(blk_mq_alloc_request);

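/*
 * Illustrative sketch (not part of this file): a passthrough-style caller
 * allocating a request outside of normal bio submission. The error
 * handling matches the ERR_PTR() returns above; using REQ_OP_DRV_IN as
 * the op is an assumption for the example.
 *
 *	struct request *rq;
 *
 *	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, BLK_MQ_REQ_NOWAIT);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	... set up the payload, issue it, e.g. via blk_execute_rq() ...
 *	blk_mq_free_request(rq);
 */
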
struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
		unsigned int flags, unsigned int hctx_idx)
{
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	struct request *rq;
	struct blk_mq_alloc_data alloc_data;
	int ret;

	/*
	 * If the tag allocator sleeps we could get an allocation for a
	 * different hardware context. No need to complicate the low level
	 * allocator for this for the rare use case of a command tied to
	 * a specific queue.
	 */
	if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)))
		return ERR_PTR(-EINVAL);

	if (hctx_idx >= q->nr_hw_queues)
		return ERR_PTR(-EIO);

	ret = blk_queue_enter(q, true);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * Check if the hardware context is actually mapped to anything.
	 * If not tell the caller that it should skip this queue.
	 */
	hctx = q->queue_hw_ctx[hctx_idx];
	if (!blk_mq_hw_queue_mapped(hctx)) {
		ret = -EXDEV;
		goto out_queue_exit;
	}
	ctx = __blk_mq_get_ctx(q, cpumask_first(hctx->cpumask));

	blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
	rq = __blk_mq_alloc_request(&alloc_data, rw);
	if (!rq) {
		ret = -EWOULDBLOCK;
		goto out_queue_exit;
	}

	return rq;

out_queue_exit:
	blk_queue_exit(q);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);

bd166ef1
JA
324void __blk_mq_finish_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
325 struct request *rq)
320ae51f 326{
bd166ef1 327 const int sched_tag = rq->internal_tag;
320ae51f
JA
328 struct request_queue *q = rq->q;
329
e8064021 330 if (rq->rq_flags & RQF_MQ_INFLIGHT)
0d2602ca 331 atomic_dec(&hctx->nr_active);
87760e5e
JA
332
333 wbt_done(q->rq_wb, &rq->issue_stat);
e8064021 334 rq->rq_flags = 0;
0d2602ca 335
af76e555 336 clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
06426adf 337 clear_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
bd166ef1
JA
338 if (rq->tag != -1)
339 blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
340 if (sched_tag != -1)
341 blk_mq_sched_completed_request(hctx, rq);
50e1dab8 342 blk_mq_sched_restart_queues(hctx);
3ef28e83 343 blk_queue_exit(q);
320ae51f
JA
344}
345
bd166ef1 346static void blk_mq_finish_hctx_request(struct blk_mq_hw_ctx *hctx,
16a3c2a7 347 struct request *rq)
320ae51f
JA
348{
349 struct blk_mq_ctx *ctx = rq->mq_ctx;
320ae51f
JA
350
351 ctx->rq_completed[rq_is_sync(rq)]++;
bd166ef1
JA
352 __blk_mq_finish_request(hctx, ctx, rq);
353}
354
355void blk_mq_finish_request(struct request *rq)
356{
357 blk_mq_finish_hctx_request(blk_mq_map_queue(rq->q, rq->mq_ctx->cpu), rq);
7c7f2f2b 358}
7c7f2f2b
JA
359
360void blk_mq_free_request(struct request *rq)
361{
bd166ef1 362 blk_mq_sched_put_request(rq);
320ae51f 363}
1a3b595a 364EXPORT_SYMBOL_GPL(blk_mq_free_request);
320ae51f 365
c8a446ad 366inline void __blk_mq_end_request(struct request *rq, int error)
320ae51f 367{
0d11e6ac
ML
368 blk_account_io_done(rq);
369
91b63639 370 if (rq->end_io) {
87760e5e 371 wbt_done(rq->q->rq_wb, &rq->issue_stat);
320ae51f 372 rq->end_io(rq, error);
91b63639
CH
373 } else {
374 if (unlikely(blk_bidi_rq(rq)))
375 blk_mq_free_request(rq->next_rq);
320ae51f 376 blk_mq_free_request(rq);
91b63639 377 }
320ae51f 378}
c8a446ad 379EXPORT_SYMBOL(__blk_mq_end_request);
63151a44 380
c8a446ad 381void blk_mq_end_request(struct request *rq, int error)
63151a44
CH
382{
383 if (blk_update_request(rq, error, blk_rq_bytes(rq)))
384 BUG();
c8a446ad 385 __blk_mq_end_request(rq, error);
63151a44 386}
c8a446ad 387EXPORT_SYMBOL(blk_mq_end_request);
320ae51f 388
30a91cb4 389static void __blk_mq_complete_request_remote(void *data)
320ae51f 390{
3d6efbf6 391 struct request *rq = data;
320ae51f 392
30a91cb4 393 rq->q->softirq_done_fn(rq);
320ae51f 394}
320ae51f 395
ed851860 396static void blk_mq_ipi_complete_request(struct request *rq)
320ae51f
JA
397{
398 struct blk_mq_ctx *ctx = rq->mq_ctx;
38535201 399 bool shared = false;
320ae51f
JA
400 int cpu;
401
38535201 402 if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
30a91cb4
CH
403 rq->q->softirq_done_fn(rq);
404 return;
405 }
320ae51f
JA
406
407 cpu = get_cpu();
38535201
CH
408 if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
409 shared = cpus_share_cache(cpu, ctx->cpu);
410
411 if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
30a91cb4 412 rq->csd.func = __blk_mq_complete_request_remote;
3d6efbf6
CH
413 rq->csd.info = rq;
414 rq->csd.flags = 0;
c46fff2a 415 smp_call_function_single_async(ctx->cpu, &rq->csd);
3d6efbf6 416 } else {
30a91cb4 417 rq->q->softirq_done_fn(rq);
3d6efbf6 418 }
320ae51f
JA
419 put_cpu();
420}
30a91cb4 421
cf43e6be
JA
422static void blk_mq_stat_add(struct request *rq)
423{
424 if (rq->rq_flags & RQF_STATS) {
		/*
		 * We could use rq->mq_ctx here, but there's less of a risk
		 * of races if we have the completion event add the stats
		 * to the local software queue.
		 */
430 struct blk_mq_ctx *ctx;
431
432 ctx = __blk_mq_get_ctx(rq->q, raw_smp_processor_id());
433 blk_stat_add(&ctx->stat[rq_data_dir(rq)], rq);
434 }
435}
436
1fa8cc52 437static void __blk_mq_complete_request(struct request *rq)
ed851860
JA
438{
439 struct request_queue *q = rq->q;
440
cf43e6be
JA
441 blk_mq_stat_add(rq);
442
ed851860 443 if (!q->softirq_done_fn)
c8a446ad 444 blk_mq_end_request(rq, rq->errors);
ed851860
JA
445 else
446 blk_mq_ipi_complete_request(rq);
447}
448
30a91cb4
CH
449/**
450 * blk_mq_complete_request - end I/O on a request
451 * @rq: the request being processed
452 *
453 * Description:
454 * Ends all I/O on a request. It does not handle partial completions.
455 * The actual completion happens out-of-order, through a IPI handler.
456 **/
f4829a9b 457void blk_mq_complete_request(struct request *rq, int error)
30a91cb4 458{
95f09684
JA
459 struct request_queue *q = rq->q;
460
461 if (unlikely(blk_should_fake_timeout(q)))
30a91cb4 462 return;
f4829a9b
CH
463 if (!blk_mark_rq_complete(rq)) {
464 rq->errors = error;
ed851860 465 __blk_mq_complete_request(rq);
f4829a9b 466 }
30a91cb4
CH
467}
468EXPORT_SYMBOL(blk_mq_complete_request);
320ae51f 469
973c0191
KB
470int blk_mq_request_started(struct request *rq)
471{
472 return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
473}
474EXPORT_SYMBOL_GPL(blk_mq_request_started);
475
e2490073 476void blk_mq_start_request(struct request *rq)
320ae51f
JA
477{
478 struct request_queue *q = rq->q;
479
bd166ef1
JA
480 blk_mq_sched_started_request(rq);
481
320ae51f
JA
482 trace_block_rq_issue(q, rq);
483
cf43e6be
JA
484 if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
485 blk_stat_set_issue_time(&rq->issue_stat);
486 rq->rq_flags |= RQF_STATS;
87760e5e 487 wbt_issue(q->rq_wb, &rq->issue_stat);
cf43e6be
JA
488 }
489
2b8393b4 490 blk_add_timer(rq);
87ee7b11 491
538b7534
JA
492 /*
493 * Ensure that ->deadline is visible before set the started
494 * flag and clear the completed flag.
495 */
496 smp_mb__before_atomic();
497
87ee7b11
JA
498 /*
499 * Mark us as started and clear complete. Complete might have been
500 * set if requeue raced with timeout, which then marked it as
501 * complete. So be sure to clear complete again when we start
502 * the request, otherwise we'll ignore the completion event.
503 */
4b570521
JA
504 if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
505 set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
506 if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
507 clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
49f5baa5
CH
508
509 if (q->dma_drain_size && blk_rq_bytes(rq)) {
510 /*
511 * Make sure space for the drain appears. We know we can do
512 * this because max_hw_segments has been adjusted to be one
513 * fewer than the device can handle.
514 */
515 rq->nr_phys_segments++;
516 }
320ae51f 517}
e2490073 518EXPORT_SYMBOL(blk_mq_start_request);
320ae51f 519
ed0791b2 520static void __blk_mq_requeue_request(struct request *rq)
320ae51f
JA
521{
522 struct request_queue *q = rq->q;
523
524 trace_block_rq_requeue(q, rq);
87760e5e 525 wbt_requeue(q->rq_wb, &rq->issue_stat);
bd166ef1 526 blk_mq_sched_requeue_request(rq);
49f5baa5 527
e2490073
CH
528 if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
529 if (q->dma_drain_size && blk_rq_bytes(rq))
530 rq->nr_phys_segments--;
531 }
320ae51f
JA
532}
533
2b053aca 534void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
ed0791b2 535{
ed0791b2 536 __blk_mq_requeue_request(rq);
ed0791b2 537
ed0791b2 538 BUG_ON(blk_queued_rq(rq));
2b053aca 539 blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
ed0791b2
CH
540}
541EXPORT_SYMBOL(blk_mq_requeue_request);
542
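/*
 * Illustrative sketch (not part of this file): a driver completion path
 * that cannot finish a request (for example a transient "busy" status
 * from the target) can push it back through the requeue list instead of
 * failing it. my_target_busy() is an assumption for the example;
 * blk_mq_requeue_request() is the real API.
 *
 *	if (my_target_busy(rq)) {
 *		blk_mq_requeue_request(rq, true);	(true == kick the requeue list)
 *		return;
 *	}
 *	blk_mq_end_request(rq, 0);
 */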
6fca6a61
CH
543static void blk_mq_requeue_work(struct work_struct *work)
544{
545 struct request_queue *q =
2849450a 546 container_of(work, struct request_queue, requeue_work.work);
6fca6a61
CH
547 LIST_HEAD(rq_list);
548 struct request *rq, *next;
549 unsigned long flags;
550
551 spin_lock_irqsave(&q->requeue_lock, flags);
552 list_splice_init(&q->requeue_list, &rq_list);
553 spin_unlock_irqrestore(&q->requeue_lock, flags);
554
555 list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
e8064021 556 if (!(rq->rq_flags & RQF_SOFTBARRIER))
6fca6a61
CH
557 continue;
558
e8064021 559 rq->rq_flags &= ~RQF_SOFTBARRIER;
6fca6a61 560 list_del_init(&rq->queuelist);
bd6737f1 561 blk_mq_sched_insert_request(rq, true, false, false, true);
6fca6a61
CH
562 }
563
564 while (!list_empty(&rq_list)) {
565 rq = list_entry(rq_list.next, struct request, queuelist);
566 list_del_init(&rq->queuelist);
bd6737f1 567 blk_mq_sched_insert_request(rq, false, false, false, true);
6fca6a61
CH
568 }
569
52d7f1b5 570 blk_mq_run_hw_queues(q, false);
6fca6a61
CH
571}
572
2b053aca
BVA
573void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
574 bool kick_requeue_list)
6fca6a61
CH
575{
576 struct request_queue *q = rq->q;
577 unsigned long flags;
578
	/*
	 * We abuse this flag that is otherwise used by the I/O scheduler to
	 * request head insertion from the workqueue.
	 */
e8064021 583 BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);
6fca6a61
CH
584
585 spin_lock_irqsave(&q->requeue_lock, flags);
586 if (at_head) {
e8064021 587 rq->rq_flags |= RQF_SOFTBARRIER;
6fca6a61
CH
588 list_add(&rq->queuelist, &q->requeue_list);
589 } else {
590 list_add_tail(&rq->queuelist, &q->requeue_list);
591 }
592 spin_unlock_irqrestore(&q->requeue_lock, flags);
2b053aca
BVA
593
594 if (kick_requeue_list)
595 blk_mq_kick_requeue_list(q);
6fca6a61
CH
596}
597EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
598
599void blk_mq_kick_requeue_list(struct request_queue *q)
600{
2849450a 601 kblockd_schedule_delayed_work(&q->requeue_work, 0);
6fca6a61
CH
602}
603EXPORT_SYMBOL(blk_mq_kick_requeue_list);
604
2849450a
MS
605void blk_mq_delay_kick_requeue_list(struct request_queue *q,
606 unsigned long msecs)
607{
608 kblockd_schedule_delayed_work(&q->requeue_work,
609 msecs_to_jiffies(msecs));
610}
611EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
612
1885b24d
JA
613void blk_mq_abort_requeue_list(struct request_queue *q)
614{
615 unsigned long flags;
616 LIST_HEAD(rq_list);
617
618 spin_lock_irqsave(&q->requeue_lock, flags);
619 list_splice_init(&q->requeue_list, &rq_list);
620 spin_unlock_irqrestore(&q->requeue_lock, flags);
621
622 while (!list_empty(&rq_list)) {
623 struct request *rq;
624
625 rq = list_first_entry(&rq_list, struct request, queuelist);
626 list_del_init(&rq->queuelist);
627 rq->errors = -EIO;
628 blk_mq_end_request(rq, rq->errors);
629 }
630}
631EXPORT_SYMBOL(blk_mq_abort_requeue_list);
632
0e62f51f
JA
633struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
634{
88c7b2b7
JA
635 if (tag < tags->nr_tags) {
636 prefetch(tags->rqs[tag]);
4ee86bab 637 return tags->rqs[tag];
88c7b2b7 638 }
4ee86bab
HR
639
640 return NULL;
24d2f903
CH
641}
642EXPORT_SYMBOL(blk_mq_tag_to_rq);
643
320ae51f 644struct blk_mq_timeout_data {
46f92d42
CH
645 unsigned long next;
646 unsigned int next_set;
320ae51f
JA
647};
648
90415837 649void blk_mq_rq_timed_out(struct request *req, bool reserved)
320ae51f 650{
f8a5b122 651 const struct blk_mq_ops *ops = req->q->mq_ops;
46f92d42 652 enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;
87ee7b11
JA
653
	/*
	 * We know that complete is set at this point. If STARTED isn't set
	 * anymore, then the request isn't active and the "timeout" should
	 * just be ignored. This can happen due to the bitflag ordering.
	 * Timeout first checks if STARTED is set, and if it is, assumes
	 * the request is active. But if we race with completion, then
	 * both flags will get cleared. So check here again, and ignore
	 * a timeout event with a request that isn't active.
	 */
46f92d42
CH
663 if (!test_bit(REQ_ATOM_STARTED, &req->atomic_flags))
664 return;
87ee7b11 665
46f92d42 666 if (ops->timeout)
0152fb6b 667 ret = ops->timeout(req, reserved);
46f92d42
CH
668
669 switch (ret) {
670 case BLK_EH_HANDLED:
671 __blk_mq_complete_request(req);
672 break;
673 case BLK_EH_RESET_TIMER:
674 blk_add_timer(req);
675 blk_clear_rq_complete(req);
676 break;
677 case BLK_EH_NOT_HANDLED:
678 break;
679 default:
680 printk(KERN_ERR "block: bad eh return: %d\n", ret);
681 break;
682 }
87ee7b11 683}
5b3f25fc 684
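/*
 * Illustrative sketch (not part of this file): the shape of a driver
 * ->timeout() callback, matching the switch above. my_device_recovering()
 * is an assumption for the example.
 *
 *	static enum blk_eh_timer_return my_timeout(struct request *rq,
 *						   bool reserved)
 *	{
 *		if (my_device_recovering(rq->q->queuedata))
 *			return BLK_EH_RESET_TIMER;	(re-arm and retry later)
 *		rq->errors = -ETIMEDOUT;
 *		return BLK_EH_HANDLED;	(core then completes the request)
 *	}
 */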
81481eb4
CH
685static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
686 struct request *rq, void *priv, bool reserved)
687{
688 struct blk_mq_timeout_data *data = priv;
87ee7b11 689
eb130dbf
KB
690 if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
691 /*
692 * If a request wasn't started before the queue was
693 * marked dying, kill it here or it'll go unnoticed.
694 */
a59e0f57
KB
695 if (unlikely(blk_queue_dying(rq->q))) {
696 rq->errors = -EIO;
697 blk_mq_end_request(rq, rq->errors);
698 }
46f92d42 699 return;
eb130dbf 700 }
87ee7b11 701
46f92d42
CH
702 if (time_after_eq(jiffies, rq->deadline)) {
703 if (!blk_mark_rq_complete(rq))
0152fb6b 704 blk_mq_rq_timed_out(rq, reserved);
46f92d42
CH
705 } else if (!data->next_set || time_after(data->next, rq->deadline)) {
706 data->next = rq->deadline;
707 data->next_set = 1;
708 }
87ee7b11
JA
709}
710
287922eb 711static void blk_mq_timeout_work(struct work_struct *work)
320ae51f 712{
287922eb
CH
713 struct request_queue *q =
714 container_of(work, struct request_queue, timeout_work);
81481eb4
CH
715 struct blk_mq_timeout_data data = {
716 .next = 0,
717 .next_set = 0,
718 };
81481eb4 719 int i;
320ae51f 720
71f79fb3
GKB
721 /* A deadlock might occur if a request is stuck requiring a
722 * timeout at the same time a queue freeze is waiting
723 * completion, since the timeout code would not be able to
724 * acquire the queue reference here.
725 *
726 * That's why we don't use blk_queue_enter here; instead, we use
727 * percpu_ref_tryget directly, because we need to be able to
728 * obtain a reference even in the short window between the queue
729 * starting to freeze, by dropping the first reference in
730 * blk_mq_freeze_queue_start, and the moment the last request is
731 * consumed, marked by the instant q_usage_counter reaches
732 * zero.
733 */
734 if (!percpu_ref_tryget(&q->q_usage_counter))
287922eb
CH
735 return;
736
0bf6cd5b 737 blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data);
320ae51f 738
81481eb4
CH
739 if (data.next_set) {
740 data.next = blk_rq_timeout(round_jiffies_up(data.next));
741 mod_timer(&q->timeout, data.next);
0d2602ca 742 } else {
0bf6cd5b
CH
743 struct blk_mq_hw_ctx *hctx;
744
f054b56c
ML
745 queue_for_each_hw_ctx(q, hctx, i) {
746 /* the hctx may be unmapped, so check it here */
747 if (blk_mq_hw_queue_mapped(hctx))
748 blk_mq_tag_idle(hctx);
749 }
0d2602ca 750 }
287922eb 751 blk_queue_exit(q);
320ae51f
JA
752}
753
754/*
755 * Reverse check our software queue for entries that we could potentially
756 * merge with. Currently includes a hand-wavy stop count of 8, to not spend
757 * too much time checking for merges.
758 */
759static bool blk_mq_attempt_merge(struct request_queue *q,
760 struct blk_mq_ctx *ctx, struct bio *bio)
761{
762 struct request *rq;
763 int checked = 8;
764
765 list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
34fe7c05 766 bool merged = false;
320ae51f
JA
767
768 if (!checked--)
769 break;
770
771 if (!blk_rq_merge_ok(rq, bio))
772 continue;
773
34fe7c05
CH
774 switch (blk_try_merge(rq, bio)) {
775 case ELEVATOR_BACK_MERGE:
776 if (blk_mq_sched_allow_merge(q, rq, bio))
777 merged = bio_attempt_back_merge(q, rq, bio);
bd166ef1 778 break;
34fe7c05
CH
779 case ELEVATOR_FRONT_MERGE:
780 if (blk_mq_sched_allow_merge(q, rq, bio))
781 merged = bio_attempt_front_merge(q, rq, bio);
320ae51f 782 break;
1e739730
CH
783 case ELEVATOR_DISCARD_MERGE:
784 merged = bio_attempt_discard_merge(q, rq, bio);
320ae51f 785 break;
34fe7c05
CH
786 default:
787 continue;
320ae51f 788 }
34fe7c05
CH
789
790 if (merged)
791 ctx->rq_merged++;
792 return merged;
320ae51f
JA
793 }
794
795 return false;
796}
797
88459642
OS
798struct flush_busy_ctx_data {
799 struct blk_mq_hw_ctx *hctx;
800 struct list_head *list;
801};
802
803static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
804{
805 struct flush_busy_ctx_data *flush_data = data;
806 struct blk_mq_hw_ctx *hctx = flush_data->hctx;
807 struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
808
809 sbitmap_clear_bit(sb, bitnr);
810 spin_lock(&ctx->lock);
811 list_splice_tail_init(&ctx->rq_list, flush_data->list);
812 spin_unlock(&ctx->lock);
813 return true;
814}
815
/*
 * Process software queues that have been marked busy, splicing them
 * to the for-dispatch list.
 */
2c3ad667 820void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
1429d7c9 821{
88459642
OS
822 struct flush_busy_ctx_data data = {
823 .hctx = hctx,
824 .list = list,
825 };
1429d7c9 826
88459642 827 sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
1429d7c9 828}
2c3ad667 829EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs);
1429d7c9 830
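/*
 * The sbitmap walk above is how a hardware queue discovers which software
 * queues (blk_mq_ctx) currently hold requests: blk_mq_hctx_mark_pending()
 * sets the ctx's bit in hctx->ctx_map at insert time, and flush_busy_ctx()
 * clears it while splicing that ctx's rq_list onto the dispatch list.
 */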
703fd1c0
JA
831static inline unsigned int queued_to_index(unsigned int queued)
832{
833 if (!queued)
834 return 0;
1429d7c9 835
703fd1c0 836 return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
1429d7c9
JA
837}
838
bd6737f1
JA
839bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
840 bool wait)
bd166ef1
JA
841{
842 struct blk_mq_alloc_data data = {
843 .q = rq->q,
bd166ef1
JA
844 .hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu),
845 .flags = wait ? 0 : BLK_MQ_REQ_NOWAIT,
846 };
847
bd166ef1
JA
848 if (rq->tag != -1) {
849done:
850 if (hctx)
851 *hctx = data.hctx;
852 return true;
853 }
854
415b806d
SG
855 if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag))
856 data.flags |= BLK_MQ_REQ_RESERVED;
857
bd166ef1
JA
858 rq->tag = blk_mq_get_tag(&data);
859 if (rq->tag >= 0) {
200e86b3
JA
860 if (blk_mq_tag_busy(data.hctx)) {
861 rq->rq_flags |= RQF_MQ_INFLIGHT;
862 atomic_inc(&data.hctx->nr_active);
863 }
bd166ef1
JA
864 data.hctx->tags->rqs[rq->tag] = rq;
865 goto done;
866 }
867
868 return false;
869}
870
99cf1dc5
JA
871static void blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
872 struct request *rq)
873{
874 if (rq->tag == -1 || rq->internal_tag == -1)
875 return;
876
877 blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag);
878 rq->tag = -1;
879
880 if (rq->rq_flags & RQF_MQ_INFLIGHT) {
881 rq->rq_flags &= ~RQF_MQ_INFLIGHT;
882 atomic_dec(&hctx->nr_active);
883 }
884}
885
/*
 * If we fail getting a driver tag because all the driver tags are already
 * assigned and on the dispatch list, BUT the first entry does not have a
 * tag, then we could deadlock. For that case, move entries with assigned
 * driver tags to the front, leaving the set of tagged requests in the
 * same order, and the untagged set in the same order.
 */
static bool reorder_tags_to_front(struct list_head *list)
{
	struct request *rq, *tmp, *first = NULL;

	list_for_each_entry_safe_reverse(rq, tmp, list, queuelist) {
		if (rq == first)
			break;
		if (rq->tag != -1) {
			list_move(&rq->queuelist, list);
			if (!first)
				first = rq;
		}
	}

	return first != NULL;
}

static int blk_mq_dispatch_wake(wait_queue_t *wait, unsigned mode, int flags,
				void *key)
{
	struct blk_mq_hw_ctx *hctx;

	hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);

	list_del(&wait->task_list);
	clear_bit_unlock(BLK_MQ_S_TAG_WAITING, &hctx->state);
	blk_mq_run_hw_queue(hctx, true);
	return 1;
}

static bool blk_mq_dispatch_wait_add(struct blk_mq_hw_ctx *hctx)
{
	struct sbq_wait_state *ws;

	/*
	 * The TAG_WAITING bit serves as a lock protecting hctx->dispatch_wait.
	 * The thread which wins the race to grab this bit adds the hardware
	 * queue to the wait queue.
	 */
	if (test_bit(BLK_MQ_S_TAG_WAITING, &hctx->state) ||
	    test_and_set_bit_lock(BLK_MQ_S_TAG_WAITING, &hctx->state))
		return false;

	init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
	ws = bt_wait_ptr(&hctx->tags->bitmap_tags, hctx);

	/*
	 * As soon as this returns, it's no longer safe to fiddle with
	 * hctx->dispatch_wait, since a completion can wake up the wait queue
	 * and unlock the bit.
	 */
	add_wait_queue(&ws->wait, &hctx->dispatch_wait);
	return true;
}

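/*
 * Taken together, the two helpers above implement the "wait for a driver
 * tag" path used by blk_mq_dispatch_rq_list() below: when tag allocation
 * fails, the hctx is registered on the tag waitqueue exactly once (the
 * TAG_WAITING bit acts as the lock), and blk_mq_dispatch_wake() re-runs
 * the hardware queue as soon as a completion frees a tag.
 */
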
f04c3df3 948bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
320ae51f
JA
949{
950 struct request_queue *q = hctx->queue;
320ae51f 951 struct request *rq;
74c45052
JA
952 LIST_HEAD(driver_list);
953 struct list_head *dptr;
f04c3df3 954 int queued, ret = BLK_MQ_RQ_QUEUE_OK;
320ae51f 955
74c45052
JA
956 /*
957 * Start off with dptr being NULL, so we start the first request
958 * immediately, even if we have more pending.
959 */
960 dptr = NULL;
961
320ae51f
JA
962 /*
963 * Now process all the entries, sending them to the driver.
964 */
1429d7c9 965 queued = 0;
f04c3df3 966 while (!list_empty(list)) {
74c45052 967 struct blk_mq_queue_data bd;
320ae51f 968
f04c3df3 969 rq = list_first_entry(list, struct request, queuelist);
bd166ef1
JA
970 if (!blk_mq_get_driver_tag(rq, &hctx, false)) {
971 if (!queued && reorder_tags_to_front(list))
972 continue;
3c782d67
JA
973
974 /*
da55f2cc
OS
975 * The initial allocation attempt failed, so we need to
976 * rerun the hardware queue when a tag is freed.
3c782d67 977 */
da55f2cc
OS
978 if (blk_mq_dispatch_wait_add(hctx)) {
979 /*
980 * It's possible that a tag was freed in the
981 * window between the allocation failure and
982 * adding the hardware queue to the wait queue.
983 */
984 if (!blk_mq_get_driver_tag(rq, &hctx, false))
985 break;
986 } else {
3c782d67 987 break;
da55f2cc 988 }
bd166ef1 989 }
da55f2cc 990
320ae51f 991 list_del_init(&rq->queuelist);
320ae51f 992
74c45052
JA
993 bd.rq = rq;
994 bd.list = dptr;
f04c3df3 995 bd.last = list_empty(list);
74c45052
JA
996
997 ret = q->mq_ops->queue_rq(hctx, &bd);
320ae51f
JA
998 switch (ret) {
999 case BLK_MQ_RQ_QUEUE_OK:
1000 queued++;
52b9c330 1001 break;
320ae51f 1002 case BLK_MQ_RQ_QUEUE_BUSY:
99cf1dc5 1003 blk_mq_put_driver_tag(hctx, rq);
f04c3df3 1004 list_add(&rq->queuelist, list);
ed0791b2 1005 __blk_mq_requeue_request(rq);
320ae51f
JA
1006 break;
1007 default:
1008 pr_err("blk-mq: bad return on queue: %d\n", ret);
320ae51f 1009 case BLK_MQ_RQ_QUEUE_ERROR:
1e93b8c2 1010 rq->errors = -EIO;
c8a446ad 1011 blk_mq_end_request(rq, rq->errors);
320ae51f
JA
1012 break;
1013 }
1014
1015 if (ret == BLK_MQ_RQ_QUEUE_BUSY)
1016 break;
74c45052
JA
1017
1018 /*
1019 * We've done the first request. If we have more than 1
1020 * left in the list, set dptr to defer issue.
1021 */
f04c3df3 1022 if (!dptr && list->next != list->prev)
74c45052 1023 dptr = &driver_list;
320ae51f
JA
1024 }
1025
703fd1c0 1026 hctx->dispatched[queued_to_index(queued)]++;
320ae51f
JA
1027
1028 /*
1029 * Any items that need requeuing? Stuff them into hctx->dispatch,
1030 * that is where we will continue on next queue run.
1031 */
f04c3df3 1032 if (!list_empty(list)) {
320ae51f 1033 spin_lock(&hctx->lock);
c13660a0 1034 list_splice_init(list, &hctx->dispatch);
320ae51f 1035 spin_unlock(&hctx->lock);
f04c3df3 1036
		/*
		 * The queue is expected to be stopped with BLK_MQ_RQ_QUEUE_BUSY,
		 * but it's possible the queue is stopped and restarted again
		 * before this. Queue restart will dispatch requests. And since
		 * requests in rq_list aren't added into hctx->dispatch yet,
		 * the requests in rq_list might get lost.
		 *
		 * blk_mq_run_hw_queue() already checks the STOPPED bit.
		 *
		 * If RESTART or TAG_WAITING is set, then let completion restart
		 * the queue instead of potentially looping here.
		 */
da55f2cc
OS
1049 if (!blk_mq_sched_needs_restart(hctx) &&
1050 !test_bit(BLK_MQ_S_TAG_WAITING, &hctx->state))
bd166ef1 1051 blk_mq_run_hw_queue(hctx, true);
320ae51f 1052 }
f04c3df3 1053
2aa0f21d 1054 return queued != 0;
f04c3df3
JA
1055}
1056
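/*
 * Summary of the BUSY handling in blk_mq_dispatch_rq_list() above: on
 * BLK_MQ_RQ_QUEUE_BUSY the driver tag is released, the request goes back
 * to the head of the list, and whatever is left is spliced onto
 * hctx->dispatch. The queue is only re-run here if neither the scheduler
 * RESTART marking nor the TAG_WAITING bit will re-run it from completion,
 * so the leftover requests cannot be lost.
 */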
6a83e74d
BVA
1057static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
1058{
1059 int srcu_idx;
1060
1061 WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
1062 cpu_online(hctx->next_cpu));
1063
1064 if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
1065 rcu_read_lock();
bd166ef1 1066 blk_mq_sched_dispatch_requests(hctx);
6a83e74d
BVA
1067 rcu_read_unlock();
1068 } else {
1069 srcu_idx = srcu_read_lock(&hctx->queue_rq_srcu);
bd166ef1 1070 blk_mq_sched_dispatch_requests(hctx);
6a83e74d
BVA
1071 srcu_read_unlock(&hctx->queue_rq_srcu, srcu_idx);
1072 }
1073}
1074
506e931f
JA
1075/*
1076 * It'd be great if the workqueue API had a way to pass
1077 * in a mask and had some smarts for more clever placement.
1078 * For now we just round-robin here, switching for every
1079 * BLK_MQ_CPU_WORK_BATCH queued items.
1080 */
1081static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
1082{
b657d7e6
CH
1083 if (hctx->queue->nr_hw_queues == 1)
1084 return WORK_CPU_UNBOUND;
506e931f
JA
1085
1086 if (--hctx->next_cpu_batch <= 0) {
c02ebfdd 1087 int next_cpu;
506e931f
JA
1088
1089 next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
1090 if (next_cpu >= nr_cpu_ids)
1091 next_cpu = cpumask_first(hctx->cpumask);
1092
1093 hctx->next_cpu = next_cpu;
1094 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
1095 }
1096
b657d7e6 1097 return hctx->next_cpu;
506e931f
JA
1098}
1099
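/*
 * Worked example for the round-robin above (cpumask values assumed for
 * illustration): with hctx->cpumask = {0, 4, 8} and BLK_MQ_CPU_WORK_BATCH
 * of 8, run_work is scheduled on CPU 0 for eight queue runs, then on
 * CPU 4 for the next eight, then CPU 8, before wrapping back to CPU 0 via
 * cpumask_first().
 */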
320ae51f
JA
1100void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
1101{
5d1b25c1
BVA
1102 if (unlikely(blk_mq_hctx_stopped(hctx) ||
1103 !blk_mq_hw_queue_mapped(hctx)))
320ae51f
JA
1104 return;
1105
1b792f2f 1106 if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
2a90d4aa
PB
1107 int cpu = get_cpu();
1108 if (cpumask_test_cpu(cpu, hctx->cpumask)) {
398205b8 1109 __blk_mq_run_hw_queue(hctx);
2a90d4aa 1110 put_cpu();
398205b8
PB
1111 return;
1112 }
e4043dcf 1113
2a90d4aa 1114 put_cpu();
e4043dcf 1115 }
398205b8 1116
27489a3c 1117 kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work);
320ae51f
JA
1118}
1119
b94ec296 1120void blk_mq_run_hw_queues(struct request_queue *q, bool async)
320ae51f
JA
1121{
1122 struct blk_mq_hw_ctx *hctx;
1123 int i;
1124
1125 queue_for_each_hw_ctx(q, hctx, i) {
bd166ef1 1126 if (!blk_mq_hctx_has_pending(hctx) ||
5d1b25c1 1127 blk_mq_hctx_stopped(hctx))
320ae51f
JA
1128 continue;
1129
b94ec296 1130 blk_mq_run_hw_queue(hctx, async);
320ae51f
JA
1131 }
1132}
b94ec296 1133EXPORT_SYMBOL(blk_mq_run_hw_queues);
320ae51f 1134
fd001443
BVA
1135/**
1136 * blk_mq_queue_stopped() - check whether one or more hctxs have been stopped
1137 * @q: request queue.
1138 *
1139 * The caller is responsible for serializing this function against
1140 * blk_mq_{start,stop}_hw_queue().
1141 */
1142bool blk_mq_queue_stopped(struct request_queue *q)
1143{
1144 struct blk_mq_hw_ctx *hctx;
1145 int i;
1146
1147 queue_for_each_hw_ctx(q, hctx, i)
1148 if (blk_mq_hctx_stopped(hctx))
1149 return true;
1150
1151 return false;
1152}
1153EXPORT_SYMBOL(blk_mq_queue_stopped);
1154
320ae51f
JA
1155void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
1156{
27489a3c 1157 cancel_work(&hctx->run_work);
70f4db63 1158 cancel_delayed_work(&hctx->delay_work);
320ae51f
JA
1159 set_bit(BLK_MQ_S_STOPPED, &hctx->state);
1160}
1161EXPORT_SYMBOL(blk_mq_stop_hw_queue);
1162
280d45f6
CH
1163void blk_mq_stop_hw_queues(struct request_queue *q)
1164{
1165 struct blk_mq_hw_ctx *hctx;
1166 int i;
1167
1168 queue_for_each_hw_ctx(q, hctx, i)
1169 blk_mq_stop_hw_queue(hctx);
1170}
1171EXPORT_SYMBOL(blk_mq_stop_hw_queues);
1172
320ae51f
JA
1173void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
1174{
1175 clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
e4043dcf 1176
0ffbce80 1177 blk_mq_run_hw_queue(hctx, false);
320ae51f
JA
1178}
1179EXPORT_SYMBOL(blk_mq_start_hw_queue);
1180
2f268556
CH
1181void blk_mq_start_hw_queues(struct request_queue *q)
1182{
1183 struct blk_mq_hw_ctx *hctx;
1184 int i;
1185
1186 queue_for_each_hw_ctx(q, hctx, i)
1187 blk_mq_start_hw_queue(hctx);
1188}
1189EXPORT_SYMBOL(blk_mq_start_hw_queues);
1190
ae911c5e
JA
1191void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
1192{
1193 if (!blk_mq_hctx_stopped(hctx))
1194 return;
1195
1196 clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
1197 blk_mq_run_hw_queue(hctx, async);
1198}
1199EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);
1200
1b4a3258 1201void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
320ae51f
JA
1202{
1203 struct blk_mq_hw_ctx *hctx;
1204 int i;
1205
ae911c5e
JA
1206 queue_for_each_hw_ctx(q, hctx, i)
1207 blk_mq_start_stopped_hw_queue(hctx, async);
320ae51f
JA
1208}
1209EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
1210
70f4db63 1211static void blk_mq_run_work_fn(struct work_struct *work)
320ae51f
JA
1212{
1213 struct blk_mq_hw_ctx *hctx;
1214
27489a3c 1215 hctx = container_of(work, struct blk_mq_hw_ctx, run_work);
e4043dcf 1216
320ae51f
JA
1217 __blk_mq_run_hw_queue(hctx);
1218}
1219
70f4db63
CH
1220static void blk_mq_delay_work_fn(struct work_struct *work)
1221{
1222 struct blk_mq_hw_ctx *hctx;
1223
1224 hctx = container_of(work, struct blk_mq_hw_ctx, delay_work.work);
1225
1226 if (test_and_clear_bit(BLK_MQ_S_STOPPED, &hctx->state))
1227 __blk_mq_run_hw_queue(hctx);
1228}
1229
1230void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
1231{
19c66e59
ML
1232 if (unlikely(!blk_mq_hw_queue_mapped(hctx)))
1233 return;
70f4db63 1234
7e79dadc 1235 blk_mq_stop_hw_queue(hctx);
b657d7e6
CH
1236 kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
1237 &hctx->delay_work, msecs_to_jiffies(msecs));
70f4db63
CH
1238}
1239EXPORT_SYMBOL(blk_mq_delay_queue);
1240
cfd0c552 1241static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
cfd0c552
ML
1242 struct request *rq,
1243 bool at_head)
320ae51f 1244{
e57690fe
JA
1245 struct blk_mq_ctx *ctx = rq->mq_ctx;
1246
01b983c9
JA
1247 trace_block_rq_insert(hctx->queue, rq);
1248
72a0a36e
CH
1249 if (at_head)
1250 list_add(&rq->queuelist, &ctx->rq_list);
1251 else
1252 list_add_tail(&rq->queuelist, &ctx->rq_list);
cfd0c552 1253}
4bb659b1 1254
2c3ad667
JA
1255void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
1256 bool at_head)
cfd0c552
ML
1257{
1258 struct blk_mq_ctx *ctx = rq->mq_ctx;
1259
e57690fe 1260 __blk_mq_insert_req_list(hctx, rq, at_head);
320ae51f 1261 blk_mq_hctx_mark_pending(hctx, ctx);
320ae51f
JA
1262}
1263
bd166ef1
JA
1264void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
1265 struct list_head *list)
320ae51f
JA
1266
1267{
320ae51f
JA
1268 /*
1269 * preemption doesn't flush plug list, so it's possible ctx->cpu is
1270 * offline now
1271 */
1272 spin_lock(&ctx->lock);
1273 while (!list_empty(list)) {
1274 struct request *rq;
1275
1276 rq = list_first_entry(list, struct request, queuelist);
e57690fe 1277 BUG_ON(rq->mq_ctx != ctx);
320ae51f 1278 list_del_init(&rq->queuelist);
e57690fe 1279 __blk_mq_insert_req_list(hctx, rq, false);
320ae51f 1280 }
cfd0c552 1281 blk_mq_hctx_mark_pending(hctx, ctx);
320ae51f 1282 spin_unlock(&ctx->lock);
320ae51f
JA
1283}
1284
1285static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
1286{
1287 struct request *rqa = container_of(a, struct request, queuelist);
1288 struct request *rqb = container_of(b, struct request, queuelist);
1289
1290 return !(rqa->mq_ctx < rqb->mq_ctx ||
1291 (rqa->mq_ctx == rqb->mq_ctx &&
1292 blk_rq_pos(rqa) < blk_rq_pos(rqb)));
1293}
1294
1295void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1296{
1297 struct blk_mq_ctx *this_ctx;
1298 struct request_queue *this_q;
1299 struct request *rq;
1300 LIST_HEAD(list);
1301 LIST_HEAD(ctx_list);
1302 unsigned int depth;
1303
1304 list_splice_init(&plug->mq_list, &list);
1305
1306 list_sort(NULL, &list, plug_ctx_cmp);
1307
1308 this_q = NULL;
1309 this_ctx = NULL;
1310 depth = 0;
1311
1312 while (!list_empty(&list)) {
1313 rq = list_entry_rq(list.next);
1314 list_del_init(&rq->queuelist);
1315 BUG_ON(!rq->q);
1316 if (rq->mq_ctx != this_ctx) {
1317 if (this_ctx) {
bd166ef1
JA
1318 trace_block_unplug(this_q, depth, from_schedule);
1319 blk_mq_sched_insert_requests(this_q, this_ctx,
1320 &ctx_list,
1321 from_schedule);
320ae51f
JA
1322 }
1323
1324 this_ctx = rq->mq_ctx;
1325 this_q = rq->q;
1326 depth = 0;
1327 }
1328
1329 depth++;
1330 list_add_tail(&rq->queuelist, &ctx_list);
1331 }
1332
1333 /*
1334 * If 'this_ctx' is set, we know we have entries to complete
1335 * on 'ctx_list'. Do those.
1336 */
1337 if (this_ctx) {
bd166ef1
JA
1338 trace_block_unplug(this_q, depth, from_schedule);
1339 blk_mq_sched_insert_requests(this_q, this_ctx, &ctx_list,
1340 from_schedule);
320ae51f
JA
1341 }
1342}
1343
1344static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
1345{
1346 init_request_from_bio(rq, bio);
4b570521 1347
6e85eaf3 1348 blk_account_io_start(rq, true);
320ae51f
JA
1349}
1350
274a5843
JA
1351static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
1352{
1353 return (hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
1354 !blk_queue_nomerges(hctx->queue);
1355}
1356
07068d5b
JA
1357static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
1358 struct blk_mq_ctx *ctx,
1359 struct request *rq, struct bio *bio)
320ae51f 1360{
e18378a6 1361 if (!hctx_allow_merges(hctx) || !bio_mergeable(bio)) {
07068d5b
JA
1362 blk_mq_bio_to_request(rq, bio);
1363 spin_lock(&ctx->lock);
1364insert_rq:
1365 __blk_mq_insert_request(hctx, rq, false);
1366 spin_unlock(&ctx->lock);
1367 return false;
1368 } else {
274a5843
JA
1369 struct request_queue *q = hctx->queue;
1370
07068d5b
JA
1371 spin_lock(&ctx->lock);
1372 if (!blk_mq_attempt_merge(q, ctx, bio)) {
1373 blk_mq_bio_to_request(rq, bio);
1374 goto insert_rq;
1375 }
320ae51f 1376
07068d5b 1377 spin_unlock(&ctx->lock);
bd166ef1 1378 __blk_mq_finish_request(hctx, ctx, rq);
07068d5b 1379 return true;
14ec77f3 1380 }
07068d5b 1381}
14ec77f3 1382
fd2d3326
JA
1383static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
1384{
bd166ef1
JA
1385 if (rq->tag != -1)
1386 return blk_tag_to_qc_t(rq->tag, hctx->queue_num, false);
1387
1388 return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
fd2d3326
JA
1389}
1390
066a4a73 1391static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
f984df1f 1392{
f984df1f 1393 struct request_queue *q = rq->q;
f984df1f
SL
1394 struct blk_mq_queue_data bd = {
1395 .rq = rq,
1396 .list = NULL,
1397 .last = 1
1398 };
bd166ef1
JA
1399 struct blk_mq_hw_ctx *hctx;
1400 blk_qc_t new_cookie;
1401 int ret;
f984df1f 1402
bd166ef1 1403 if (q->elevator)
2253efc8
BVA
1404 goto insert;
1405
bd166ef1
JA
1406 if (!blk_mq_get_driver_tag(rq, &hctx, false))
1407 goto insert;
1408
1409 new_cookie = request_to_qc_t(hctx, rq);
1410
f984df1f
SL
1411 /*
1412 * For OK queue, we are done. For error, kill it. Any other
1413 * error (busy), just add it to our list as we previously
1414 * would have done
1415 */
1416 ret = q->mq_ops->queue_rq(hctx, &bd);
7b371636
JA
1417 if (ret == BLK_MQ_RQ_QUEUE_OK) {
1418 *cookie = new_cookie;
2253efc8 1419 return;
7b371636 1420 }
f984df1f 1421
7b371636
JA
1422 __blk_mq_requeue_request(rq);
1423
1424 if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
1425 *cookie = BLK_QC_T_NONE;
1426 rq->errors = -EIO;
1427 blk_mq_end_request(rq, rq->errors);
2253efc8 1428 return;
f984df1f 1429 }
7b371636 1430
2253efc8 1431insert:
bd6737f1 1432 blk_mq_sched_insert_request(rq, false, true, true, false);
f984df1f
SL
1433}
1434
07068d5b
JA
1435/*
1436 * Multiple hardware queue variant. This will not use per-process plugs,
1437 * but will attempt to bypass the hctx queueing if we can go straight to
1438 * hardware for SYNC IO.
1439 */
dece1635 1440static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
07068d5b 1441{
ef295ecf 1442 const int is_sync = op_is_sync(bio->bi_opf);
f73f44eb 1443 const int is_flush_fua = op_is_flush(bio->bi_opf);
5a797e00 1444 struct blk_mq_alloc_data data = { .flags = 0 };
07068d5b 1445 struct request *rq;
6a83e74d 1446 unsigned int request_count = 0, srcu_idx;
f984df1f 1447 struct blk_plug *plug;
5b3f341f 1448 struct request *same_queue_rq = NULL;
7b371636 1449 blk_qc_t cookie;
87760e5e 1450 unsigned int wb_acct;
07068d5b
JA
1451
1452 blk_queue_bounce(q, &bio);
1453
1454 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
4246a0b6 1455 bio_io_error(bio);
dece1635 1456 return BLK_QC_T_NONE;
07068d5b
JA
1457 }
1458
54efd50b
KO
1459 blk_queue_split(q, &bio, q->bio_split);
1460
87c279e6
OS
1461 if (!is_flush_fua && !blk_queue_nomerges(q) &&
1462 blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
1463 return BLK_QC_T_NONE;
f984df1f 1464
bd166ef1
JA
1465 if (blk_mq_sched_bio_merge(q, bio))
1466 return BLK_QC_T_NONE;
1467
87760e5e
JA
1468 wb_acct = wbt_wait(q->rq_wb, bio, NULL);
1469
bd166ef1
JA
1470 trace_block_getrq(q, bio, bio->bi_opf);
1471
1472 rq = blk_mq_sched_get_request(q, bio, bio->bi_opf, &data);
87760e5e
JA
1473 if (unlikely(!rq)) {
1474 __wbt_done(q->rq_wb, wb_acct);
dece1635 1475 return BLK_QC_T_NONE;
87760e5e
JA
1476 }
1477
1478 wbt_track(&rq->issue_stat, wb_acct);
07068d5b 1479
fd2d3326 1480 cookie = request_to_qc_t(data.hctx, rq);
07068d5b
JA
1481
1482 if (unlikely(is_flush_fua)) {
0c2a6fe4
JA
1483 if (q->elevator)
1484 goto elv_insert;
07068d5b
JA
1485 blk_mq_bio_to_request(rq, bio);
1486 blk_insert_flush(rq);
0c2a6fe4 1487 goto run_queue;
07068d5b
JA
1488 }
1489
f984df1f 1490 plug = current->plug;
	/*
	 * If the driver supports deferred issue based on 'last', then
	 * queue it up like normal since we can potentially save some
	 * CPU this way.
	 */
f984df1f
SL
1496 if (((plug && !blk_queue_nomerges(q)) || is_sync) &&
1497 !(data.hctx->flags & BLK_MQ_F_DEFER_ISSUE)) {
1498 struct request *old_rq = NULL;
07068d5b
JA
1499
1500 blk_mq_bio_to_request(rq, bio);
07068d5b
JA
1501
1502 /*
6a83e74d 1503 * We do limited plugging. If the bio can be merged, do that.
f984df1f
SL
1504 * Otherwise the existing request in the plug list will be
1505 * issued. So the plug list will have one request at most
07068d5b 1506 */
f984df1f 1507 if (plug) {
5b3f341f
SL
1508 /*
1509 * The plug list might get flushed before this. If that
b094f89c
JA
1510 * happens, same_queue_rq is invalid and plug list is
1511 * empty
1512 */
5b3f341f
SL
1513 if (same_queue_rq && !list_empty(&plug->mq_list)) {
1514 old_rq = same_queue_rq;
f984df1f 1515 list_del_init(&old_rq->queuelist);
07068d5b 1516 }
f984df1f
SL
1517 list_add_tail(&rq->queuelist, &plug->mq_list);
1518 } else /* is_sync */
1519 old_rq = rq;
1520 blk_mq_put_ctx(data.ctx);
1521 if (!old_rq)
7b371636 1522 goto done;
6a83e74d
BVA
1523
1524 if (!(data.hctx->flags & BLK_MQ_F_BLOCKING)) {
1525 rcu_read_lock();
066a4a73 1526 blk_mq_try_issue_directly(old_rq, &cookie);
6a83e74d
BVA
1527 rcu_read_unlock();
1528 } else {
1529 srcu_idx = srcu_read_lock(&data.hctx->queue_rq_srcu);
066a4a73 1530 blk_mq_try_issue_directly(old_rq, &cookie);
6a83e74d
BVA
1531 srcu_read_unlock(&data.hctx->queue_rq_srcu, srcu_idx);
1532 }
7b371636 1533 goto done;
07068d5b
JA
1534 }
1535
bd166ef1 1536 if (q->elevator) {
0c2a6fe4 1537elv_insert:
bd166ef1
JA
1538 blk_mq_put_ctx(data.ctx);
1539 blk_mq_bio_to_request(rq, bio);
0abad774 1540 blk_mq_sched_insert_request(rq, false, true,
bd6737f1 1541 !is_sync || is_flush_fua, true);
bd166ef1
JA
1542 goto done;
1543 }
07068d5b
JA
1544 if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1545 /*
1546 * For a SYNC request, send it to the hardware immediately. For
1547 * an ASYNC request, just ensure that we run it later on. The
1548 * latter allows for merging opportunities and more efficient
1549 * dispatching.
1550 */
0c2a6fe4 1551run_queue:
07068d5b
JA
1552 blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1553 }
07068d5b 1554 blk_mq_put_ctx(data.ctx);
7b371636
JA
1555done:
1556 return cookie;
07068d5b
JA
1557}
1558
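/*
 * Recap of the submission paths in blk_mq_make_request() above: flush/FUA
 * bios go to the flush machinery (or to the elevator if one is attached),
 * plugged or sync submissions on queues without BLK_MQ_F_DEFER_ISSUE take
 * the direct-issue path, queues with an elevator insert through the
 * scheduler, and everything else lands in the per-cpu software queue and
 * is dispatched by blk_mq_run_hw_queue().
 */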
1559/*
1560 * Single hardware queue variant. This will attempt to use any per-process
1561 * plug for merging and IO deferral.
1562 */
dece1635 1563static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
07068d5b 1564{
ef295ecf 1565 const int is_sync = op_is_sync(bio->bi_opf);
f73f44eb 1566 const int is_flush_fua = op_is_flush(bio->bi_opf);
e6c4438b
JM
1567 struct blk_plug *plug;
1568 unsigned int request_count = 0;
5a797e00 1569 struct blk_mq_alloc_data data = { .flags = 0 };
07068d5b 1570 struct request *rq;
7b371636 1571 blk_qc_t cookie;
87760e5e 1572 unsigned int wb_acct;
07068d5b 1573
07068d5b
JA
1574 blk_queue_bounce(q, &bio);
1575
1576 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
4246a0b6 1577 bio_io_error(bio);
dece1635 1578 return BLK_QC_T_NONE;
07068d5b
JA
1579 }
1580
54efd50b
KO
1581 blk_queue_split(q, &bio, q->bio_split);
1582
87c279e6
OS
1583 if (!is_flush_fua && !blk_queue_nomerges(q)) {
1584 if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
1585 return BLK_QC_T_NONE;
1586 } else
1587 request_count = blk_plug_queued_count(q);
07068d5b 1588
bd166ef1
JA
1589 if (blk_mq_sched_bio_merge(q, bio))
1590 return BLK_QC_T_NONE;
1591
87760e5e
JA
1592 wb_acct = wbt_wait(q->rq_wb, bio, NULL);
1593
bd166ef1
JA
1594 trace_block_getrq(q, bio, bio->bi_opf);
1595
1596 rq = blk_mq_sched_get_request(q, bio, bio->bi_opf, &data);
87760e5e
JA
1597 if (unlikely(!rq)) {
1598 __wbt_done(q->rq_wb, wb_acct);
dece1635 1599 return BLK_QC_T_NONE;
87760e5e
JA
1600 }
1601
1602 wbt_track(&rq->issue_stat, wb_acct);
320ae51f 1603
fd2d3326 1604 cookie = request_to_qc_t(data.hctx, rq);
320ae51f
JA
1605
1606 if (unlikely(is_flush_fua)) {
0c2a6fe4
JA
1607 if (q->elevator)
1608 goto elv_insert;
320ae51f 1609 blk_mq_bio_to_request(rq, bio);
320ae51f 1610 blk_insert_flush(rq);
0c2a6fe4 1611 goto run_queue;
320ae51f
JA
1612 }
1613
1614 /*
1615 * A task plug currently exists. Since this is completely lockless,
1616 * utilize that to temporarily store requests until the task is
1617 * either done or scheduled away.
1618 */
e6c4438b
JM
1619 plug = current->plug;
1620 if (plug) {
600271d9
SL
1621 struct request *last = NULL;
1622
e6c4438b 1623 blk_mq_bio_to_request(rq, bio);
0a6219a9
ML
1624
1625 /*
1626 * @request_count may become stale because of schedule
1627 * out, so check the list again.
1628 */
1629 if (list_empty(&plug->mq_list))
1630 request_count = 0;
676d0607 1631 if (!request_count)
e6c4438b 1632 trace_block_plug(q);
600271d9
SL
1633 else
1634 last = list_entry_rq(plug->mq_list.prev);
b094f89c
JA
1635
1636 blk_mq_put_ctx(data.ctx);
1637
600271d9
SL
1638 if (request_count >= BLK_MAX_REQUEST_COUNT || (last &&
1639 blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
e6c4438b
JM
1640 blk_flush_plug_list(plug, false);
1641 trace_block_plug(q);
320ae51f 1642 }
b094f89c 1643
e6c4438b 1644 list_add_tail(&rq->queuelist, &plug->mq_list);
7b371636 1645 return cookie;
320ae51f
JA
1646 }
1647
bd166ef1 1648 if (q->elevator) {
0c2a6fe4 1649elv_insert:
bd166ef1
JA
1650 blk_mq_put_ctx(data.ctx);
1651 blk_mq_bio_to_request(rq, bio);
0abad774 1652 blk_mq_sched_insert_request(rq, false, true,
bd6737f1 1653 !is_sync || is_flush_fua, true);
bd166ef1
JA
1654 goto done;
1655 }
07068d5b
JA
1656 if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1657 /*
1658 * For a SYNC request, send it to the hardware immediately. For
1659 * an ASYNC request, just ensure that we run it later on. The
1660 * latter allows for merging opportunities and more efficient
1661 * dispatching.
1662 */
0c2a6fe4 1663run_queue:
07068d5b 1664 blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
320ae51f
JA
1665 }
1666
07068d5b 1667 blk_mq_put_ctx(data.ctx);
bd166ef1 1668done:
7b371636 1669 return cookie;
320ae51f
JA
1670}
1671
cc71a6f4
JA
1672void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
1673 unsigned int hctx_idx)
95363efd 1674{
e9b267d9 1675 struct page *page;
320ae51f 1676
24d2f903 1677 if (tags->rqs && set->ops->exit_request) {
e9b267d9 1678 int i;
320ae51f 1679
24d2f903 1680 for (i = 0; i < tags->nr_tags; i++) {
2af8cbe3
JA
1681 struct request *rq = tags->static_rqs[i];
1682
1683 if (!rq)
e9b267d9 1684 continue;
2af8cbe3 1685 set->ops->exit_request(set->driver_data, rq,
24d2f903 1686 hctx_idx, i);
2af8cbe3 1687 tags->static_rqs[i] = NULL;
e9b267d9 1688 }
320ae51f 1689 }
320ae51f 1690
24d2f903
CH
1691 while (!list_empty(&tags->page_list)) {
1692 page = list_first_entry(&tags->page_list, struct page, lru);
6753471c 1693 list_del_init(&page->lru);
f75782e4
CM
1694 /*
1695 * Remove kmemleak object previously allocated in
1696 * blk_mq_init_rq_map().
1697 */
1698 kmemleak_free(page_address(page));
320ae51f
JA
1699 __free_pages(page, page->private);
1700 }
cc71a6f4 1701}
320ae51f 1702
cc71a6f4
JA
1703void blk_mq_free_rq_map(struct blk_mq_tags *tags)
1704{
24d2f903 1705 kfree(tags->rqs);
cc71a6f4 1706 tags->rqs = NULL;
2af8cbe3
JA
1707 kfree(tags->static_rqs);
1708 tags->static_rqs = NULL;
320ae51f 1709
24d2f903 1710 blk_mq_free_tags(tags);
320ae51f
JA
1711}
1712
cc71a6f4
JA
1713struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
1714 unsigned int hctx_idx,
1715 unsigned int nr_tags,
1716 unsigned int reserved_tags)
320ae51f 1717{
24d2f903 1718 struct blk_mq_tags *tags;
59f082e4 1719 int node;
320ae51f 1720
59f082e4
SL
1721 node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
1722 if (node == NUMA_NO_NODE)
1723 node = set->numa_node;
1724
1725 tags = blk_mq_init_tags(nr_tags, reserved_tags, node,
24391c0d 1726 BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
24d2f903
CH
1727 if (!tags)
1728 return NULL;
320ae51f 1729
cc71a6f4 1730 tags->rqs = kzalloc_node(nr_tags * sizeof(struct request *),
36e1f3d1 1731 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
59f082e4 1732 node);
24d2f903
CH
1733 if (!tags->rqs) {
1734 blk_mq_free_tags(tags);
1735 return NULL;
1736 }
320ae51f 1737
2af8cbe3
JA
1738 tags->static_rqs = kzalloc_node(nr_tags * sizeof(struct request *),
1739 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
59f082e4 1740 node);
2af8cbe3
JA
1741 if (!tags->static_rqs) {
1742 kfree(tags->rqs);
1743 blk_mq_free_tags(tags);
1744 return NULL;
1745 }
1746
cc71a6f4
JA
1747 return tags;
1748}
1749
1750static size_t order_to_size(unsigned int order)
1751{
1752 return (size_t)PAGE_SIZE << order;
1753}
1754
1755int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
1756 unsigned int hctx_idx, unsigned int depth)
1757{
1758 unsigned int i, j, entries_per_page, max_order = 4;
1759 size_t rq_size, left;
59f082e4
SL
1760 int node;
1761
1762 node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
1763 if (node == NUMA_NO_NODE)
1764 node = set->numa_node;
cc71a6f4
JA
1765
1766 INIT_LIST_HEAD(&tags->page_list);
1767
320ae51f
JA
1768 /*
1769 * rq_size is the size of the request plus driver payload, rounded
1770 * to the cacheline size
1771 */
24d2f903 1772 rq_size = round_up(sizeof(struct request) + set->cmd_size,
320ae51f 1773 cache_line_size());
cc71a6f4 1774 left = rq_size * depth;
320ae51f 1775
cc71a6f4 1776 for (i = 0; i < depth; ) {
320ae51f
JA
1777 int this_order = max_order;
1778 struct page *page;
1779 int to_do;
1780 void *p;
1781
b3a834b1 1782 while (this_order && left < order_to_size(this_order - 1))
320ae51f
JA
1783 this_order--;
1784
1785 do {
59f082e4 1786 page = alloc_pages_node(node,
36e1f3d1 1787 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
a5164405 1788 this_order);
320ae51f
JA
1789 if (page)
1790 break;
1791 if (!this_order--)
1792 break;
1793 if (order_to_size(this_order) < rq_size)
1794 break;
1795 } while (1);
1796
1797 if (!page)
24d2f903 1798 goto fail;
320ae51f
JA
1799
1800 page->private = this_order;
24d2f903 1801 list_add_tail(&page->lru, &tags->page_list);
320ae51f
JA
1802
1803 p = page_address(page);
f75782e4
CM
1804 /*
1805 * Allow kmemleak to scan these pages as they contain pointers
1806 * to additional allocations, such as those made via ops->init_request().
1807 */
36e1f3d1 1808 kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
320ae51f 1809 entries_per_page = order_to_size(this_order) / rq_size;
cc71a6f4 1810 to_do = min(entries_per_page, depth - i);
320ae51f
JA
1811 left -= to_do * rq_size;
1812 for (j = 0; j < to_do; j++) {
2af8cbe3
JA
1813 struct request *rq = p;
1814
1815 tags->static_rqs[i] = rq;
24d2f903
CH
1816 if (set->ops->init_request) {
1817 if (set->ops->init_request(set->driver_data,
2af8cbe3 1818 rq, hctx_idx, i,
59f082e4 1819 node)) {
2af8cbe3 1820 tags->static_rqs[i] = NULL;
24d2f903 1821 goto fail;
a5164405 1822 }
e9b267d9
CH
1823 }
1824
320ae51f
JA
1825 p += rq_size;
1826 i++;
1827 }
1828 }
cc71a6f4 1829 return 0;
320ae51f 1830
24d2f903 1831fail:
cc71a6f4
JA
1832 blk_mq_free_rqs(set, tags, hctx_idx);
1833 return -ENOMEM;
320ae51f
JA
1834}
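/*
 * Illustrative arithmetic for the allocator above, using hypothetical
 * numbers: assuming PAGE_SIZE == 4096, a 64-byte cache line,
 * sizeof(struct request) == 320 and set->cmd_size == 0,
 *
 *	rq_size          = round_up(320 + 0, 64) = 320
 *	order_to_size(4) = 4096 << 4              = 65536
 *	entries_per_page = 65536 / 320            = 204
 *
 * so one order-4 allocation backs up to 204 requests, and deeper queues
 * simply loop, falling back to smaller orders as memory allows.  The
 * concrete sizeof(struct request) is hypothetical; it varies by kernel
 * version and configuration.
 */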
1835
e57690fe
JA
1836/*
1837 * 'cpu' is going away. splice any existing rq_list entries from this
1838 * software queue to the hw queue dispatch list, and ensure that it
1839 * gets run.
1840 */
9467f859 1841static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
484b4061 1842{
9467f859 1843 struct blk_mq_hw_ctx *hctx;
484b4061
JA
1844 struct blk_mq_ctx *ctx;
1845 LIST_HEAD(tmp);
1846
9467f859 1847 hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
e57690fe 1848 ctx = __blk_mq_get_ctx(hctx->queue, cpu);
484b4061
JA
1849
1850 spin_lock(&ctx->lock);
1851 if (!list_empty(&ctx->rq_list)) {
1852 list_splice_init(&ctx->rq_list, &tmp);
1853 blk_mq_hctx_clear_pending(hctx, ctx);
1854 }
1855 spin_unlock(&ctx->lock);
1856
1857 if (list_empty(&tmp))
9467f859 1858 return 0;
484b4061 1859
e57690fe
JA
1860 spin_lock(&hctx->lock);
1861 list_splice_tail_init(&tmp, &hctx->dispatch);
1862 spin_unlock(&hctx->lock);
484b4061
JA
1863
1864 blk_mq_run_hw_queue(hctx, true);
9467f859 1865 return 0;
484b4061
JA
1866}
1867
9467f859 1868static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
484b4061 1869{
9467f859
TG
1870 cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
1871 &hctx->cpuhp_dead);
484b4061
JA
1872}
1873
c3b4afca 1874/* hctx->ctxs will be freed in queue's release handler */
08e98fc6
ML
1875static void blk_mq_exit_hctx(struct request_queue *q,
1876 struct blk_mq_tag_set *set,
1877 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
1878{
f70ced09
ML
1879 unsigned flush_start_tag = set->queue_depth;
1880
08e98fc6
ML
1881 blk_mq_tag_idle(hctx);
1882
f70ced09
ML
1883 if (set->ops->exit_request)
1884 set->ops->exit_request(set->driver_data,
1885 hctx->fq->flush_rq, hctx_idx,
1886 flush_start_tag + hctx_idx);
1887
08e98fc6
ML
1888 if (set->ops->exit_hctx)
1889 set->ops->exit_hctx(hctx, hctx_idx);
1890
6a83e74d
BVA
1891 if (hctx->flags & BLK_MQ_F_BLOCKING)
1892 cleanup_srcu_struct(&hctx->queue_rq_srcu);
1893
9467f859 1894 blk_mq_remove_cpuhp(hctx);
f70ced09 1895 blk_free_flush_queue(hctx->fq);
88459642 1896 sbitmap_free(&hctx->ctx_map);
08e98fc6
ML
1897}
1898
624dbe47
ML
1899static void blk_mq_exit_hw_queues(struct request_queue *q,
1900 struct blk_mq_tag_set *set, int nr_queue)
1901{
1902 struct blk_mq_hw_ctx *hctx;
1903 unsigned int i;
1904
1905 queue_for_each_hw_ctx(q, hctx, i) {
1906 if (i == nr_queue)
1907 break;
08e98fc6 1908 blk_mq_exit_hctx(q, set, hctx, i);
624dbe47 1909 }
624dbe47
ML
1910}
1911
1912static void blk_mq_free_hw_queues(struct request_queue *q,
1913 struct blk_mq_tag_set *set)
1914{
1915 struct blk_mq_hw_ctx *hctx;
1916 unsigned int i;
1917
e09aae7e 1918 queue_for_each_hw_ctx(q, hctx, i)
624dbe47 1919 free_cpumask_var(hctx->cpumask);
624dbe47
ML
1920}
1921
08e98fc6
ML
1922static int blk_mq_init_hctx(struct request_queue *q,
1923 struct blk_mq_tag_set *set,
1924 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
320ae51f 1925{
08e98fc6 1926 int node;
f70ced09 1927 unsigned flush_start_tag = set->queue_depth;
08e98fc6
ML
1928
1929 node = hctx->numa_node;
1930 if (node == NUMA_NO_NODE)
1931 node = hctx->numa_node = set->numa_node;
1932
27489a3c 1933 INIT_WORK(&hctx->run_work, blk_mq_run_work_fn);
08e98fc6
ML
1934 INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
1935 spin_lock_init(&hctx->lock);
1936 INIT_LIST_HEAD(&hctx->dispatch);
1937 hctx->queue = q;
1938 hctx->queue_num = hctx_idx;
2404e607 1939 hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;
08e98fc6 1940
9467f859 1941 cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
08e98fc6
ML
1942
1943 hctx->tags = set->tags[hctx_idx];
320ae51f
JA
1944
1945 /*
08e98fc6
ML
1946 * Allocate space for all possible cpus to avoid allocation at
1947 * runtime
320ae51f 1948 */
08e98fc6
ML
1949 hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
1950 GFP_KERNEL, node);
1951 if (!hctx->ctxs)
1952 goto unregister_cpu_notifier;
320ae51f 1953
88459642
OS
1954 if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), GFP_KERNEL,
1955 node))
08e98fc6 1956 goto free_ctxs;
320ae51f 1957
08e98fc6 1958 hctx->nr_ctx = 0;
320ae51f 1959
08e98fc6
ML
1960 if (set->ops->init_hctx &&
1961 set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
1962 goto free_bitmap;
320ae51f 1963
f70ced09
ML
1964 hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
1965 if (!hctx->fq)
1966 goto exit_hctx;
320ae51f 1967
f70ced09
ML
1968 if (set->ops->init_request &&
1969 set->ops->init_request(set->driver_data,
1970 hctx->fq->flush_rq, hctx_idx,
1971 flush_start_tag + hctx_idx, node))
1972 goto free_fq;
320ae51f 1973
6a83e74d
BVA
1974 if (hctx->flags & BLK_MQ_F_BLOCKING)
1975 init_srcu_struct(&hctx->queue_rq_srcu);
1976
08e98fc6 1977 return 0;
320ae51f 1978
f70ced09
ML
1979 free_fq:
1980 kfree(hctx->fq);
1981 exit_hctx:
1982 if (set->ops->exit_hctx)
1983 set->ops->exit_hctx(hctx, hctx_idx);
08e98fc6 1984 free_bitmap:
88459642 1985 sbitmap_free(&hctx->ctx_map);
08e98fc6
ML
1986 free_ctxs:
1987 kfree(hctx->ctxs);
1988 unregister_cpu_notifier:
9467f859 1989 blk_mq_remove_cpuhp(hctx);
08e98fc6
ML
1990 return -1;
1991}
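/*
 * Illustrative note: the flush request set up via blk_alloc_flush_queue()
 * above is not backed by a real tag, so ->init_request()/->exit_request()
 * see a pseudo request index of set->queue_depth + hctx_idx (the
 * "flush_start_tag" used here), which is guaranteed to lie outside the
 * normal tag range.  With a hypothetical queue_depth of 64 and two hw
 * queues, the flush requests are reported to the driver as indexes 64
 * and 65.
 */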
320ae51f 1992
320ae51f
JA
1993static void blk_mq_init_cpu_queues(struct request_queue *q,
1994 unsigned int nr_hw_queues)
1995{
1996 unsigned int i;
1997
1998 for_each_possible_cpu(i) {
1999 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
2000 struct blk_mq_hw_ctx *hctx;
2001
2002 memset(__ctx, 0, sizeof(*__ctx));
2003 __ctx->cpu = i;
2004 spin_lock_init(&__ctx->lock);
2005 INIT_LIST_HEAD(&__ctx->rq_list);
2006 __ctx->queue = q;
cf43e6be
JA
2007 blk_stat_init(&__ctx->stat[BLK_STAT_READ]);
2008 blk_stat_init(&__ctx->stat[BLK_STAT_WRITE]);
320ae51f
JA
2009
2010 /* If the cpu isn't online, the cpu is mapped to first hctx */
320ae51f
JA
2011 if (!cpu_online(i))
2012 continue;
2013
7d7e0f90 2014 hctx = blk_mq_map_queue(q, i);
e4043dcf 2015
320ae51f
JA
2016 /*
2017 * Set local node, IFF we have more than one hw queue. If
2018 * not, we remain on the home node of the device
2019 */
2020 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
bffed457 2021 hctx->numa_node = local_memory_node(cpu_to_node(i));
320ae51f
JA
2022 }
2023}
2024
cc71a6f4
JA
2025static bool __blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, int hctx_idx)
2026{
2027 int ret = 0;
2028
2029 set->tags[hctx_idx] = blk_mq_alloc_rq_map(set, hctx_idx,
2030 set->queue_depth, set->reserved_tags);
2031 if (!set->tags[hctx_idx])
2032 return false;
2033
2034 ret = blk_mq_alloc_rqs(set, set->tags[hctx_idx], hctx_idx,
2035 set->queue_depth);
2036 if (!ret)
2037 return true;
2038
2039 blk_mq_free_rq_map(set->tags[hctx_idx]);
2040 set->tags[hctx_idx] = NULL;
2041 return false;
2042}
2043
2044static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
2045 unsigned int hctx_idx)
2046{
bd166ef1
JA
2047 if (set->tags[hctx_idx]) {
2048 blk_mq_free_rqs(set, set->tags[hctx_idx], hctx_idx);
2049 blk_mq_free_rq_map(set->tags[hctx_idx]);
2050 set->tags[hctx_idx] = NULL;
2051 }
cc71a6f4
JA
2052}
2053
5778322e
AM
2054static void blk_mq_map_swqueue(struct request_queue *q,
2055 const struct cpumask *online_mask)
320ae51f 2056{
d1b1cea1 2057 unsigned int i, hctx_idx;
320ae51f
JA
2058 struct blk_mq_hw_ctx *hctx;
2059 struct blk_mq_ctx *ctx;
2a34c087 2060 struct blk_mq_tag_set *set = q->tag_set;
320ae51f 2061
60de074b
AM
2062 /*
2063 * Avoid others reading incomplete hctx->cpumask through sysfs
2064 */
2065 mutex_lock(&q->sysfs_lock);
2066
320ae51f 2067 queue_for_each_hw_ctx(q, hctx, i) {
e4043dcf 2068 cpumask_clear(hctx->cpumask);
320ae51f
JA
2069 hctx->nr_ctx = 0;
2070 }
2071
2072 /*
2073 * Map software to hardware queues
2074 */
897bb0c7 2075 for_each_possible_cpu(i) {
320ae51f 2076 /* If the cpu isn't online, the cpu is mapped to first hctx */
5778322e 2077 if (!cpumask_test_cpu(i, online_mask))
e4043dcf
JA
2078 continue;
2079
d1b1cea1
GKB
2080 hctx_idx = q->mq_map[i];
2081 /* unmapped hw queue can be remapped after CPU topo changed */
cc71a6f4
JA
2082 if (!set->tags[hctx_idx] &&
2083 !__blk_mq_alloc_rq_map(set, hctx_idx)) {
d1b1cea1
GKB
2084 /*
2085 * If tags initialization fails for some hctx,
2086 * that hctx won't be brought online. In this
2087 * case, remap the current ctx to hctx[0] which
2088 * is guaranteed to always have tags allocated
2089 */
cc71a6f4 2090 q->mq_map[i] = 0;
d1b1cea1
GKB
2091 }
2092
897bb0c7 2093 ctx = per_cpu_ptr(q->queue_ctx, i);
7d7e0f90 2094 hctx = blk_mq_map_queue(q, i);
868f2f0b 2095
e4043dcf 2096 cpumask_set_cpu(i, hctx->cpumask);
320ae51f
JA
2097 ctx->index_hw = hctx->nr_ctx;
2098 hctx->ctxs[hctx->nr_ctx++] = ctx;
2099 }
506e931f 2100
60de074b
AM
2101 mutex_unlock(&q->sysfs_lock);
2102
506e931f 2103 queue_for_each_hw_ctx(q, hctx, i) {
484b4061 2104 /*
a68aafa5
JA
2105 * If no software queues are mapped to this hardware queue,
2106 * disable it and free the request entries.
484b4061
JA
2107 */
2108 if (!hctx->nr_ctx) {
d1b1cea1
GKB
2109 /* Never unmap queue 0. We need it as a
2110 * fallback in case tag allocation fails
2111 * for a remapped hw queue
2112 */
cc71a6f4
JA
2113 if (i && set->tags[i])
2114 blk_mq_free_map_and_requests(set, i);
2115
2a34c087 2116 hctx->tags = NULL;
484b4061
JA
2117 continue;
2118 }
2119
2a34c087
ML
2120 hctx->tags = set->tags[i];
2121 WARN_ON(!hctx->tags);
2122
889fa31f
CY
2123 /*
2124 * Set the map size to the number of mapped software queues.
2125 * This is more accurate and more efficient than looping
2126 * over all possibly mapped software queues.
2127 */
88459642 2128 sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);
889fa31f 2129
484b4061
JA
2130 /*
2131 * Initialize batch roundrobin counts
2132 */
506e931f
JA
2133 hctx->next_cpu = cpumask_first(hctx->cpumask);
2134 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
2135 }
320ae51f
JA
2136}
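/*
 * Illustrative mapping, with hypothetical numbers: given 4 possible CPUs,
 * nr_hw_queues == 2 and q->mq_map == { 0, 0, 1, 1 }, the loop above ends
 * up with
 *
 *	hctx0->ctxs = { ctx0, ctx1 }   ctx0->index_hw = 0, ctx1->index_hw = 1
 *	hctx1->ctxs = { ctx2, ctx3 }   ctx2->index_hw = 0, ctx3->index_hw = 1
 *
 * and each hctx->ctx_map is resized to two bits, so marking a ctx pending
 * only needs its per-hctx index rather than the CPU number.
 */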
2137
2404e607 2138static void queue_set_hctx_shared(struct request_queue *q, bool shared)
0d2602ca
JA
2139{
2140 struct blk_mq_hw_ctx *hctx;
0d2602ca
JA
2141 int i;
2142
2404e607
JM
2143 queue_for_each_hw_ctx(q, hctx, i) {
2144 if (shared)
2145 hctx->flags |= BLK_MQ_F_TAG_SHARED;
2146 else
2147 hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
2148 }
2149}
2150
2151static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set, bool shared)
2152{
2153 struct request_queue *q;
0d2602ca
JA
2154
2155 list_for_each_entry(q, &set->tag_list, tag_set_list) {
2156 blk_mq_freeze_queue(q);
2404e607 2157 queue_set_hctx_shared(q, shared);
0d2602ca
JA
2158 blk_mq_unfreeze_queue(q);
2159 }
2160}
2161
2162static void blk_mq_del_queue_tag_set(struct request_queue *q)
2163{
2164 struct blk_mq_tag_set *set = q->tag_set;
2165
0d2602ca
JA
2166 mutex_lock(&set->tag_list_lock);
2167 list_del_init(&q->tag_set_list);
2404e607
JM
2168 if (list_is_singular(&set->tag_list)) {
2169 /* just transitioned to unshared */
2170 set->flags &= ~BLK_MQ_F_TAG_SHARED;
2171 /* update existing queue */
2172 blk_mq_update_tag_set_depth(set, false);
2173 }
0d2602ca 2174 mutex_unlock(&set->tag_list_lock);
0d2602ca
JA
2175}
2176
2177static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
2178 struct request_queue *q)
2179{
2180 q->tag_set = set;
2181
2182 mutex_lock(&set->tag_list_lock);
2404e607
JM
2183
2184 /* Check to see if we're transitioning to shared (from 1 to 2 queues). */
2185 if (!list_empty(&set->tag_list) && !(set->flags & BLK_MQ_F_TAG_SHARED)) {
2186 set->flags |= BLK_MQ_F_TAG_SHARED;
2187 /* update existing queue */
2188 blk_mq_update_tag_set_depth(set, true);
2189 }
2190 if (set->flags & BLK_MQ_F_TAG_SHARED)
2191 queue_set_hctx_shared(q, true);
0d2602ca 2192 list_add_tail(&q->tag_set_list, &set->tag_list);
2404e607 2193
0d2602ca
JA
2194 mutex_unlock(&set->tag_list_lock);
2195}
2196
e09aae7e
ML
2197/*
2198 * This is the actual release handler for mq, but we do it from the
2199 * request queue's release handler to avoid use-after-free and
2200 * headaches: q->mq_kobj shouldn't have been introduced, but we
2201 * can't group the ctx/hctx kobjs without it.
2202 */
2203void blk_mq_release(struct request_queue *q)
2204{
2205 struct blk_mq_hw_ctx *hctx;
2206 unsigned int i;
2207
bd166ef1
JA
2208 blk_mq_sched_teardown(q);
2209
e09aae7e 2210 /* hctx kobj stays in hctx */
c3b4afca
ML
2211 queue_for_each_hw_ctx(q, hctx, i) {
2212 if (!hctx)
2213 continue;
2214 kfree(hctx->ctxs);
e09aae7e 2215 kfree(hctx);
c3b4afca 2216 }
e09aae7e 2217
a723bab3
AM
2218 q->mq_map = NULL;
2219
e09aae7e
ML
2220 kfree(q->queue_hw_ctx);
2221
2222 /* ctx kobj stays in queue_ctx */
2223 free_percpu(q->queue_ctx);
2224}
2225
24d2f903 2226struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
b62c21b7
MS
2227{
2228 struct request_queue *uninit_q, *q;
2229
2230 uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
2231 if (!uninit_q)
2232 return ERR_PTR(-ENOMEM);
2233
2234 q = blk_mq_init_allocated_queue(set, uninit_q);
2235 if (IS_ERR(q))
2236 blk_cleanup_queue(uninit_q);
2237
2238 return q;
2239}
2240EXPORT_SYMBOL(blk_mq_init_queue);
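/*
 * Illustrative driver-side sketch (hypothetical code, not from the upstream
 * file): the usual lifecycle around the exported helpers above.
 * "example_mq_ops", "example_tag_set" and the numeric values are invented;
 * a real driver must also fill in ->queue_rq() (whose exact prototype
 * depends on the kernel version), otherwise blk_mq_alloc_tag_set() returns
 * -EINVAL.
 */
static struct blk_mq_ops example_mq_ops;	/* .queue_rq etc. supplied by a real driver */
static struct blk_mq_tag_set example_tag_set;

static struct request_queue *example_create_queue(void)
{
	struct request_queue *q;

	memset(&example_tag_set, 0, sizeof(example_tag_set));
	example_tag_set.ops		= &example_mq_ops;
	example_tag_set.nr_hw_queues	= 1;
	example_tag_set.queue_depth	= 64;
	example_tag_set.numa_node	= NUMA_NO_NODE;
	example_tag_set.cmd_size	= 0;	/* per-request driver payload, if any */
	example_tag_set.flags		= BLK_MQ_F_SHOULD_MERGE;

	if (blk_mq_alloc_tag_set(&example_tag_set))
		return NULL;

	q = blk_mq_init_queue(&example_tag_set);
	if (IS_ERR(q)) {
		blk_mq_free_tag_set(&example_tag_set);
		return NULL;
	}
	return q;
}

static void example_destroy_queue(struct request_queue *q)
{
	blk_cleanup_queue(q);
	blk_mq_free_tag_set(&example_tag_set);
}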
2241
868f2f0b
KB
2242static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
2243 struct request_queue *q)
320ae51f 2244{
868f2f0b
KB
2245 int i, j;
2246 struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
f14bbe77 2247
868f2f0b 2248 blk_mq_sysfs_unregister(q);
24d2f903 2249 for (i = 0; i < set->nr_hw_queues; i++) {
868f2f0b 2250 int node;
f14bbe77 2251
868f2f0b
KB
2252 if (hctxs[i])
2253 continue;
2254
2255 node = blk_mq_hw_queue_to_node(q->mq_map, i);
cdef54dd
CH
2256 hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx),
2257 GFP_KERNEL, node);
320ae51f 2258 if (!hctxs[i])
868f2f0b 2259 break;
320ae51f 2260
a86073e4 2261 if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL,
868f2f0b
KB
2262 node)) {
2263 kfree(hctxs[i]);
2264 hctxs[i] = NULL;
2265 break;
2266 }
e4043dcf 2267
0d2602ca 2268 atomic_set(&hctxs[i]->nr_active, 0);
f14bbe77 2269 hctxs[i]->numa_node = node;
320ae51f 2270 hctxs[i]->queue_num = i;
868f2f0b
KB
2271
2272 if (blk_mq_init_hctx(q, set, hctxs[i], i)) {
2273 free_cpumask_var(hctxs[i]->cpumask);
2274 kfree(hctxs[i]);
2275 hctxs[i] = NULL;
2276 break;
2277 }
2278 blk_mq_hctx_kobj_init(hctxs[i]);
320ae51f 2279 }
868f2f0b
KB
2280 for (j = i; j < q->nr_hw_queues; j++) {
2281 struct blk_mq_hw_ctx *hctx = hctxs[j];
2282
2283 if (hctx) {
cc71a6f4
JA
2284 if (hctx->tags)
2285 blk_mq_free_map_and_requests(set, j);
868f2f0b
KB
2286 blk_mq_exit_hctx(q, set, hctx, j);
2287 free_cpumask_var(hctx->cpumask);
2288 kobject_put(&hctx->kobj);
2289 kfree(hctx->ctxs);
2290 kfree(hctx);
2291 hctxs[j] = NULL;
2292
2293 }
2294 }
2295 q->nr_hw_queues = i;
2296 blk_mq_sysfs_register(q);
2297}
2298
2299struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
2300 struct request_queue *q)
2301{
66841672
ML
2302 /* mark the queue as mq asap */
2303 q->mq_ops = set->ops;
2304
868f2f0b
KB
2305 q->queue_ctx = alloc_percpu(struct blk_mq_ctx);
2306 if (!q->queue_ctx)
c7de5726 2307 goto err_exit;
868f2f0b
KB
2308
2309 q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)),
2310 GFP_KERNEL, set->numa_node);
2311 if (!q->queue_hw_ctx)
2312 goto err_percpu;
2313
bdd17e75 2314 q->mq_map = set->mq_map;
868f2f0b
KB
2315
2316 blk_mq_realloc_hw_ctxs(set, q);
2317 if (!q->nr_hw_queues)
2318 goto err_hctxs;
320ae51f 2319
287922eb 2320 INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
e56f698b 2321 blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
320ae51f
JA
2322
2323 q->nr_queues = nr_cpu_ids;
320ae51f 2324
94eddfbe 2325 q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
320ae51f 2326
05f1dd53
JA
2327 if (!(set->flags & BLK_MQ_F_SG_MERGE))
2328 q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE;
2329
1be036e9
CH
2330 q->sg_reserved_size = INT_MAX;
2331
2849450a 2332 INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
6fca6a61
CH
2333 INIT_LIST_HEAD(&q->requeue_list);
2334 spin_lock_init(&q->requeue_lock);
2335
07068d5b
JA
2336 if (q->nr_hw_queues > 1)
2337 blk_queue_make_request(q, blk_mq_make_request);
2338 else
2339 blk_queue_make_request(q, blk_sq_make_request);
2340
eba71768
JA
2341 /*
2342 * Do this after blk_queue_make_request() overrides it...
2343 */
2344 q->nr_requests = set->queue_depth;
2345
64f1c21e
JA
2346 /*
2347 * Default to classic polling
2348 */
2349 q->poll_nsec = -1;
2350
24d2f903
CH
2351 if (set->ops->complete)
2352 blk_queue_softirq_done(q, set->ops->complete);
30a91cb4 2353
24d2f903 2354 blk_mq_init_cpu_queues(q, set->nr_hw_queues);
320ae51f 2355
5778322e 2356 get_online_cpus();
320ae51f 2357 mutex_lock(&all_q_mutex);
320ae51f 2358
4593fdbe 2359 list_add_tail(&q->all_q_node, &all_q_list);
0d2602ca 2360 blk_mq_add_queue_tag_set(set, q);
5778322e 2361 blk_mq_map_swqueue(q, cpu_online_mask);
484b4061 2362
4593fdbe 2363 mutex_unlock(&all_q_mutex);
5778322e 2364 put_online_cpus();
4593fdbe 2365
d3484991
JA
2366 if (!(set->flags & BLK_MQ_F_NO_SCHED)) {
2367 int ret;
2368
2369 ret = blk_mq_sched_init(q);
2370 if (ret)
2371 return ERR_PTR(ret);
2372 }
2373
320ae51f 2374 return q;
18741986 2375
320ae51f 2376err_hctxs:
868f2f0b 2377 kfree(q->queue_hw_ctx);
320ae51f 2378err_percpu:
868f2f0b 2379 free_percpu(q->queue_ctx);
c7de5726
ML
2380err_exit:
2381 q->mq_ops = NULL;
320ae51f
JA
2382 return ERR_PTR(-ENOMEM);
2383}
b62c21b7 2384EXPORT_SYMBOL(blk_mq_init_allocated_queue);
320ae51f
JA
2385
2386void blk_mq_free_queue(struct request_queue *q)
2387{
624dbe47 2388 struct blk_mq_tag_set *set = q->tag_set;
320ae51f 2389
0e626368
AM
2390 mutex_lock(&all_q_mutex);
2391 list_del_init(&q->all_q_node);
2392 mutex_unlock(&all_q_mutex);
2393
87760e5e
JA
2394 wbt_exit(q);
2395
0d2602ca
JA
2396 blk_mq_del_queue_tag_set(q);
2397
624dbe47
ML
2398 blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
2399 blk_mq_free_hw_queues(q, set);
320ae51f 2400}
320ae51f
JA
2401
2402/* Basically redo blk_mq_init_queue with queue frozen */
5778322e
AM
2403static void blk_mq_queue_reinit(struct request_queue *q,
2404 const struct cpumask *online_mask)
320ae51f 2405{
4ecd4fef 2406 WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
320ae51f 2407
67aec14c
JA
2408 blk_mq_sysfs_unregister(q);
2409
320ae51f
JA
2410 /*
2411 * Redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
2412 * we should change hctx numa_node according to the new topology (this
2413 * involves freeing and re-allocating memory; is it worth doing?)
2414 */
2415
5778322e 2416 blk_mq_map_swqueue(q, online_mask);
320ae51f 2417
67aec14c 2418 blk_mq_sysfs_register(q);
320ae51f
JA
2419}
2420
65d5291e
SAS
2421/*
2422 * New online cpumask which is going to be set in this hotplug event.
2423 * Declare this cpumask as global because cpu-hotplug operations are
2424 * invoked one by one and dynamically allocating it could fail.
2425 */
2426static struct cpumask cpuhp_online_new;
2427
2428static void blk_mq_queue_reinit_work(void)
320ae51f
JA
2429{
2430 struct request_queue *q;
320ae51f
JA
2431
2432 mutex_lock(&all_q_mutex);
f3af020b
TH
2433 /*
2434 * We need to freeze and reinit all existing queues. Freezing
2435 * involves synchronous wait for an RCU grace period and doing it
2436 * one by one may take a long time. Start freezing all queues in
2437 * one swoop and then wait for the completions so that freezing can
2438 * take place in parallel.
2439 */
2440 list_for_each_entry(q, &all_q_list, all_q_node)
2441 blk_mq_freeze_queue_start(q);
415d3dab 2442 list_for_each_entry(q, &all_q_list, all_q_node)
f3af020b
TH
2443 blk_mq_freeze_queue_wait(q);
2444
320ae51f 2445 list_for_each_entry(q, &all_q_list, all_q_node)
65d5291e 2446 blk_mq_queue_reinit(q, &cpuhp_online_new);
f3af020b
TH
2447
2448 list_for_each_entry(q, &all_q_list, all_q_node)
2449 blk_mq_unfreeze_queue(q);
2450
320ae51f 2451 mutex_unlock(&all_q_mutex);
65d5291e
SAS
2452}
2453
2454static int blk_mq_queue_reinit_dead(unsigned int cpu)
2455{
97a32864 2456 cpumask_copy(&cpuhp_online_new, cpu_online_mask);
65d5291e
SAS
2457 blk_mq_queue_reinit_work();
2458 return 0;
2459}
2460
2461/*
2462 * Before a hot-added cpu starts handling requests, new mappings must be
2463 * established. Otherwise, requests queued on it might never be
2464 * dispatched.
2465 *
2466 * For example, there is a single hw queue (hctx) and two CPU queues (ctx0
2467 * for CPU0, and ctx1 for CPU1).
2468 *
2469 * Now CPU1 is just onlined and a request is inserted into ctx1->rq_list,
2470 * and bit0 is set in the pending bitmap because ctx1->index_hw is still zero.
2471 *
2c3ad667
JA
2472 * Then, while running the hw queue, blk_mq_flush_busy_ctxs() finds bit0 set
2473 * in the pending bitmap and tries to retrieve requests from hctx->ctxs[0]->rq_list.
2474 * But hctx->ctxs[0] is a pointer to ctx0, so the request in ctx1->rq_list is
2475 * ignored.
65d5291e
SAS
2476 */
2477static int blk_mq_queue_reinit_prepare(unsigned int cpu)
2478{
2479 cpumask_copy(&cpuhp_online_new, cpu_online_mask);
2480 cpumask_set_cpu(cpu, &cpuhp_online_new);
2481 blk_mq_queue_reinit_work();
2482 return 0;
320ae51f
JA
2483}
2484
a5164405
JA
2485static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2486{
2487 int i;
2488
cc71a6f4
JA
2489 for (i = 0; i < set->nr_hw_queues; i++)
2490 if (!__blk_mq_alloc_rq_map(set, i))
a5164405 2491 goto out_unwind;
a5164405
JA
2492
2493 return 0;
2494
2495out_unwind:
2496 while (--i >= 0)
cc71a6f4 2497 blk_mq_free_rq_map(set->tags[i]);
a5164405 2498
a5164405
JA
2499 return -ENOMEM;
2500}
2501
2502/*
2503 * Allocate the request maps associated with this tag_set. Note that this
2504 * may reduce the depth asked for, if memory is tight. set->queue_depth
2505 * will be updated to reflect the allocated depth.
2506 */
2507static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2508{
2509 unsigned int depth;
2510 int err;
2511
2512 depth = set->queue_depth;
2513 do {
2514 err = __blk_mq_alloc_rq_maps(set);
2515 if (!err)
2516 break;
2517
2518 set->queue_depth >>= 1;
2519 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
2520 err = -ENOMEM;
2521 break;
2522 }
2523 } while (set->queue_depth);
2524
2525 if (!set->queue_depth || err) {
2526 pr_err("blk-mq: failed to allocate request map\n");
2527 return -ENOMEM;
2528 }
2529
2530 if (depth != set->queue_depth)
2531 pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
2532 depth, set->queue_depth);
2533
2534 return 0;
2535}
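/*
 * Illustrative retry sequence, with hypothetical numbers: if the caller
 * asked for queue_depth == 256 and memory is tight, the loop above retries
 * at 128, then 64, and so on, giving up once the depth would drop below
 * set->reserved_tags + BLK_MQ_TAG_MIN.  Any reduction is logged and left
 * in set->queue_depth for the caller to see.
 */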
2536
a4391c64
JA
2537/*
2538 * Alloc a tag set to be associated with one or more request queues.
2539 * May fail with EINVAL for various error conditions. May adjust the
2540 * requested depth down, if it is too large. In that case, the
2541 * value actually used will be stored in set->queue_depth.
2542 */
24d2f903
CH
2543int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
2544{
da695ba2
CH
2545 int ret;
2546
205fb5f5
BVA
2547 BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
2548
24d2f903
CH
2549 if (!set->nr_hw_queues)
2550 return -EINVAL;
a4391c64 2551 if (!set->queue_depth)
24d2f903
CH
2552 return -EINVAL;
2553 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
2554 return -EINVAL;
2555
7d7e0f90 2556 if (!set->ops->queue_rq)
24d2f903
CH
2557 return -EINVAL;
2558
a4391c64
JA
2559 if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
2560 pr_info("blk-mq: reduced tag depth to %u\n",
2561 BLK_MQ_MAX_DEPTH);
2562 set->queue_depth = BLK_MQ_MAX_DEPTH;
2563 }
24d2f903 2564
6637fadf
SL
2565 /*
2566 * If a crashdump is active, then we are potentially in a very
2567 * memory constrained environment. Limit us to 1 queue and
2568 * 64 tags to prevent using too much memory.
2569 */
2570 if (is_kdump_kernel()) {
2571 set->nr_hw_queues = 1;
2572 set->queue_depth = min(64U, set->queue_depth);
2573 }
868f2f0b
KB
2574 /*
2575 * There is no use for more h/w queues than cpus.
2576 */
2577 if (set->nr_hw_queues > nr_cpu_ids)
2578 set->nr_hw_queues = nr_cpu_ids;
6637fadf 2579
868f2f0b 2580 set->tags = kzalloc_node(nr_cpu_ids * sizeof(struct blk_mq_tags *),
24d2f903
CH
2581 GFP_KERNEL, set->numa_node);
2582 if (!set->tags)
a5164405 2583 return -ENOMEM;
24d2f903 2584
da695ba2
CH
2585 ret = -ENOMEM;
2586 set->mq_map = kzalloc_node(sizeof(*set->mq_map) * nr_cpu_ids,
2587 GFP_KERNEL, set->numa_node);
bdd17e75
CH
2588 if (!set->mq_map)
2589 goto out_free_tags;
2590
da695ba2
CH
2591 if (set->ops->map_queues)
2592 ret = set->ops->map_queues(set);
2593 else
2594 ret = blk_mq_map_queues(set);
2595 if (ret)
2596 goto out_free_mq_map;
2597
2598 ret = blk_mq_alloc_rq_maps(set);
2599 if (ret)
bdd17e75 2600 goto out_free_mq_map;
24d2f903 2601
0d2602ca
JA
2602 mutex_init(&set->tag_list_lock);
2603 INIT_LIST_HEAD(&set->tag_list);
2604
24d2f903 2605 return 0;
bdd17e75
CH
2606
2607out_free_mq_map:
2608 kfree(set->mq_map);
2609 set->mq_map = NULL;
2610out_free_tags:
5676e7b6
RE
2611 kfree(set->tags);
2612 set->tags = NULL;
da695ba2 2613 return ret;
24d2f903
CH
2614}
2615EXPORT_SYMBOL(blk_mq_alloc_tag_set);
2616
2617void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
2618{
2619 int i;
2620
cc71a6f4
JA
2621 for (i = 0; i < nr_cpu_ids; i++)
2622 blk_mq_free_map_and_requests(set, i);
484b4061 2623
bdd17e75
CH
2624 kfree(set->mq_map);
2625 set->mq_map = NULL;
2626
981bd189 2627 kfree(set->tags);
5676e7b6 2628 set->tags = NULL;
24d2f903
CH
2629}
2630EXPORT_SYMBOL(blk_mq_free_tag_set);
2631
e3a2b3f9
JA
2632int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
2633{
2634 struct blk_mq_tag_set *set = q->tag_set;
2635 struct blk_mq_hw_ctx *hctx;
2636 int i, ret;
2637
bd166ef1 2638 if (!set)
e3a2b3f9
JA
2639 return -EINVAL;
2640
70f36b60
JA
2641 blk_mq_freeze_queue(q);
2642 blk_mq_quiesce_queue(q);
2643
e3a2b3f9
JA
2644 ret = 0;
2645 queue_for_each_hw_ctx(q, hctx, i) {
e9137d4b
KB
2646 if (!hctx->tags)
2647 continue;
bd166ef1
JA
2648 /*
2649 * If we're using an MQ scheduler, just update the scheduler
2650 * queue depth. This is similar to what the old code would do.
2651 */
70f36b60
JA
2652 if (!hctx->sched_tags) {
2653 ret = blk_mq_tag_update_depth(hctx, &hctx->tags,
2654 min(nr, set->queue_depth),
2655 false);
2656 } else {
2657 ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
2658 nr, true);
2659 }
e3a2b3f9
JA
2660 if (ret)
2661 break;
2662 }
2663
2664 if (!ret)
2665 q->nr_requests = nr;
2666
70f36b60
JA
2667 blk_mq_unfreeze_queue(q);
2668 blk_mq_start_stopped_hw_queues(q, true);
2669
e3a2b3f9
JA
2670 return ret;
2671}
2672
868f2f0b
KB
2673void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
2674{
2675 struct request_queue *q;
2676
2677 if (nr_hw_queues > nr_cpu_ids)
2678 nr_hw_queues = nr_cpu_ids;
2679 if (nr_hw_queues < 1 || nr_hw_queues == set->nr_hw_queues)
2680 return;
2681
2682 list_for_each_entry(q, &set->tag_list, tag_set_list)
2683 blk_mq_freeze_queue(q);
2684
2685 set->nr_hw_queues = nr_hw_queues;
2686 list_for_each_entry(q, &set->tag_list, tag_set_list) {
2687 blk_mq_realloc_hw_ctxs(set, q);
2688
f6f94300
JB
2689 /*
2690 * Manually set the make_request_fn as blk_queue_make_request
2691 * resets a lot of the queue settings.
2692 */
868f2f0b 2693 if (q->nr_hw_queues > 1)
f6f94300 2694 q->make_request_fn = blk_mq_make_request;
868f2f0b 2695 else
f6f94300 2696 q->make_request_fn = blk_sq_make_request;
868f2f0b
KB
2697
2698 blk_mq_queue_reinit(q, cpu_online_mask);
2699 }
2700
2701 list_for_each_entry(q, &set->tag_list, tag_set_list)
2702 blk_mq_unfreeze_queue(q);
2703}
2704EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
2705
64f1c21e
JA
2706static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
2707 struct blk_mq_hw_ctx *hctx,
2708 struct request *rq)
2709{
2710 struct blk_rq_stat stat[2];
2711 unsigned long ret = 0;
2712
2713 /*
2714 * If stats collection isn't on, don't sleep but turn it on for
2715 * future users
2716 */
2717 if (!blk_stat_enable(q))
2718 return 0;
2719
2720 /*
2721 * We shouldn't have to do this for every IO; optimize this
2722 * to just use the current window of stats until it changes
2723 */
2724 memset(&stat, 0, sizeof(stat));
2725 blk_hctx_stat_get(hctx, stat);
2726
2727 /*
2728 * As an optimistic guess, use half of the mean service time
2729 * for this type of request. We can (and should) make this smarter.
2730 * For instance, if the completion latencies are tight, we can
2731 * get closer than just half the mean. This is especially
2732 * important on devices where the completion latencies are longer
2733 * than ~10 usec.
2734 */
2735 if (req_op(rq) == REQ_OP_READ && stat[BLK_STAT_READ].nr_samples)
2736 ret = (stat[BLK_STAT_READ].mean + 1) / 2;
2737 else if (req_op(rq) == REQ_OP_WRITE && stat[BLK_STAT_WRITE].nr_samples)
2738 ret = (stat[BLK_STAT_WRITE].mean + 1) / 2;
2739
2740 return ret;
2741}
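/*
 * Illustrative arithmetic, with hypothetical numbers: if the sampled mean
 * read completion time for this queue is 20000 ns, the hybrid poll path
 * below sleeps for roughly (20000 + 1) / 2 == 10000 ns before falling
 * into the busy-poll loop, trading a little added latency risk for much
 * less CPU spent spinning.
 */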
2742
06426adf 2743static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
64f1c21e 2744 struct blk_mq_hw_ctx *hctx,
06426adf
JA
2745 struct request *rq)
2746{
2747 struct hrtimer_sleeper hs;
2748 enum hrtimer_mode mode;
64f1c21e 2749 unsigned int nsecs;
06426adf
JA
2750 ktime_t kt;
2751
64f1c21e
JA
2752 if (test_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags))
2753 return false;
2754
2755 /*
2756 * poll_nsec can be:
2757 *
2758 * -1: don't ever hybrid sleep
2759 * 0: use half of prev avg
2760 * >0: use this specific value
2761 */
2762 if (q->poll_nsec == -1)
2763 return false;
2764 else if (q->poll_nsec > 0)
2765 nsecs = q->poll_nsec;
2766 else
2767 nsecs = blk_mq_poll_nsecs(q, hctx, rq);
2768
2769 if (!nsecs)
06426adf
JA
2770 return false;
2771
2772 set_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
2773
2774 /*
2775 * This will be replaced with the stats tracking code, using
2776 * 'avg_completion_time / 2' as the pre-sleep target.
2777 */
8b0e1953 2778 kt = nsecs;
06426adf
JA
2779
2780 mode = HRTIMER_MODE_REL;
2781 hrtimer_init_on_stack(&hs.timer, CLOCK_MONOTONIC, mode);
2782 hrtimer_set_expires(&hs.timer, kt);
2783
2784 hrtimer_init_sleeper(&hs, current);
2785 do {
2786 if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
2787 break;
2788 set_current_state(TASK_UNINTERRUPTIBLE);
2789 hrtimer_start_expires(&hs.timer, mode);
2790 if (hs.task)
2791 io_schedule();
2792 hrtimer_cancel(&hs.timer);
2793 mode = HRTIMER_MODE_ABS;
2794 } while (hs.task && !signal_pending(current));
2795
2796 __set_current_state(TASK_RUNNING);
2797 destroy_hrtimer_on_stack(&hs.timer);
2798 return true;
2799}
2800
bbd7bb70
JA
2801static bool __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq)
2802{
2803 struct request_queue *q = hctx->queue;
2804 long state;
2805
06426adf
JA
2806 /*
2807 * If we sleep, have the caller restart the poll loop to reset
2808 * the state. Like for the other success return cases, the
2809 * caller is responsible for checking if the IO completed. If
2810 * the IO isn't complete, we'll get called again and will go
2811 * straight to the busy poll loop.
2812 */
64f1c21e 2813 if (blk_mq_poll_hybrid_sleep(q, hctx, rq))
06426adf
JA
2814 return true;
2815
bbd7bb70
JA
2816 hctx->poll_considered++;
2817
2818 state = current->state;
2819 while (!need_resched()) {
2820 int ret;
2821
2822 hctx->poll_invoked++;
2823
2824 ret = q->mq_ops->poll(hctx, rq->tag);
2825 if (ret > 0) {
2826 hctx->poll_success++;
2827 set_current_state(TASK_RUNNING);
2828 return true;
2829 }
2830
2831 if (signal_pending_state(state, current))
2832 set_current_state(TASK_RUNNING);
2833
2834 if (current->state == TASK_RUNNING)
2835 return true;
2836 if (ret < 0)
2837 break;
2838 cpu_relax();
2839 }
2840
2841 return false;
2842}
2843
2844bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
2845{
2846 struct blk_mq_hw_ctx *hctx;
2847 struct blk_plug *plug;
2848 struct request *rq;
2849
2850 if (!q->mq_ops || !q->mq_ops->poll || !blk_qc_t_valid(cookie) ||
2851 !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
2852 return false;
2853
2854 plug = current->plug;
2855 if (plug)
2856 blk_flush_plug_list(plug, false);
2857
2858 hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
bd166ef1
JA
2859 if (!blk_qc_t_is_internal(cookie))
2860 rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
2861 else
2862 rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie));
bbd7bb70
JA
2863
2864 return __blk_mq_poll(hctx, rq);
2865}
2866EXPORT_SYMBOL_GPL(blk_mq_poll);
2867
676141e4
JA
2868void blk_mq_disable_hotplug(void)
2869{
2870 mutex_lock(&all_q_mutex);
2871}
2872
2873void blk_mq_enable_hotplug(void)
2874{
2875 mutex_unlock(&all_q_mutex);
2876}
2877
320ae51f
JA
2878static int __init blk_mq_init(void)
2879{
9467f859
TG
2880 cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
2881 blk_mq_hctx_notify_dead);
320ae51f 2882
65d5291e
SAS
2883 cpuhp_setup_state_nocalls(CPUHP_BLK_MQ_PREPARE, "block/mq:prepare",
2884 blk_mq_queue_reinit_prepare,
2885 blk_mq_queue_reinit_dead);
320ae51f
JA
2886 return 0;
2887}
2888subsys_initcall(blk_mq_init);