/*
 * Block multiqueue core code
 *
 * Copyright (C) 2013-2014 Jens Axboe
 * Copyright (C) 2013-2014 Christoph Hellwig
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/llist.h>
#include <linux/list_sort.h>
#include <linux/cpu.h>
#include <linux/cache.h>
#include <linux/sched/sysctl.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>

#include <trace/events/block.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

static DEFINE_MUTEX(all_q_mutex);
static LIST_HEAD(all_q_list);

static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);

/*
 * Check if any of the ctx's have pending work in this hardware queue
 */
static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
	unsigned int i;

	for (i = 0; i < hctx->ctx_map.map_size; i++)
		if (hctx->ctx_map.map[i].word)
			return true;

	return false;
}

static inline struct blk_align_bitmap *get_bm(struct blk_mq_hw_ctx *hctx,
					      struct blk_mq_ctx *ctx)
{
	return &hctx->ctx_map.map[ctx->index_hw / hctx->ctx_map.bits_per_word];
}

#define CTX_TO_BIT(hctx, ctx)	\
	((ctx)->index_hw & ((hctx)->ctx_map.bits_per_word - 1))

/*
 * Mark this ctx as having pending work in this hardware queue
 */
static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
				     struct blk_mq_ctx *ctx)
{
	struct blk_align_bitmap *bm = get_bm(hctx, ctx);

	if (!test_bit(CTX_TO_BIT(hctx, ctx), &bm->word))
		set_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
}

static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
				      struct blk_mq_ctx *ctx)
{
	struct blk_align_bitmap *bm = get_bm(hctx, ctx);

	clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
}

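/*
 * Worked example (illustrative only): with bits_per_word == 8, a ctx
 * with index_hw == 21 lands in ctx_map.map[21 / 8] == map[2], at bit
 * 21 & (8 - 1) == 5. get_bm() and CTX_TO_BIT() above compute exactly
 * that word/bit pair, which the helpers here set, test and clear.
 */
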
static int blk_mq_queue_enter(struct request_queue *q)
{
	while (true) {
		int ret;

		if (percpu_ref_tryget_live(&q->mq_usage_counter))
			return 0;

		ret = wait_event_interruptible(q->mq_freeze_wq,
				!q->mq_freeze_depth || blk_queue_dying(q));
		if (blk_queue_dying(q))
			return -ENODEV;
		if (ret)
			return ret;
	}
}

static void blk_mq_queue_exit(struct request_queue *q)
{
	percpu_ref_put(&q->mq_usage_counter);
}

static void blk_mq_usage_counter_release(struct percpu_ref *ref)
{
	struct request_queue *q =
		container_of(ref, struct request_queue, mq_usage_counter);

	wake_up_all(&q->mq_freeze_wq);
}

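/*
 * Sketch of the usual pairing (for orientation, not a complete path):
 * every submission path brackets its work with
 *
 *	if (blk_mq_queue_enter(q))
 *		return;		(queue is frozen or dying)
 *	... allocate and queue the request ...
 *	blk_mq_queue_exit(q);	(when the request is freed)
 *
 * so that a freeze can kill mq_usage_counter and wait for it to drain
 * to zero before the queue is torn down or reconfigured.
 */
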
void blk_mq_freeze_queue_start(struct request_queue *q)
{
	bool freeze;

	spin_lock_irq(q->queue_lock);
	freeze = !q->mq_freeze_depth++;
	spin_unlock_irq(q->queue_lock);

	if (freeze) {
		percpu_ref_kill(&q->mq_usage_counter);
		blk_mq_run_queues(q, false);
	}
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);

static void blk_mq_freeze_queue_wait(struct request_queue *q)
{
	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
}

/*
 * Guarantee no request is in use, so we can change any data structure of
 * the queue afterward.
 */
void blk_mq_freeze_queue(struct request_queue *q)
{
	blk_mq_freeze_queue_start(q);
	blk_mq_freeze_queue_wait(q);
}

void blk_mq_unfreeze_queue(struct request_queue *q)
{
	bool wake;

	spin_lock_irq(q->queue_lock);
	wake = !--q->mq_freeze_depth;
	WARN_ON_ONCE(q->mq_freeze_depth < 0);
	spin_unlock_irq(q->queue_lock);
	if (wake) {
		percpu_ref_reinit(&q->mq_usage_counter);
		wake_up_all(&q->mq_freeze_wq);
	}
}
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);

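/*
 * Example usage (sketch; mydrv_resize_tags() is a hypothetical driver
 * helper, not part of this file):
 *
 *	blk_mq_freeze_queue(q);		wait for in-flight requests
 *	mydrv_resize_tags(q);		safe to touch queue structures
 *	blk_mq_unfreeze_queue(q);	re-enable request allocation
 */
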
void blk_mq_wake_waiters(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i)
		if (blk_mq_hw_queue_mapped(hctx))
			blk_mq_tag_wakeup_all(hctx->tags, true);

	/*
	 * If we are called because the queue has now been marked as
	 * dying, we need to ensure that processes currently waiting on
	 * the queue are notified as well.
	 */
	wake_up_all(&q->mq_freeze_wq);
}

bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
{
	return blk_mq_has_free_tags(hctx->tags);
}
EXPORT_SYMBOL(blk_mq_can_queue);

static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
			       struct request *rq, unsigned int rw_flags)
{
	if (blk_queue_io_stat(q))
		rw_flags |= REQ_IO_STAT;

	INIT_LIST_HEAD(&rq->queuelist);
	/* csd/requeue_work/fifo_time is initialized before use */
	rq->q = q;
	rq->mq_ctx = ctx;
	rq->cmd_flags |= rw_flags;
	/* do not touch atomic flags, it needs atomic ops against the timer */
	rq->cpu = -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->rq_disk = NULL;
	rq->part = NULL;
	rq->start_time = jiffies;
#ifdef CONFIG_BLK_CGROUP
	rq->rl = NULL;
	set_start_time_ns(rq);
	rq->io_start_time_ns = 0;
#endif
	rq->nr_phys_segments = 0;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	rq->nr_integrity_segments = 0;
#endif
	rq->special = NULL;
	/* tag was already set */
	rq->errors = 0;

	rq->cmd = rq->__cmd;

	rq->extra_len = 0;
	rq->sense_len = 0;
	rq->resid_len = 0;
	rq->sense = NULL;

	INIT_LIST_HEAD(&rq->timeout_list);
	rq->timeout = 0;

	rq->end_io = NULL;
	rq->end_io_data = NULL;
	rq->next_rq = NULL;

	ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
}

static struct request *
__blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw)
{
	struct request *rq;
	unsigned int tag;

	tag = blk_mq_get_tag(data);
	if (tag != BLK_MQ_TAG_FAIL) {
		rq = data->hctx->tags->rqs[tag];

		if (blk_mq_tag_busy(data->hctx)) {
			rq->cmd_flags = REQ_MQ_INFLIGHT;
			atomic_inc(&data->hctx->nr_active);
		}

		rq->tag = tag;
		blk_mq_rq_ctx_init(data->q, data->ctx, rq, rw);
		return rq;
	}

	return NULL;
}

struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
		bool reserved)
{
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
	struct request *rq;
	struct blk_mq_alloc_data alloc_data;
	int ret;

	ret = blk_mq_queue_enter(q);
	if (ret)
		return ERR_PTR(ret);

	ctx = blk_mq_get_ctx(q);
	hctx = q->mq_ops->map_queue(q, ctx->cpu);
	blk_mq_set_alloc_data(&alloc_data, q, gfp & ~__GFP_WAIT,
			reserved, ctx, hctx);

	rq = __blk_mq_alloc_request(&alloc_data, rw);
	if (!rq && (gfp & __GFP_WAIT)) {
		__blk_mq_run_hw_queue(hctx);
		blk_mq_put_ctx(ctx);

		ctx = blk_mq_get_ctx(q);
		hctx = q->mq_ops->map_queue(q, ctx->cpu);
		blk_mq_set_alloc_data(&alloc_data, q, gfp, reserved, ctx,
				hctx);
		rq = __blk_mq_alloc_request(&alloc_data, rw);
		ctx = alloc_data.ctx;
	}
	blk_mq_put_ctx(ctx);
	if (!rq) {
		blk_mq_queue_exit(q);
		return ERR_PTR(-EWOULDBLOCK);
	}
	return rq;
}
EXPORT_SYMBOL(blk_mq_alloc_request);

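/*
 * Example (sketch): a driver-private command can be allocated and fired
 * like this; mydrv_end_io() is a hypothetical completion callback:
 *
 *	struct request *rq;
 *
 *	rq = blk_mq_alloc_request(q, READ, GFP_KERNEL, false);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	blk_execute_rq_nowait(q, NULL, rq, 0, mydrv_end_io);
 */
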
static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
				  struct blk_mq_ctx *ctx, struct request *rq)
{
	const int tag = rq->tag;
	struct request_queue *q = rq->q;

	if (rq->cmd_flags & REQ_MQ_INFLIGHT)
		atomic_dec(&hctx->nr_active);
	rq->cmd_flags = 0;

	clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
	blk_mq_put_tag(hctx, tag, &ctx->last_tag);
	blk_mq_queue_exit(q);
}

void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;

	ctx->rq_completed[rq_is_sync(rq)]++;
	__blk_mq_free_request(hctx, ctx, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_free_hctx_request);

void blk_mq_free_request(struct request *rq)
{
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q = rq->q;

	hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu);
	blk_mq_free_hctx_request(hctx, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_free_request);

inline void __blk_mq_end_request(struct request *rq, int error)
{
	blk_account_io_done(rq);

	if (rq->end_io) {
		rq->end_io(rq, error);
	} else {
		if (unlikely(blk_bidi_rq(rq)))
			blk_mq_free_request(rq->next_rq);
		blk_mq_free_request(rq);
	}
}
EXPORT_SYMBOL(__blk_mq_end_request);

void blk_mq_end_request(struct request *rq, int error)
{
	if (blk_update_request(rq, error, blk_rq_bytes(rq)))
		BUG();
	__blk_mq_end_request(rq, error);
}
EXPORT_SYMBOL(blk_mq_end_request);

static void __blk_mq_complete_request_remote(void *data)
{
	struct request *rq = data;

	rq->q->softirq_done_fn(rq);
}

static void blk_mq_ipi_complete_request(struct request *rq)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	bool shared = false;
	int cpu;

	if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
		rq->q->softirq_done_fn(rq);
		return;
	}

	cpu = get_cpu();
	if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
		shared = cpus_share_cache(cpu, ctx->cpu);

	if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
		rq->csd.func = __blk_mq_complete_request_remote;
		rq->csd.info = rq;
		rq->csd.flags = 0;
		smp_call_function_single_async(ctx->cpu, &rq->csd);
	} else {
		rq->q->softirq_done_fn(rq);
	}
	put_cpu();
}

void __blk_mq_complete_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	if (!q->softirq_done_fn)
		blk_mq_end_request(rq, rq->errors);
	else
		blk_mq_ipi_complete_request(rq);
}

/**
 * blk_mq_complete_request - end I/O on a request
 * @rq:		the request being processed
 *
 * Description:
 *	Ends all I/O on a request. It does not handle partial completions.
 *	The actual completion happens out-of-order, through an IPI handler.
 **/
void blk_mq_complete_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	if (unlikely(blk_should_fake_timeout(q)))
		return;
	if (!blk_mark_rq_complete(rq))
		__blk_mq_complete_request(rq);
}
EXPORT_SYMBOL(blk_mq_complete_request);

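/*
 * Typical completion flow (sketch): a driver's IRQ handler maps the
 * hardware status to rq->errors and hands the request back here; the
 * softirq_done_fn installed via blk_mq_ops->complete then finishes the
 * request. E.g. (mydrv_status_to_errno() is a hypothetical helper):
 *
 *	rq->errors = mydrv_status_to_errno(status);
 *	blk_mq_complete_request(rq);
 */
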
int blk_mq_request_started(struct request *rq)
{
	return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
}
EXPORT_SYMBOL_GPL(blk_mq_request_started);

void blk_mq_start_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	trace_block_rq_issue(q, rq);

	rq->resid_len = blk_rq_bytes(rq);
	if (unlikely(blk_bidi_rq(rq)))
		rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);

	blk_add_timer(rq);

	/*
	 * Ensure that ->deadline is visible before we set the started
	 * flag and clear the completed flag.
	 */
	smp_mb__before_atomic();

	/*
	 * Mark us as started and clear complete. Complete might have been
	 * set if requeue raced with timeout, which then marked it as
	 * complete. So be sure to clear complete again when we start
	 * the request, otherwise we'll ignore the completion event.
	 */
	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
		set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
	if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
		clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);

	if (q->dma_drain_size && blk_rq_bytes(rq)) {
		/*
		 * Make sure space for the drain appears. We know we can do
		 * this because max_hw_segments has been adjusted to be one
		 * fewer than the device can handle.
		 */
		rq->nr_phys_segments++;
	}
}
EXPORT_SYMBOL(blk_mq_start_request);

static void __blk_mq_requeue_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	trace_block_rq_requeue(q, rq);

	if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
		if (q->dma_drain_size && blk_rq_bytes(rq))
			rq->nr_phys_segments--;
	}
}

void blk_mq_requeue_request(struct request *rq)
{
	__blk_mq_requeue_request(rq);

	BUG_ON(blk_queued_rq(rq));
	blk_mq_add_to_requeue_list(rq, true);
}
EXPORT_SYMBOL(blk_mq_requeue_request);

static void blk_mq_requeue_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, requeue_work);
	LIST_HEAD(rq_list);
	struct request *rq, *next;
	unsigned long flags;

	spin_lock_irqsave(&q->requeue_lock, flags);
	list_splice_init(&q->requeue_list, &rq_list);
	spin_unlock_irqrestore(&q->requeue_lock, flags);

	list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
		if (!(rq->cmd_flags & REQ_SOFTBARRIER))
			continue;

		rq->cmd_flags &= ~REQ_SOFTBARRIER;
		list_del_init(&rq->queuelist);
		blk_mq_insert_request(rq, true, false, false);
	}

	while (!list_empty(&rq_list)) {
		rq = list_entry(rq_list.next, struct request, queuelist);
		list_del_init(&rq->queuelist);
		blk_mq_insert_request(rq, false, false, false);
	}

	/*
	 * Use the start variant of queue running here, so that running
	 * the requeue work will kick stopped queues.
	 */
	blk_mq_start_hw_queues(q);
}

void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
{
	struct request_queue *q = rq->q;
	unsigned long flags;

	/*
	 * We abuse this flag that is otherwise used by the I/O scheduler to
	 * request head insertion from the workqueue.
	 */
	BUG_ON(rq->cmd_flags & REQ_SOFTBARRIER);

	spin_lock_irqsave(&q->requeue_lock, flags);
	if (at_head) {
		rq->cmd_flags |= REQ_SOFTBARRIER;
		list_add(&rq->queuelist, &q->requeue_list);
	} else {
		list_add_tail(&rq->queuelist, &q->requeue_list);
	}
	spin_unlock_irqrestore(&q->requeue_lock, flags);
}
EXPORT_SYMBOL(blk_mq_add_to_requeue_list);

void blk_mq_cancel_requeue_work(struct request_queue *q)
{
	cancel_work_sync(&q->requeue_work);
}
EXPORT_SYMBOL_GPL(blk_mq_cancel_requeue_work);

void blk_mq_kick_requeue_list(struct request_queue *q)
{
	kblockd_schedule_work(&q->requeue_work);
}
EXPORT_SYMBOL(blk_mq_kick_requeue_list);

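/*
 * Example (sketch): a driver that hits a transient resource shortage
 * typically parks the request and pokes the list once resources return:
 *
 *	blk_mq_requeue_request(rq);
 *	blk_mq_kick_requeue_list(rq->q);
 */
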
void blk_mq_abort_requeue_list(struct request_queue *q)
{
	unsigned long flags;
	LIST_HEAD(rq_list);

	spin_lock_irqsave(&q->requeue_lock, flags);
	list_splice_init(&q->requeue_list, &rq_list);
	spin_unlock_irqrestore(&q->requeue_lock, flags);

	while (!list_empty(&rq_list)) {
		struct request *rq;

		rq = list_first_entry(&rq_list, struct request, queuelist);
		list_del_init(&rq->queuelist);
		rq->errors = -EIO;
		blk_mq_end_request(rq, rq->errors);
	}
}
EXPORT_SYMBOL(blk_mq_abort_requeue_list);

static inline bool is_flush_request(struct request *rq,
		struct blk_flush_queue *fq, unsigned int tag)
{
	return ((rq->cmd_flags & REQ_FLUSH_SEQ) &&
			fq->flush_rq->tag == tag);
}

struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
{
	struct request *rq = tags->rqs[tag];
	/* mq_ctx of flush rq is always cloned from the corresponding req */
	struct blk_flush_queue *fq = blk_get_flush_queue(rq->q, rq->mq_ctx);

	if (!is_flush_request(rq, fq, tag))
		return rq;

	return fq->flush_rq;
}
EXPORT_SYMBOL(blk_mq_tag_to_rq);

struct blk_mq_timeout_data {
	unsigned long next;
	unsigned int next_set;
};

void blk_mq_rq_timed_out(struct request *req, bool reserved)
{
	struct blk_mq_ops *ops = req->q->mq_ops;
	enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;

	/*
	 * We know that complete is set at this point. If STARTED isn't set
	 * anymore, then the request isn't active and the "timeout" should
	 * just be ignored. This can happen due to the bitflag ordering.
	 * Timeout first checks if STARTED is set, and if it is, assumes
	 * the request is active. But if we race with completion, then
	 * both flags will get cleared. So check here again, and ignore
	 * a timeout event with a request that isn't active.
	 */
	if (!test_bit(REQ_ATOM_STARTED, &req->atomic_flags))
		return;

	if (ops->timeout)
		ret = ops->timeout(req, reserved);

	switch (ret) {
	case BLK_EH_HANDLED:
		__blk_mq_complete_request(req);
		break;
	case BLK_EH_RESET_TIMER:
		blk_add_timer(req);
		blk_clear_rq_complete(req);
		break;
	case BLK_EH_NOT_HANDLED:
		break;
	default:
		printk(KERN_ERR "block: bad eh return: %d\n", ret);
		break;
	}
}

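/*
 * Example ->timeout handler shape (sketch; mydrv names hypothetical).
 * Returning BLK_EH_HANDLED lets the switch above complete the request;
 * BLK_EH_RESET_TIMER re-arms the timer and clears the complete flag:
 *
 *	static enum blk_eh_timer_return mydrv_timeout(struct request *rq,
 *						      bool reserved)
 *	{
 *		if (mydrv_abort_cmd(rq))
 *			return BLK_EH_HANDLED;
 *		return BLK_EH_RESET_TIMER;
 *	}
 */
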
static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
		struct request *rq, void *priv, bool reserved)
{
	struct blk_mq_timeout_data *data = priv;

	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
		/*
		 * If a request wasn't started before the queue was
		 * marked dying, kill it here or it'll go unnoticed.
		 */
		if (unlikely(blk_queue_dying(rq->q))) {
			rq->errors = -EIO;
			blk_mq_complete_request(rq);
		}
		return;
	}
	if (rq->cmd_flags & REQ_NO_TIMEOUT)
		return;

	if (time_after_eq(jiffies, rq->deadline)) {
		if (!blk_mark_rq_complete(rq))
			blk_mq_rq_timed_out(rq, reserved);
	} else if (!data->next_set || time_after(data->next, rq->deadline)) {
		data->next = rq->deadline;
		data->next_set = 1;
	}
}

static void blk_mq_rq_timer(unsigned long priv)
{
	struct request_queue *q = (struct request_queue *)priv;
	struct blk_mq_timeout_data data = {
		.next		= 0,
		.next_set	= 0,
	};
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		/*
		 * If no software queues are currently mapped to this
		 * hardware queue, there's nothing to check
		 */
		if (!blk_mq_hw_queue_mapped(hctx))
			continue;

		blk_mq_tag_busy_iter(hctx, blk_mq_check_expired, &data);
	}

	if (data.next_set) {
		data.next = blk_rq_timeout(round_jiffies_up(data.next));
		mod_timer(&q->timeout, data.next);
	} else {
		queue_for_each_hw_ctx(q, hctx, i)
			blk_mq_tag_idle(hctx);
	}
}

/*
 * Reverse check our software queue for entries that we could potentially
 * merge with. Currently includes a hand-wavy stop count of 8, to not spend
 * too much time checking for merges.
 */
static bool blk_mq_attempt_merge(struct request_queue *q,
				 struct blk_mq_ctx *ctx, struct bio *bio)
{
	struct request *rq;
	int checked = 8;

	list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
		int el_ret;

		if (!checked--)
			break;

		if (!blk_rq_merge_ok(rq, bio))
			continue;

		el_ret = blk_try_merge(rq, bio);
		if (el_ret == ELEVATOR_BACK_MERGE) {
			if (bio_attempt_back_merge(q, rq, bio)) {
				ctx->rq_merged++;
				return true;
			}
			break;
		} else if (el_ret == ELEVATOR_FRONT_MERGE) {
			if (bio_attempt_front_merge(q, rq, bio)) {
				ctx->rq_merged++;
				return true;
			}
			break;
		}
	}

	return false;
}

/*
 * Process software queues that have been marked busy, splicing them
 * to the for-dispatch list.
 */
static void flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
{
	struct blk_mq_ctx *ctx;
	int i;

	for (i = 0; i < hctx->ctx_map.map_size; i++) {
		struct blk_align_bitmap *bm = &hctx->ctx_map.map[i];
		unsigned int off, bit;

		if (!bm->word)
			continue;

		bit = 0;
		off = i * hctx->ctx_map.bits_per_word;
		do {
			bit = find_next_bit(&bm->word, bm->depth, bit);
			if (bit >= bm->depth)
				break;

			ctx = hctx->ctxs[bit + off];
			clear_bit(bit, &bm->word);
			spin_lock(&ctx->lock);
			list_splice_tail_init(&ctx->rq_list, list);
			spin_unlock(&ctx->lock);

			bit++;
		} while (1);
	}
}

/*
 * Run this hardware queue, pulling any software queues mapped to it in.
 * Note that this function currently has various problems around ordering
 * of IO. In particular, we'd like FIFO behaviour on handling existing
 * items on the hctx->dispatch list. Ignore that for now.
 */
static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct request *rq;
	LIST_HEAD(rq_list);
	LIST_HEAD(driver_list);
	struct list_head *dptr;
	int queued;

	WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask));

	if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
		return;

	hctx->run++;

	/*
	 * Touch any software queue that has pending entries.
	 */
	flush_busy_ctxs(hctx, &rq_list);

	/*
	 * If we have previous entries on our dispatch list, grab them
	 * and stuff them at the front for more fair dispatch.
	 */
	if (!list_empty_careful(&hctx->dispatch)) {
		spin_lock(&hctx->lock);
		if (!list_empty(&hctx->dispatch))
			list_splice_init(&hctx->dispatch, &rq_list);
		spin_unlock(&hctx->lock);
	}

	/*
	 * Start off with dptr being NULL, so we start the first request
	 * immediately, even if we have more pending.
	 */
	dptr = NULL;

	/*
	 * Now process all the entries, sending them to the driver.
	 */
	queued = 0;
	while (!list_empty(&rq_list)) {
		struct blk_mq_queue_data bd;
		int ret;

		rq = list_first_entry(&rq_list, struct request, queuelist);
		list_del_init(&rq->queuelist);

		bd.rq = rq;
		bd.list = dptr;
		bd.last = list_empty(&rq_list);

		ret = q->mq_ops->queue_rq(hctx, &bd);
		switch (ret) {
		case BLK_MQ_RQ_QUEUE_OK:
			queued++;
			continue;
		case BLK_MQ_RQ_QUEUE_BUSY:
			list_add(&rq->queuelist, &rq_list);
			__blk_mq_requeue_request(rq);
			break;
		default:
			pr_err("blk-mq: bad return on queue: %d\n", ret);
		case BLK_MQ_RQ_QUEUE_ERROR:
			rq->errors = -EIO;
			blk_mq_end_request(rq, rq->errors);
			break;
		}

		if (ret == BLK_MQ_RQ_QUEUE_BUSY)
			break;

		/*
		 * We've done the first request. If we have more than 1
		 * left in the list, set dptr to defer issue.
		 */
		if (!dptr && rq_list.next != rq_list.prev)
			dptr = &driver_list;
	}

	if (!queued)
		hctx->dispatched[0]++;
	else if (queued < (1 << (BLK_MQ_MAX_DISPATCH_ORDER - 1)))
		hctx->dispatched[ilog2(queued) + 1]++;

	/*
	 * Any items that need requeuing? Stuff them into hctx->dispatch,
	 * that is where we will continue on next queue run.
	 */
	if (!list_empty(&rq_list)) {
		spin_lock(&hctx->lock);
		list_splice(&rq_list, &hctx->dispatch);
		spin_unlock(&hctx->lock);
	}
}

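/*
 * The dispatch loop above depends on ->queue_rq() honouring the return
 * contract. A minimal driver side looks like this (sketch, hypothetical
 * names; a real driver also maps the request to hardware descriptors):
 *
 *	static int mydrv_queue_rq(struct blk_mq_hw_ctx *hctx,
 *				  const struct blk_mq_queue_data *bd)
 *	{
 *		blk_mq_start_request(bd->rq);
 *		if (!mydrv_submit(hctx->driver_data, bd->rq, bd->last))
 *			return BLK_MQ_RQ_QUEUE_BUSY;
 *		return BLK_MQ_RQ_QUEUE_OK;
 *	}
 */
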
/*
 * It'd be great if the workqueue API had a way to pass
 * in a mask and had some smarts for more clever placement.
 * For now we just round-robin here, switching for every
 * BLK_MQ_CPU_WORK_BATCH queued items.
 */
static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
{
	if (hctx->queue->nr_hw_queues == 1)
		return WORK_CPU_UNBOUND;

	if (--hctx->next_cpu_batch <= 0) {
		int cpu = hctx->next_cpu, next_cpu;

		next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
		if (next_cpu >= nr_cpu_ids)
			next_cpu = cpumask_first(hctx->cpumask);

		hctx->next_cpu = next_cpu;
		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;

		return cpu;
	}

	return hctx->next_cpu;
}

void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
	if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state) ||
	    !blk_mq_hw_queue_mapped(hctx)))
		return;

	if (!async) {
		int cpu = get_cpu();
		if (cpumask_test_cpu(cpu, hctx->cpumask)) {
			__blk_mq_run_hw_queue(hctx);
			put_cpu();
			return;
		}

		put_cpu();
	}

	kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
			&hctx->run_work, 0);
}

void blk_mq_run_queues(struct request_queue *q, bool async)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if ((!blk_mq_hctx_has_pending(hctx) &&
		    list_empty_careful(&hctx->dispatch)) ||
		    test_bit(BLK_MQ_S_STOPPED, &hctx->state))
			continue;

		blk_mq_run_hw_queue(hctx, async);
	}
}
EXPORT_SYMBOL(blk_mq_run_queues);

void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	cancel_delayed_work(&hctx->run_work);
	cancel_delayed_work(&hctx->delay_work);
	set_bit(BLK_MQ_S_STOPPED, &hctx->state);
}
EXPORT_SYMBOL(blk_mq_stop_hw_queue);

void blk_mq_stop_hw_queues(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_stop_hw_queue(hctx);
}
EXPORT_SYMBOL(blk_mq_stop_hw_queues);

void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);

	blk_mq_run_hw_queue(hctx, false);
}
EXPORT_SYMBOL(blk_mq_start_hw_queue);

void blk_mq_start_hw_queues(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_start_hw_queue(hctx);
}
EXPORT_SYMBOL(blk_mq_start_hw_queues);

void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (!test_bit(BLK_MQ_S_STOPPED, &hctx->state))
			continue;

		clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
		blk_mq_run_hw_queue(hctx, async);
	}
}
EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);

static void blk_mq_run_work_fn(struct work_struct *work)
{
	struct blk_mq_hw_ctx *hctx;

	hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);

	__blk_mq_run_hw_queue(hctx);
}

static void blk_mq_delay_work_fn(struct work_struct *work)
{
	struct blk_mq_hw_ctx *hctx;

	hctx = container_of(work, struct blk_mq_hw_ctx, delay_work.work);

	if (test_and_clear_bit(BLK_MQ_S_STOPPED, &hctx->state))
		__blk_mq_run_hw_queue(hctx);
}

void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
{
	if (unlikely(!blk_mq_hw_queue_mapped(hctx)))
		return;

	kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
			&hctx->delay_work, msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_mq_delay_queue);

static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
				    struct request *rq, bool at_head)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;

	trace_block_rq_insert(hctx->queue, rq);

	if (at_head)
		list_add(&rq->queuelist, &ctx->rq_list);
	else
		list_add_tail(&rq->queuelist, &ctx->rq_list);

	blk_mq_hctx_mark_pending(hctx, ctx);
}

void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
		bool async)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx;

	current_ctx = blk_mq_get_ctx(q);
	if (!cpu_online(ctx->cpu))
		rq->mq_ctx = ctx = current_ctx;

	hctx = q->mq_ops->map_queue(q, ctx->cpu);

	spin_lock(&ctx->lock);
	__blk_mq_insert_request(hctx, rq, at_head);
	spin_unlock(&ctx->lock);

	if (run_queue)
		blk_mq_run_hw_queue(hctx, async);

	blk_mq_put_ctx(current_ctx);
}

static void blk_mq_insert_requests(struct request_queue *q,
				     struct blk_mq_ctx *ctx,
				     struct list_head *list,
				     int depth,
				     bool from_schedule)
{
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *current_ctx;

	trace_block_unplug(q, depth, !from_schedule);

	current_ctx = blk_mq_get_ctx(q);

	if (!cpu_online(ctx->cpu))
		ctx = current_ctx;
	hctx = q->mq_ops->map_queue(q, ctx->cpu);

	/*
	 * preemption doesn't flush plug list, so it's possible ctx->cpu is
	 * offline now
	 */
	spin_lock(&ctx->lock);
	while (!list_empty(list)) {
		struct request *rq;

		rq = list_first_entry(list, struct request, queuelist);
		list_del_init(&rq->queuelist);
		rq->mq_ctx = ctx;
		__blk_mq_insert_request(hctx, rq, false);
	}
	spin_unlock(&ctx->lock);

	blk_mq_run_hw_queue(hctx, from_schedule);
	blk_mq_put_ctx(current_ctx);
}

static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct request *rqa = container_of(a, struct request, queuelist);
	struct request *rqb = container_of(b, struct request, queuelist);

	return !(rqa->mq_ctx < rqb->mq_ctx ||
		 (rqa->mq_ctx == rqb->mq_ctx &&
		  blk_rq_pos(rqa) < blk_rq_pos(rqb)));
}

void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
	struct blk_mq_ctx *this_ctx;
	struct request_queue *this_q;
	struct request *rq;
	LIST_HEAD(list);
	LIST_HEAD(ctx_list);
	unsigned int depth;

	list_splice_init(&plug->mq_list, &list);

	list_sort(NULL, &list, plug_ctx_cmp);

	this_q = NULL;
	this_ctx = NULL;
	depth = 0;

	while (!list_empty(&list)) {
		rq = list_entry_rq(list.next);
		list_del_init(&rq->queuelist);
		BUG_ON(!rq->q);
		if (rq->mq_ctx != this_ctx) {
			if (this_ctx) {
				blk_mq_insert_requests(this_q, this_ctx,
							&ctx_list, depth,
							from_schedule);
			}

			this_ctx = rq->mq_ctx;
			this_q = rq->q;
			depth = 0;
		}

		depth++;
		list_add_tail(&rq->queuelist, &ctx_list);
	}

	/*
	 * If 'this_ctx' is set, we know we have entries to complete
	 * on 'ctx_list'. Do those.
	 */
	if (this_ctx) {
		blk_mq_insert_requests(this_q, this_ctx, &ctx_list, depth,
				       from_schedule);
	}
}

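/*
 * For orientation (not code in this file): under a task plug,
 * blk_sq_make_request() below stores requests on current->plug->mq_list;
 * blk_flush_plug_list() hands them to the function above, which sorts
 * by ctx so each batch is inserted under a single ctx lock.
 */
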
static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
{
	init_request_from_bio(rq, bio);

	if (blk_do_io_stat(rq))
		blk_account_io_start(rq, 1);
}

static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
{
	return (hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
		!blk_queue_nomerges(hctx->queue);
}

static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
					 struct blk_mq_ctx *ctx,
					 struct request *rq, struct bio *bio)
{
	if (!hctx_allow_merges(hctx)) {
		blk_mq_bio_to_request(rq, bio);
		spin_lock(&ctx->lock);
insert_rq:
		__blk_mq_insert_request(hctx, rq, false);
		spin_unlock(&ctx->lock);
		return false;
	} else {
		struct request_queue *q = hctx->queue;

		spin_lock(&ctx->lock);
		if (!blk_mq_attempt_merge(q, ctx, bio)) {
			blk_mq_bio_to_request(rq, bio);
			goto insert_rq;
		}

		spin_unlock(&ctx->lock);
		__blk_mq_free_request(hctx, ctx, rq);
		return true;
	}
}

struct blk_map_ctx {
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
};

static struct request *blk_mq_map_request(struct request_queue *q,
					  struct bio *bio,
					  struct blk_map_ctx *data)
{
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	struct request *rq;
	int rw = bio_data_dir(bio);
	struct blk_mq_alloc_data alloc_data;

	if (unlikely(blk_mq_queue_enter(q))) {
		bio_endio(bio, -EIO);
		return NULL;
	}

	ctx = blk_mq_get_ctx(q);
	hctx = q->mq_ops->map_queue(q, ctx->cpu);

	if (rw_is_sync(bio->bi_rw))
		rw |= REQ_SYNC;

	trace_block_getrq(q, bio, rw);
	blk_mq_set_alloc_data(&alloc_data, q, GFP_ATOMIC, false, ctx,
			hctx);
	rq = __blk_mq_alloc_request(&alloc_data, rw);
	if (unlikely(!rq)) {
		__blk_mq_run_hw_queue(hctx);
		blk_mq_put_ctx(ctx);
		trace_block_sleeprq(q, bio, rw);

		ctx = blk_mq_get_ctx(q);
		hctx = q->mq_ops->map_queue(q, ctx->cpu);
		blk_mq_set_alloc_data(&alloc_data, q,
				__GFP_WAIT|GFP_ATOMIC, false, ctx, hctx);
		rq = __blk_mq_alloc_request(&alloc_data, rw);
		ctx = alloc_data.ctx;
		hctx = alloc_data.hctx;
	}

	hctx->queued++;
	data->hctx = hctx;
	data->ctx = ctx;
	return rq;
}

/*
 * Multiple hardware queue variant. This will not use per-process plugs,
 * but will attempt to bypass the hctx queueing if we can go straight to
 * hardware for SYNC IO.
 */
static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
{
	const int is_sync = rw_is_sync(bio->bi_rw);
	const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
	struct blk_map_ctx data;
	struct request *rq;

	blk_queue_bounce(q, &bio);

	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
		bio_endio(bio, -EIO);
		return;
	}

	rq = blk_mq_map_request(q, bio, &data);
	if (unlikely(!rq))
		return;

	if (unlikely(is_flush_fua)) {
		blk_mq_bio_to_request(rq, bio);
		blk_insert_flush(rq);
		goto run_queue;
	}

	/*
	 * If the driver supports deferred issue based on 'last', then
	 * queue it up like normal since we can potentially save some
	 * CPU this way.
	 */
	if (is_sync && !(data.hctx->flags & BLK_MQ_F_DEFER_ISSUE)) {
		struct blk_mq_queue_data bd = {
			.rq = rq,
			.list = NULL,
			.last = 1
		};
		int ret;

		blk_mq_bio_to_request(rq, bio);

		/*
		 * For OK queue, we are done. For error, kill it. Any other
		 * error (busy), just add it to our list as we previously
		 * would have done.
		 */
		ret = q->mq_ops->queue_rq(data.hctx, &bd);
		if (ret == BLK_MQ_RQ_QUEUE_OK)
			goto done;
		else {
			__blk_mq_requeue_request(rq);

			if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
				rq->errors = -EIO;
				blk_mq_end_request(rq, rq->errors);
				goto done;
			}
		}
	}

	if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
		/*
		 * For a SYNC request, send it to the hardware immediately. For
		 * an ASYNC request, just ensure that we run it later on. The
		 * latter allows for merging opportunities and more efficient
		 * dispatching.
		 */
run_queue:
		blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
	}
done:
	blk_mq_put_ctx(data.ctx);
}

/*
 * Single hardware queue variant. This will attempt to use any per-process
 * plug for merging and IO deferral.
 */
static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
{
	const int is_sync = rw_is_sync(bio->bi_rw);
	const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
	unsigned int use_plug, request_count = 0;
	struct blk_map_ctx data;
	struct request *rq;

	/*
	 * If we have multiple hardware queues, just go directly to
	 * one of those for sync IO.
	 */
	use_plug = !is_flush_fua && !is_sync;

	blk_queue_bounce(q, &bio);

	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
		bio_endio(bio, -EIO);
		return;
	}

	if (use_plug && !blk_queue_nomerges(q) &&
	    blk_attempt_plug_merge(q, bio, &request_count))
		return;

	rq = blk_mq_map_request(q, bio, &data);
	if (unlikely(!rq))
		return;

	if (unlikely(is_flush_fua)) {
		blk_mq_bio_to_request(rq, bio);
		blk_insert_flush(rq);
		goto run_queue;
	}

	/*
	 * A task plug currently exists. Since this is completely lockless,
	 * utilize that to temporarily store requests until the task is
	 * either done or scheduled away.
	 */
	if (use_plug) {
		struct blk_plug *plug = current->plug;

		if (plug) {
			blk_mq_bio_to_request(rq, bio);
			if (list_empty(&plug->mq_list))
				trace_block_plug(q);
			else if (request_count >= BLK_MAX_REQUEST_COUNT) {
				blk_flush_plug_list(plug, false);
				trace_block_plug(q);
			}
			list_add_tail(&rq->queuelist, &plug->mq_list);
			blk_mq_put_ctx(data.ctx);
			return;
		}
	}

	if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
		/*
		 * For a SYNC request, send it to the hardware immediately. For
		 * an ASYNC request, just ensure that we run it later on. The
		 * latter allows for merging opportunities and more efficient
		 * dispatching.
		 */
run_queue:
		blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
	}

	blk_mq_put_ctx(data.ctx);
}

/*
 * Default mapping to a software queue, since we use one per CPU.
 */
struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu)
{
	return q->queue_hw_ctx[q->mq_map[cpu]];
}
EXPORT_SYMBOL(blk_mq_map_queue);

static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
		struct blk_mq_tags *tags, unsigned int hctx_idx)
{
	struct page *page;

	if (tags->rqs && set->ops->exit_request) {
		int i;

		for (i = 0; i < tags->nr_tags; i++) {
			if (!tags->rqs[i])
				continue;
			set->ops->exit_request(set->driver_data, tags->rqs[i],
						hctx_idx, i);
			tags->rqs[i] = NULL;
		}
	}

	while (!list_empty(&tags->page_list)) {
		page = list_first_entry(&tags->page_list, struct page, lru);
		list_del_init(&page->lru);
		__free_pages(page, page->private);
	}

	kfree(tags->rqs);

	blk_mq_free_tags(tags);
}

static size_t order_to_size(unsigned int order)
{
	return (size_t)PAGE_SIZE << order;
}

static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
		unsigned int hctx_idx)
{
	struct blk_mq_tags *tags;
	unsigned int i, j, entries_per_page, max_order = 4;
	size_t rq_size, left;

	tags = blk_mq_init_tags(set->queue_depth, set->reserved_tags,
				set->numa_node);
	if (!tags)
		return NULL;

	INIT_LIST_HEAD(&tags->page_list);

	tags->rqs = kzalloc_node(set->queue_depth * sizeof(struct request *),
				 GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
				 set->numa_node);
	if (!tags->rqs) {
		blk_mq_free_tags(tags);
		return NULL;
	}

	/*
	 * rq_size is the size of the request plus driver payload, rounded
	 * to the cacheline size
	 */
	rq_size = round_up(sizeof(struct request) + set->cmd_size,
				cache_line_size());
	left = rq_size * set->queue_depth;

	for (i = 0; i < set->queue_depth; ) {
		int this_order = max_order;
		struct page *page;
		int to_do;
		void *p;

		while (left < order_to_size(this_order - 1) && this_order)
			this_order--;

		do {
			page = alloc_pages_node(set->numa_node,
				GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
				this_order);
			if (page)
				break;
			if (!this_order--)
				break;
			if (order_to_size(this_order) < rq_size)
				break;
		} while (1);

		if (!page)
			goto fail;

		page->private = this_order;
		list_add_tail(&page->lru, &tags->page_list);

		p = page_address(page);
		entries_per_page = order_to_size(this_order) / rq_size;
		to_do = min(entries_per_page, set->queue_depth - i);
		left -= to_do * rq_size;
		for (j = 0; j < to_do; j++) {
			tags->rqs[i] = p;
			tags->rqs[i]->atomic_flags = 0;
			tags->rqs[i]->cmd_flags = 0;
			if (set->ops->init_request) {
				if (set->ops->init_request(set->driver_data,
						tags->rqs[i], hctx_idx, i,
						set->numa_node)) {
					tags->rqs[i] = NULL;
					goto fail;
				}
			}

			p += rq_size;
			i++;
		}
	}

	return tags;

fail:
	blk_mq_free_rq_map(set, tags, hctx_idx);
	return NULL;
}

static void blk_mq_free_bitmap(struct blk_mq_ctxmap *bitmap)
{
	kfree(bitmap->map);
}

static int blk_mq_alloc_bitmap(struct blk_mq_ctxmap *bitmap, int node)
{
	unsigned int bpw = 8, total, num_maps, i;

	bitmap->bits_per_word = bpw;

	num_maps = ALIGN(nr_cpu_ids, bpw) / bpw;
	bitmap->map = kzalloc_node(num_maps * sizeof(struct blk_align_bitmap),
					GFP_KERNEL, node);
	if (!bitmap->map)
		return -ENOMEM;

	bitmap->map_size = num_maps;

	total = nr_cpu_ids;
	for (i = 0; i < num_maps; i++) {
		bitmap->map[i].depth = min(total, bitmap->bits_per_word);
		total -= bitmap->map[i].depth;
	}

	return 0;
}

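/*
 * Worked example (illustrative): with nr_cpu_ids == 20 and bpw == 8,
 * num_maps == ALIGN(20, 8) / 8 == 3 and the per-word depths come out
 * as 8, 8 and 4, so exactly nr_cpu_ids bits are spread over the map.
 */
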
static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
{
	struct request_queue *q = hctx->queue;
	struct blk_mq_ctx *ctx;
	LIST_HEAD(tmp);

	/*
	 * Move ctx entries to new CPU, if this one is going away.
	 */
	ctx = __blk_mq_get_ctx(q, cpu);

	spin_lock(&ctx->lock);
	if (!list_empty(&ctx->rq_list)) {
		list_splice_init(&ctx->rq_list, &tmp);
		blk_mq_hctx_clear_pending(hctx, ctx);
	}
	spin_unlock(&ctx->lock);

	if (list_empty(&tmp))
		return NOTIFY_OK;

	ctx = blk_mq_get_ctx(q);
	spin_lock(&ctx->lock);

	while (!list_empty(&tmp)) {
		struct request *rq;

		rq = list_first_entry(&tmp, struct request, queuelist);
		rq->mq_ctx = ctx;
		list_move_tail(&rq->queuelist, &ctx->rq_list);
	}

	hctx = q->mq_ops->map_queue(q, ctx->cpu);
	blk_mq_hctx_mark_pending(hctx, ctx);

	spin_unlock(&ctx->lock);

	blk_mq_run_hw_queue(hctx, true);
	blk_mq_put_ctx(ctx);
	return NOTIFY_OK;
}

static int blk_mq_hctx_cpu_online(struct blk_mq_hw_ctx *hctx, int cpu)
{
	struct request_queue *q = hctx->queue;
	struct blk_mq_tag_set *set = q->tag_set;

	if (set->tags[hctx->queue_num])
		return NOTIFY_OK;

	set->tags[hctx->queue_num] = blk_mq_init_rq_map(set, hctx->queue_num);
	if (!set->tags[hctx->queue_num])
		return NOTIFY_STOP;

	hctx->tags = set->tags[hctx->queue_num];
	return NOTIFY_OK;
}

static int blk_mq_hctx_notify(void *data, unsigned long action,
			      unsigned int cpu)
{
	struct blk_mq_hw_ctx *hctx = data;

	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
		return blk_mq_hctx_cpu_offline(hctx, cpu);
	else if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
		return blk_mq_hctx_cpu_online(hctx, cpu);

	return NOTIFY_OK;
}

static void blk_mq_exit_hctx(struct request_queue *q,
		struct blk_mq_tag_set *set,
		struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	unsigned flush_start_tag = set->queue_depth;

	blk_mq_tag_idle(hctx);

	if (set->ops->exit_request)
		set->ops->exit_request(set->driver_data,
				       hctx->fq->flush_rq, hctx_idx,
				       flush_start_tag + hctx_idx);

	if (set->ops->exit_hctx)
		set->ops->exit_hctx(hctx, hctx_idx);

	blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
	blk_free_flush_queue(hctx->fq);
	kfree(hctx->ctxs);
	blk_mq_free_bitmap(&hctx->ctx_map);
}

static void blk_mq_exit_hw_queues(struct request_queue *q,
		struct blk_mq_tag_set *set, int nr_queue)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (i == nr_queue)
			break;
		blk_mq_exit_hctx(q, set, hctx, i);
	}
}

static void blk_mq_free_hw_queues(struct request_queue *q,
		struct blk_mq_tag_set *set)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		free_cpumask_var(hctx->cpumask);
		kfree(hctx);
	}
}

static int blk_mq_init_hctx(struct request_queue *q,
		struct blk_mq_tag_set *set,
		struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
{
	int node;
	unsigned flush_start_tag = set->queue_depth;

	node = hctx->numa_node;
	if (node == NUMA_NO_NODE)
		node = hctx->numa_node = set->numa_node;

	INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
	INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
	spin_lock_init(&hctx->lock);
	INIT_LIST_HEAD(&hctx->dispatch);
	hctx->queue = q;
	hctx->queue_num = hctx_idx;
	hctx->flags = set->flags;

	blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
					blk_mq_hctx_notify, hctx);
	blk_mq_register_cpu_notifier(&hctx->cpu_notifier);

	hctx->tags = set->tags[hctx_idx];

	/*
	 * Allocate space for all possible cpus to avoid allocation at
	 * runtime
	 */
	hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
					GFP_KERNEL, node);
	if (!hctx->ctxs)
		goto unregister_cpu_notifier;

	if (blk_mq_alloc_bitmap(&hctx->ctx_map, node))
		goto free_ctxs;

	hctx->nr_ctx = 0;

	if (set->ops->init_hctx &&
	    set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
		goto free_bitmap;

	hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
	if (!hctx->fq)
		goto exit_hctx;

	if (set->ops->init_request &&
	    set->ops->init_request(set->driver_data,
				   hctx->fq->flush_rq, hctx_idx,
				   flush_start_tag + hctx_idx, node))
		goto free_fq;

	return 0;

 free_fq:
	kfree(hctx->fq);
 exit_hctx:
	if (set->ops->exit_hctx)
		set->ops->exit_hctx(hctx, hctx_idx);
 free_bitmap:
	blk_mq_free_bitmap(&hctx->ctx_map);
 free_ctxs:
	kfree(hctx->ctxs);
 unregister_cpu_notifier:
	blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);

	return -1;
}

static int blk_mq_init_hw_queues(struct request_queue *q,
		struct blk_mq_tag_set *set)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	/*
	 * Initialize hardware queues
	 */
	queue_for_each_hw_ctx(q, hctx, i) {
		if (blk_mq_init_hctx(q, set, hctx, i))
			break;
	}

	if (i == q->nr_hw_queues)
		return 0;

	/*
	 * Init failed
	 */
	blk_mq_exit_hw_queues(q, set, i);

	return 1;
}

static void blk_mq_init_cpu_queues(struct request_queue *q,
				   unsigned int nr_hw_queues)
{
	unsigned int i;

	for_each_possible_cpu(i) {
		struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
		struct blk_mq_hw_ctx *hctx;

		memset(__ctx, 0, sizeof(*__ctx));
		__ctx->cpu = i;
		spin_lock_init(&__ctx->lock);
		INIT_LIST_HEAD(&__ctx->rq_list);
		__ctx->queue = q;

		/* If the cpu isn't online, the cpu is mapped to first hctx */
		if (!cpu_online(i))
			continue;

		hctx = q->mq_ops->map_queue(q, i);
		cpumask_set_cpu(i, hctx->cpumask);
		hctx->nr_ctx++;

		/*
		 * Set local node, IFF we have more than one hw queue. If
		 * not, we remain on the home node of the device
		 */
		if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
			hctx->numa_node = cpu_to_node(i);
	}
}

1776
1777static void blk_mq_map_swqueue(struct request_queue *q)
1778{
1779 unsigned int i;
1780 struct blk_mq_hw_ctx *hctx;
1781 struct blk_mq_ctx *ctx;
1782
1783 queue_for_each_hw_ctx(q, hctx, i) {
e4043dcf 1784 cpumask_clear(hctx->cpumask);
320ae51f
JA
1785 hctx->nr_ctx = 0;
1786 }
1787
1788 /*
1789 * Map software to hardware queues
1790 */
1791 queue_for_each_ctx(q, ctx, i) {
1792 /* If the cpu isn't online, the cpu is mapped to first hctx */
e4043dcf
JA
1793 if (!cpu_online(i))
1794 continue;
1795
320ae51f 1796 hctx = q->mq_ops->map_queue(q, i);
e4043dcf 1797 cpumask_set_cpu(i, hctx->cpumask);
320ae51f
JA
1798 ctx->index_hw = hctx->nr_ctx;
1799 hctx->ctxs[hctx->nr_ctx++] = ctx;
1800 }
506e931f
JA
1801
1802 queue_for_each_hw_ctx(q, hctx, i) {
484b4061 1803 /*
a68aafa5
JA
1804 * If no software queues are mapped to this hardware queue,
1805 * disable it and free the request entries.
484b4061
JA
1806 */
1807 if (!hctx->nr_ctx) {
1808 struct blk_mq_tag_set *set = q->tag_set;
1809
1810 if (set->tags[i]) {
1811 blk_mq_free_rq_map(set, set->tags[i], i);
1812 set->tags[i] = NULL;
1813 hctx->tags = NULL;
1814 }
1815 continue;
1816 }
1817
1818 /*
1819 * Initialize batch roundrobin counts
1820 */
506e931f
JA
1821 hctx->next_cpu = cpumask_first(hctx->cpumask);
1822 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
1823 }
320ae51f
JA
1824}
1825
static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set)
{
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	bool shared;
	int i;

	if (set->tag_list.next == set->tag_list.prev)
		shared = false;
	else
		shared = true;

	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		blk_mq_freeze_queue(q);

		queue_for_each_hw_ctx(q, hctx, i) {
			if (shared)
				hctx->flags |= BLK_MQ_F_TAG_SHARED;
			else
				hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
		}
		blk_mq_unfreeze_queue(q);
	}
}

static void blk_mq_del_queue_tag_set(struct request_queue *q)
{
	struct blk_mq_tag_set *set = q->tag_set;

	mutex_lock(&set->tag_list_lock);
	list_del_init(&q->tag_set_list);
	blk_mq_update_tag_set_depth(set);
	mutex_unlock(&set->tag_list_lock);
}

static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
				     struct request_queue *q)
{
	q->tag_set = set;

	mutex_lock(&set->tag_list_lock);
	list_add_tail(&q->tag_set_list, &set->tag_list);
	blk_mq_update_tag_set_depth(set);
	mutex_unlock(&set->tag_list_lock);
}

24d2f903 1872struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
320ae51f
JA
1873{
1874 struct blk_mq_hw_ctx **hctxs;
e6cdb092 1875 struct blk_mq_ctx __percpu *ctx;
320ae51f 1876 struct request_queue *q;
f14bbe77 1877 unsigned int *map;
320ae51f
JA
1878 int i;
1879
320ae51f
JA
1880 ctx = alloc_percpu(struct blk_mq_ctx);
1881 if (!ctx)
1882 return ERR_PTR(-ENOMEM);
1883
24d2f903
CH
1884 hctxs = kmalloc_node(set->nr_hw_queues * sizeof(*hctxs), GFP_KERNEL,
1885 set->numa_node);
320ae51f
JA
1886
1887 if (!hctxs)
1888 goto err_percpu;
1889
f14bbe77
JA
1890 map = blk_mq_make_queue_map(set);
1891 if (!map)
1892 goto err_map;
1893
24d2f903 1894 for (i = 0; i < set->nr_hw_queues; i++) {
f14bbe77
JA
1895 int node = blk_mq_hw_queue_to_node(map, i);
1896
cdef54dd
CH
1897 hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx),
1898 GFP_KERNEL, node);
320ae51f
JA
1899 if (!hctxs[i])
1900 goto err_hctxs;
1901
a86073e4
JA
1902 if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL,
1903 node))
e4043dcf
JA
1904 goto err_hctxs;
1905
0d2602ca 1906 atomic_set(&hctxs[i]->nr_active, 0);
f14bbe77 1907 hctxs[i]->numa_node = node;
320ae51f
JA
1908 hctxs[i]->queue_num = i;
1909 }
1910
24d2f903 1911 q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
320ae51f
JA
1912 if (!q)
1913 goto err_hctxs;
1914
17497acb
TH
1915 /*
1916 * Init percpu_ref in atomic mode so that it's faster to shutdown.
1917 * See blk_register_queue() for details.
1918 */
a34375ef 1919 if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release,
17497acb 1920 PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
3d2936f4
ML
1921 goto err_map;
1922
320ae51f
JA
1923 setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
1924 blk_queue_rq_timeout(q, 30000);
1925
1926 q->nr_queues = nr_cpu_ids;
24d2f903 1927 q->nr_hw_queues = set->nr_hw_queues;
f14bbe77 1928 q->mq_map = map;
320ae51f
JA
1929
1930 q->queue_ctx = ctx;
1931 q->queue_hw_ctx = hctxs;
1932
24d2f903 1933 q->mq_ops = set->ops;
94eddfbe 1934 q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
320ae51f 1935
05f1dd53
JA
1936 if (!(set->flags & BLK_MQ_F_SG_MERGE))
1937 q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE;
1938
1be036e9
CH
1939 q->sg_reserved_size = INT_MAX;
1940
6fca6a61
CH
1941 INIT_WORK(&q->requeue_work, blk_mq_requeue_work);
1942 INIT_LIST_HEAD(&q->requeue_list);
1943 spin_lock_init(&q->requeue_lock);
1944
07068d5b
JA
1945 if (q->nr_hw_queues > 1)
1946 blk_queue_make_request(q, blk_mq_make_request);
1947 else
1948 blk_queue_make_request(q, blk_sq_make_request);
1949
24d2f903
CH
1950 if (set->timeout)
1951 blk_queue_rq_timeout(q, set->timeout);
320ae51f 1952
eba71768
JA
1953 /*
1954 * Do this after blk_queue_make_request() overrides it...
1955 */
1956 q->nr_requests = set->queue_depth;
1957
24d2f903
CH
1958 if (set->ops->complete)
1959 blk_queue_softirq_done(q, set->ops->complete);
30a91cb4 1960
24d2f903 1961 blk_mq_init_cpu_queues(q, set->nr_hw_queues);
320ae51f 1962
24d2f903 1963 if (blk_mq_init_hw_queues(q, set))
1bcb1ead 1964 goto err_hw;
18741986 1965
320ae51f
JA
1966 mutex_lock(&all_q_mutex);
1967 list_add_tail(&q->all_q_node, &all_q_list);
1968 mutex_unlock(&all_q_mutex);
1969
0d2602ca
JA
1970 blk_mq_add_queue_tag_set(set, q);
1971
484b4061
JA
1972 blk_mq_map_swqueue(q);
1973
320ae51f 1974 return q;
18741986 1975
320ae51f 1976err_hw:
320ae51f
JA
1977 blk_cleanup_queue(q);
1978err_hctxs:
f14bbe77 1979 kfree(map);
24d2f903 1980 for (i = 0; i < set->nr_hw_queues; i++) {
320ae51f
JA
1981 if (!hctxs[i])
1982 break;
e4043dcf 1983 free_cpumask_var(hctxs[i]->cpumask);
cdef54dd 1984 kfree(hctxs[i]);
320ae51f 1985 }
f14bbe77 1986err_map:
320ae51f
JA
1987 kfree(hctxs);
1988err_percpu:
1989 free_percpu(ctx);
1990 return ERR_PTR(-ENOMEM);
1991}
1992EXPORT_SYMBOL(blk_mq_init_queue);
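
/*
 * Illustrative driver-side usage of the setup path above (a minimal
 * sketch, not code from this file).  Every "my_"-prefixed name is made up
 * for the example, and the queue_rq() prototype shown is the one used by
 * kernels of roughly this vintage -- it has changed across releases.
 */
#if 0	/* example only, never compiled */
static int my_queue_rq(struct blk_mq_hw_ctx *hctx,
		       const struct blk_mq_queue_data *bd)
{
	/* Pretend the I/O completed immediately. */
	blk_mq_end_request(bd->rq, 0);
	return BLK_MQ_RQ_QUEUE_OK;
}

static struct blk_mq_ops my_mq_ops = {
	.queue_rq	= my_queue_rq,
	.map_queue	= blk_mq_map_queue,	/* default CPU -> hctx mapping */
};

static struct blk_mq_tag_set my_set;

static int my_probe(void)
{
	struct request_queue *q;
	int ret;

	my_set.ops		= &my_mq_ops;
	my_set.nr_hw_queues	= 1;
	my_set.queue_depth	= 64;
	my_set.numa_node	= NUMA_NO_NODE;
	my_set.cmd_size		= 0;	/* no per-request driver payload */

	ret = blk_mq_alloc_tag_set(&my_set);
	if (ret)
		return ret;

	q = blk_mq_init_queue(&my_set);
	if (IS_ERR(q)) {
		blk_mq_free_tag_set(&my_set);
		return PTR_ERR(q);
	}
	/* ... attach q to a gendisk, etc. ... */
	return 0;
}
#endif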

void blk_mq_free_queue(struct request_queue *q)
{
	struct blk_mq_tag_set *set = q->tag_set;

	blk_mq_del_queue_tag_set(q);

	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
	blk_mq_free_hw_queues(q, set);

	percpu_ref_exit(&q->mq_usage_counter);

	free_percpu(q->queue_ctx);
	kfree(q->queue_hw_ctx);
	kfree(q->mq_map);

	q->queue_ctx = NULL;
	q->queue_hw_ctx = NULL;
	q->mq_map = NULL;

	mutex_lock(&all_q_mutex);
	list_del_init(&q->all_q_node);
	mutex_unlock(&all_q_mutex);
}

/* Basically redo blk_mq_init_queue with the queue frozen */
static void blk_mq_queue_reinit(struct request_queue *q)
{
	WARN_ON_ONCE(!q->mq_freeze_depth);

	blk_mq_sysfs_unregister(q);

	blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues);

	/*
	 * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
	 * we should change hctx numa_node according to the new topology (this
	 * involves freeing and re-allocating memory -- is it worth doing?)
	 */

	blk_mq_map_swqueue(q);

	blk_mq_sysfs_register(q);
}

static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
				      unsigned long action, void *hcpu)
{
	struct request_queue *q;

	/*
	 * Before new mappings are established, a hot-added CPU might already
	 * have started handling requests. This doesn't break anything, as we
	 * map offline CPUs to the first hardware queue. We will re-init the
	 * queues below to get optimal settings.
	 */
	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN &&
	    action != CPU_ONLINE && action != CPU_ONLINE_FROZEN)
		return NOTIFY_OK;

	mutex_lock(&all_q_mutex);

	/*
	 * We need to freeze and reinit all existing queues. Freezing
	 * involves a synchronous wait for an RCU grace period, and doing it
	 * one by one may take a long time. Start freezing all queues in
	 * one swoop and then wait for the completions so that freezing can
	 * take place in parallel.
	 */
	list_for_each_entry(q, &all_q_list, all_q_node)
		blk_mq_freeze_queue_start(q);
	list_for_each_entry(q, &all_q_list, all_q_node)
		blk_mq_freeze_queue_wait(q);

	list_for_each_entry(q, &all_q_list, all_q_node)
		blk_mq_queue_reinit(q);

	list_for_each_entry(q, &all_q_list, all_q_node)
		blk_mq_unfreeze_queue(q);

	mutex_unlock(&all_q_mutex);
	return NOTIFY_OK;
}
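
/*
 * Rough arithmetic for the batched freeze above (illustrative numbers):
 * killing a percpu ref waits for an RCU grace period -- call it ~10ms.
 * With 100 queues, freezing them one at a time would serialize ~100 grace
 * periods (~1s); starting all the kills first and only then waiting lets
 * the grace periods overlap, so the whole pass costs on the order of one
 * grace period plus the drain time.
 */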

static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
{
	int i;

	for (i = 0; i < set->nr_hw_queues; i++) {
		set->tags[i] = blk_mq_init_rq_map(set, i);
		if (!set->tags[i])
			goto out_unwind;
	}

	return 0;

out_unwind:
	while (--i >= 0)
		blk_mq_free_rq_map(set, set->tags[i], i);

	return -ENOMEM;
}

/*
 * Allocate the request maps associated with this tag_set. Note that this
 * may reduce the depth asked for, if memory is tight. set->queue_depth
 * will be updated to reflect the allocated depth.
 */
static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
{
	unsigned int depth;
	int err;

	depth = set->queue_depth;
	do {
		err = __blk_mq_alloc_rq_maps(set);
		if (!err)
			break;

		set->queue_depth >>= 1;
		if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
			err = -ENOMEM;
			break;
		}
	} while (set->queue_depth);

	if (!set->queue_depth || err) {
		pr_err("blk-mq: failed to allocate request map\n");
		return -ENOMEM;
	}

	if (depth != set->queue_depth)
		pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
						depth, set->queue_depth);

	return 0;
}
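
/*
 * Worked example for the fallback above (hypothetical numbers): asking for
 * queue_depth = 256 with reserved_tags = 0 under memory pressure retries
 * at 128, then 64, and so on; if the allocation first succeeds at 64, it
 * logs "reduced tag depth (256 -> 64)".  The loop gives up with -ENOMEM
 * once the depth would fall below reserved_tags + BLK_MQ_TAG_MIN.
 */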

/*
 * Alloc a tag set to be associated with one or more request queues.
 * May fail with EINVAL for various error conditions. May adjust the
 * requested depth down, if it is too large. In that case, the set
 * value will be stored in set->queue_depth.
 */
int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
{
	BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);

	if (!set->nr_hw_queues)
		return -EINVAL;
	if (!set->queue_depth)
		return -EINVAL;
	if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
		return -EINVAL;

	if (!set->ops->queue_rq || !set->ops->map_queue)
		return -EINVAL;

	if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
		pr_info("blk-mq: reduced tag depth to %u\n",
			BLK_MQ_MAX_DEPTH);
		set->queue_depth = BLK_MQ_MAX_DEPTH;
	}

	/*
	 * If a crashdump is active, then we are potentially in a very
	 * memory constrained environment. Limit us to 1 queue and
	 * 64 tags to prevent using too much memory.
	 */
	if (is_kdump_kernel()) {
		set->nr_hw_queues = 1;
		set->queue_depth = min(64U, set->queue_depth);
	}

	set->tags = kmalloc_node(set->nr_hw_queues *
				 sizeof(struct blk_mq_tags *),
				 GFP_KERNEL, set->numa_node);
	if (!set->tags)
		return -ENOMEM;

	if (blk_mq_alloc_rq_maps(set))
		goto enomem;

	mutex_init(&set->tag_list_lock);
	INIT_LIST_HEAD(&set->tag_list);

	return 0;
enomem:
	kfree(set->tags);
	set->tags = NULL;
	return -ENOMEM;
}
EXPORT_SYMBOL(blk_mq_alloc_tag_set);
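
/*
 * Lifetime note with a small sketch (the "my_" names come from the
 * hypothetical driver example after blk_mq_init_queue() above): the tag
 * set must outlive every queue created from it, so teardown runs in the
 * reverse order of setup.
 */
#if 0	/* example only, never compiled */
static void my_remove(struct request_queue *q)
{
	blk_cleanup_queue(q);		/* drain and release the queue */
	blk_mq_free_tag_set(&my_set);	/* then free the shared tags */
}
#endif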

void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
{
	int i;

	for (i = 0; i < set->nr_hw_queues; i++) {
		if (set->tags[i])
			blk_mq_free_rq_map(set, set->tags[i], i);
	}

	kfree(set->tags);
	set->tags = NULL;
}
EXPORT_SYMBOL(blk_mq_free_tag_set);

int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
{
	struct blk_mq_tag_set *set = q->tag_set;
	struct blk_mq_hw_ctx *hctx;
	int i, ret;

	if (!set || nr > set->queue_depth)
		return -EINVAL;

	ret = 0;
	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_tag_update_depth(hctx->tags, nr);
		if (ret)
			break;
	}

	if (!ret)
		q->nr_requests = nr;

	return ret;
}
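
/*
 * This is what the nr_requests sysfs attribute ends up calling for blk-mq
 * queues (the wiring lives in blk-sysfs.c), so an administrator can shrink
 * the effective depth at runtime, e.g.:
 *
 *	echo 32 > /sys/block/<disk>/queue/nr_requests
 *
 * Values above the tag set's queue_depth are rejected with -EINVAL, per
 * the check above.
 */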

void blk_mq_disable_hotplug(void)
{
	mutex_lock(&all_q_mutex);
}

void blk_mq_enable_hotplug(void)
{
	mutex_unlock(&all_q_mutex);
}

static int __init blk_mq_init(void)
{
	blk_mq_cpu_init();

	hotcpu_notifier(blk_mq_queue_reinit_notify, 0);

	return 0;
}
subsys_initcall(blk_mq_init);