block/blk-mq.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Block multiqueue core code
4 *
5 * Copyright (C) 2013-2014 Jens Axboe
6 * Copyright (C) 2013-2014 Christoph Hellwig
7 */
8 #include <linux/kernel.h>
9 #include <linux/module.h>
10 #include <linux/backing-dev.h>
11 #include <linux/bio.h>
12 #include <linux/blkdev.h>
13 #include <linux/kmemleak.h>
14 #include <linux/mm.h>
15 #include <linux/init.h>
16 #include <linux/slab.h>
17 #include <linux/workqueue.h>
18 #include <linux/smp.h>
19 #include <linux/llist.h>
20 #include <linux/list_sort.h>
21 #include <linux/cpu.h>
22 #include <linux/cache.h>
23 #include <linux/sched/sysctl.h>
24 #include <linux/sched/topology.h>
25 #include <linux/sched/signal.h>
26 #include <linux/delay.h>
27 #include <linux/crash_dump.h>
28 #include <linux/prefetch.h>
29 #include <linux/blk-crypto.h>
30
31 #include <trace/events/block.h>
32
33 #include <linux/blk-mq.h>
34 #include <linux/t10-pi.h>
35 #include "blk.h"
36 #include "blk-mq.h"
37 #include "blk-mq-debugfs.h"
38 #include "blk-mq-tag.h"
39 #include "blk-pm.h"
40 #include "blk-stat.h"
41 #include "blk-mq-sched.h"
42 #include "blk-rq-qos.h"
43
44 static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
45
46 static void blk_mq_poll_stats_start(struct request_queue *q);
47 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
48
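/*
 * Map a request to a poll stats bucket. Buckets are indexed by data
 * direction plus twice the ilog2 of the request size in sectors, and
 * requests larger than the covered range fall into the last read/write
 * pair of buckets.
 */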
49 static int blk_mq_poll_stats_bkt(const struct request *rq)
50 {
51 int ddir, sectors, bucket;
52
53 ddir = rq_data_dir(rq);
54 sectors = blk_rq_stats_sectors(rq);
55
56 bucket = ddir + 2 * ilog2(sectors);
57
58 if (bucket < 0)
59 return -1;
60 else if (bucket >= BLK_MQ_POLL_STATS_BKTS)
61 return ddir + BLK_MQ_POLL_STATS_BKTS - 2;
62
63 return bucket;
64 }
65
66 /*
67 * Check if any of the ctx, dispatch list or elevator
68 * have pending work in this hardware queue.
69 */
70 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
71 {
72 return !list_empty_careful(&hctx->dispatch) ||
73 sbitmap_any_bit_set(&hctx->ctx_map) ||
74 blk_mq_sched_has_work(hctx);
75 }
76
77 /*
78 * Mark this ctx as having pending work in this hardware queue
79 */
80 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
81 struct blk_mq_ctx *ctx)
82 {
83 const int bit = ctx->index_hw[hctx->type];
84
85 if (!sbitmap_test_bit(&hctx->ctx_map, bit))
86 sbitmap_set_bit(&hctx->ctx_map, bit);
87 }
88
89 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
90 struct blk_mq_ctx *ctx)
91 {
92 const int bit = ctx->index_hw[hctx->type];
93
94 sbitmap_clear_bit(&hctx->ctx_map, bit);
95 }
96
97 struct mq_inflight {
98 struct block_device *part;
99 unsigned int inflight[2];
100 };
101
102 static bool blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
103 struct request *rq, void *priv,
104 bool reserved)
105 {
106 struct mq_inflight *mi = priv;
107
108 if ((!mi->part->bd_partno || rq->part == mi->part) &&
109 blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
110 mi->inflight[rq_data_dir(rq)]++;
111
112 return true;
113 }
114
115 unsigned int blk_mq_in_flight(struct request_queue *q,
116 struct block_device *part)
117 {
118 struct mq_inflight mi = { .part = part };
119
120 blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
121
122 return mi.inflight[0] + mi.inflight[1];
123 }
124
125 void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
126 unsigned int inflight[2])
127 {
128 struct mq_inflight mi = { .part = part };
129
130 blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
131 inflight[0] = mi.inflight[0];
132 inflight[1] = mi.inflight[1];
133 }
134
135 void blk_freeze_queue_start(struct request_queue *q)
136 {
137 mutex_lock(&q->mq_freeze_lock);
138 if (++q->mq_freeze_depth == 1) {
139 percpu_ref_kill(&q->q_usage_counter);
140 mutex_unlock(&q->mq_freeze_lock);
141 if (queue_is_mq(q))
142 blk_mq_run_hw_queues(q, false);
143 } else {
144 mutex_unlock(&q->mq_freeze_lock);
145 }
146 }
147 EXPORT_SYMBOL_GPL(blk_freeze_queue_start);
148
149 void blk_mq_freeze_queue_wait(struct request_queue *q)
150 {
151 wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
152 }
153 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);
154
155 int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
156 unsigned long timeout)
157 {
158 return wait_event_timeout(q->mq_freeze_wq,
159 percpu_ref_is_zero(&q->q_usage_counter),
160 timeout);
161 }
162 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);
163
164 /*
165 * Guarantee no request is in use, so we can change any data structure of
166 * the queue afterward.
167 */
168 void blk_freeze_queue(struct request_queue *q)
169 {
170 /*
171 * In the !blk_mq case we are only calling this to kill the
172 * q_usage_counter, otherwise this increases the freeze depth
173 * and waits for it to return to zero. For this reason there is
174 * no blk_unfreeze_queue(), and blk_freeze_queue() is not
175 * exported to drivers as the only user for unfreeze is blk_mq.
176 */
177 blk_freeze_queue_start(q);
178 blk_mq_freeze_queue_wait(q);
179 }
180
181 void blk_mq_freeze_queue(struct request_queue *q)
182 {
183 /*
184 * ...just an alias to keep freeze and unfreeze actions balanced
185 * in the blk_mq_* namespace
186 */
187 blk_freeze_queue(q);
188 }
189 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
190
191 void blk_mq_unfreeze_queue(struct request_queue *q)
192 {
193 mutex_lock(&q->mq_freeze_lock);
194 q->mq_freeze_depth--;
195 WARN_ON_ONCE(q->mq_freeze_depth < 0);
196 if (!q->mq_freeze_depth) {
197 percpu_ref_resurrect(&q->q_usage_counter);
198 wake_up_all(&q->mq_freeze_wq);
199 }
200 mutex_unlock(&q->mq_freeze_lock);
201 }
202 EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
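
/*
 * Usage sketch (hypothetical driver code, not part of this file): freeze the
 * queue so no requests are in flight, update queue limits or other state that
 * must not change while requests are outstanding, then unfreeze it.
 *
 *	blk_mq_freeze_queue(q);
 *	blk_queue_max_hw_sectors(q, new_max_sectors);
 *	blk_mq_unfreeze_queue(q);
 */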
203
204 /*
205 * FIXME: replace the scsi_internal_device_*block_nowait() calls in the
206 * mpt3sas driver such that this function can be removed.
207 */
208 void blk_mq_quiesce_queue_nowait(struct request_queue *q)
209 {
210 blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q);
211 }
212 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);
213
214 /**
215 * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
216 * @q: request queue.
217 *
218  * Note: this function does not prevent the struct request end_io()
219  * callback from being invoked. Once this function returns, we make
220 * sure no dispatch can happen until the queue is unquiesced via
221 * blk_mq_unquiesce_queue().
222 */
223 void blk_mq_quiesce_queue(struct request_queue *q)
224 {
225 struct blk_mq_hw_ctx *hctx;
226 unsigned int i;
227 bool rcu = false;
228
229 blk_mq_quiesce_queue_nowait(q);
230
231 queue_for_each_hw_ctx(q, hctx, i) {
232 if (hctx->flags & BLK_MQ_F_BLOCKING)
233 synchronize_srcu(hctx->srcu);
234 else
235 rcu = true;
236 }
237 if (rcu)
238 synchronize_rcu();
239 }
240 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
241
242 /*
243 * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue()
244 * @q: request queue.
245 *
246  * This function returns the queue to the state it was in before
247  * blk_mq_quiesce_queue() was called.
248 */
249 void blk_mq_unquiesce_queue(struct request_queue *q)
250 {
251 blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
252
253 /* dispatch requests which are inserted during quiescing */
254 blk_mq_run_hw_queues(q, true);
255 }
256 EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);
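
/*
 * Usage sketch (hypothetical driver code, not part of this file): quiesce the
 * queue to guarantee no ->queue_rq() call is in progress, update state that
 * the dispatch path reads, then unquiesce to resume dispatching.
 *
 *	blk_mq_quiesce_queue(q);
 *	... update driver state read by ->queue_rq() ...
 *	blk_mq_unquiesce_queue(q);
 */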
257
258 void blk_mq_wake_waiters(struct request_queue *q)
259 {
260 struct blk_mq_hw_ctx *hctx;
261 unsigned int i;
262
263 queue_for_each_hw_ctx(q, hctx, i)
264 if (blk_mq_hw_queue_mapped(hctx))
265 blk_mq_tag_wakeup_all(hctx->tags, true);
266 }
267
268 /*
269  * Start/end time stamping is only needed if iostat or blk stats
270  * are enabled, or if an IO scheduler is in use.
271 */
272 static inline bool blk_mq_need_time_stamp(struct request *rq)
273 {
274 return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS)) || rq->q->elevator;
275 }
276
277 static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
278 unsigned int tag, u64 alloc_time_ns)
279 {
280 struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
281 struct request *rq = tags->static_rqs[tag];
282
283 if (data->q->elevator) {
284 rq->tag = BLK_MQ_NO_TAG;
285 rq->internal_tag = tag;
286 } else {
287 rq->tag = tag;
288 rq->internal_tag = BLK_MQ_NO_TAG;
289 }
290
291 /* csd/requeue_work/fifo_time is initialized before use */
292 rq->q = data->q;
293 rq->mq_ctx = data->ctx;
294 rq->mq_hctx = data->hctx;
295 rq->rq_flags = 0;
296 rq->cmd_flags = data->cmd_flags;
297 if (data->flags & BLK_MQ_REQ_PM)
298 rq->rq_flags |= RQF_PM;
299 if (blk_queue_io_stat(data->q))
300 rq->rq_flags |= RQF_IO_STAT;
301 INIT_LIST_HEAD(&rq->queuelist);
302 INIT_HLIST_NODE(&rq->hash);
303 RB_CLEAR_NODE(&rq->rb_node);
304 rq->rq_disk = NULL;
305 rq->part = NULL;
306 #ifdef CONFIG_BLK_RQ_ALLOC_TIME
307 rq->alloc_time_ns = alloc_time_ns;
308 #endif
309 if (blk_mq_need_time_stamp(rq))
310 rq->start_time_ns = ktime_get_ns();
311 else
312 rq->start_time_ns = 0;
313 rq->io_start_time_ns = 0;
314 rq->stats_sectors = 0;
315 rq->nr_phys_segments = 0;
316 #if defined(CONFIG_BLK_DEV_INTEGRITY)
317 rq->nr_integrity_segments = 0;
318 #endif
319 blk_crypto_rq_set_defaults(rq);
320 /* tag was already set */
321 WRITE_ONCE(rq->deadline, 0);
322
323 rq->timeout = 0;
324
325 rq->end_io = NULL;
326 rq->end_io_data = NULL;
327
328 data->ctx->rq_dispatched[op_is_sync(data->cmd_flags)]++;
329 refcount_set(&rq->ref, 1);
330
331 if (!op_is_flush(data->cmd_flags)) {
332 struct elevator_queue *e = data->q->elevator;
333
334 rq->elv.icq = NULL;
335 if (e && e->type->ops.prepare_request) {
336 if (e->type->icq_cache)
337 blk_mq_sched_assign_ioc(rq);
338
339 e->type->ops.prepare_request(rq);
340 rq->rq_flags |= RQF_ELVPRIV;
341 }
342 }
343
344 data->hctx->queued++;
345 return rq;
346 }
347
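/*
 * Allocate a tag and initialise a request for it. A waiting allocation can
 * only fail because the hctx went inactive, in which case the ctx/hctx
 * selection and the tag allocation are retried (see the retry loop below).
 */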
348 static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data)
349 {
350 struct request_queue *q = data->q;
351 struct elevator_queue *e = q->elevator;
352 u64 alloc_time_ns = 0;
353 unsigned int tag;
354
355 /* alloc_time includes depth and tag waits */
356 if (blk_queue_rq_alloc_time(q))
357 alloc_time_ns = ktime_get_ns();
358
359 if (data->cmd_flags & REQ_NOWAIT)
360 data->flags |= BLK_MQ_REQ_NOWAIT;
361
362 if (e) {
363 /*
364 * Flush requests are special and go directly to the
365 * dispatch list. Don't include reserved tags in the
366 * limiting, as it isn't useful.
367 */
368 if (!op_is_flush(data->cmd_flags) &&
369 e->type->ops.limit_depth &&
370 !(data->flags & BLK_MQ_REQ_RESERVED))
371 e->type->ops.limit_depth(data->cmd_flags, data);
372 }
373
374 retry:
375 data->ctx = blk_mq_get_ctx(q);
376 data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
377 if (!e)
378 blk_mq_tag_busy(data->hctx);
379
380 /*
381 * Waiting allocations only fail because of an inactive hctx. In that
382 * case just retry the hctx assignment and tag allocation as CPU hotplug
383 * should have migrated us to an online CPU by now.
384 */
385 tag = blk_mq_get_tag(data);
386 if (tag == BLK_MQ_NO_TAG) {
387 if (data->flags & BLK_MQ_REQ_NOWAIT)
388 return NULL;
389
390 /*
391 		 * Give up the CPU and sleep for a short time to ensure that
392 		 * threads using a realtime scheduling class are migrated
393 * off the CPU, and thus off the hctx that is going away.
394 */
395 msleep(3);
396 goto retry;
397 }
398 return blk_mq_rq_ctx_init(data, tag, alloc_time_ns);
399 }
400
401 struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
402 blk_mq_req_flags_t flags)
403 {
404 struct blk_mq_alloc_data data = {
405 .q = q,
406 .flags = flags,
407 .cmd_flags = op,
408 };
409 struct request *rq;
410 int ret;
411
412 ret = blk_queue_enter(q, flags);
413 if (ret)
414 return ERR_PTR(ret);
415
416 rq = __blk_mq_alloc_request(&data);
417 if (!rq)
418 goto out_queue_exit;
419 rq->__data_len = 0;
420 rq->__sector = (sector_t) -1;
421 rq->bio = rq->biotail = NULL;
422 return rq;
423 out_queue_exit:
424 blk_queue_exit(q);
425 return ERR_PTR(-EWOULDBLOCK);
426 }
427 EXPORT_SYMBOL(blk_mq_alloc_request);
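
/*
 * Usage sketch (hypothetical driver code, not part of this file): allocate a
 * request outside of bio submission, check for errors and free it when done.
 * REQ_OP_DRV_IN is just one example of an opcode a driver might use.
 *
 *	struct request *rq;
 *
 *	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	... set up and execute the request ...
 *	blk_mq_free_request(rq);
 */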
428
429 struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
430 unsigned int op, blk_mq_req_flags_t flags, unsigned int hctx_idx)
431 {
432 struct blk_mq_alloc_data data = {
433 .q = q,
434 .flags = flags,
435 .cmd_flags = op,
436 };
437 u64 alloc_time_ns = 0;
438 unsigned int cpu;
439 unsigned int tag;
440 int ret;
441
442 /* alloc_time includes depth and tag waits */
443 if (blk_queue_rq_alloc_time(q))
444 alloc_time_ns = ktime_get_ns();
445
446 /*
447 * If the tag allocator sleeps we could get an allocation for a
448 * different hardware context. No need to complicate the low level
449 * allocator for this for the rare use case of a command tied to
450 * a specific queue.
451 */
452 if (WARN_ON_ONCE(!(flags & (BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED))))
453 return ERR_PTR(-EINVAL);
454
455 if (hctx_idx >= q->nr_hw_queues)
456 return ERR_PTR(-EIO);
457
458 ret = blk_queue_enter(q, flags);
459 if (ret)
460 return ERR_PTR(ret);
461
462 /*
463 * Check if the hardware context is actually mapped to anything.
464 * If not tell the caller that it should skip this queue.
465 */
466 ret = -EXDEV;
467 data.hctx = q->queue_hw_ctx[hctx_idx];
468 if (!blk_mq_hw_queue_mapped(data.hctx))
469 goto out_queue_exit;
470 cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask);
471 data.ctx = __blk_mq_get_ctx(q, cpu);
472
473 if (!q->elevator)
474 blk_mq_tag_busy(data.hctx);
475
476 ret = -EWOULDBLOCK;
477 tag = blk_mq_get_tag(&data);
478 if (tag == BLK_MQ_NO_TAG)
479 goto out_queue_exit;
480 return blk_mq_rq_ctx_init(&data, tag, alloc_time_ns);
481
482 out_queue_exit:
483 blk_queue_exit(q);
484 return ERR_PTR(ret);
485 }
486 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
487
488 static void __blk_mq_free_request(struct request *rq)
489 {
490 struct request_queue *q = rq->q;
491 struct blk_mq_ctx *ctx = rq->mq_ctx;
492 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
493 const int sched_tag = rq->internal_tag;
494
495 blk_crypto_free_request(rq);
496 blk_pm_mark_last_busy(rq);
497 rq->mq_hctx = NULL;
498 if (rq->tag != BLK_MQ_NO_TAG)
499 blk_mq_put_tag(hctx->tags, ctx, rq->tag);
500 if (sched_tag != BLK_MQ_NO_TAG)
501 blk_mq_put_tag(hctx->sched_tags, ctx, sched_tag);
502 blk_mq_sched_restart(hctx);
503 blk_queue_exit(q);
504 }
505
506 void blk_mq_free_request(struct request *rq)
507 {
508 struct request_queue *q = rq->q;
509 struct elevator_queue *e = q->elevator;
510 struct blk_mq_ctx *ctx = rq->mq_ctx;
511 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
512
513 if (rq->rq_flags & RQF_ELVPRIV) {
514 if (e && e->type->ops.finish_request)
515 e->type->ops.finish_request(rq);
516 if (rq->elv.icq) {
517 put_io_context(rq->elv.icq->ioc);
518 rq->elv.icq = NULL;
519 }
520 }
521
522 ctx->rq_completed[rq_is_sync(rq)]++;
523 if (rq->rq_flags & RQF_MQ_INFLIGHT)
524 __blk_mq_dec_active_requests(hctx);
525
526 if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
527 laptop_io_completion(q->backing_dev_info);
528
529 rq_qos_done(q, rq);
530
531 WRITE_ONCE(rq->state, MQ_RQ_IDLE);
532 if (refcount_dec_and_test(&rq->ref))
533 __blk_mq_free_request(rq);
534 }
535 EXPORT_SYMBOL_GPL(blk_mq_free_request);
536
537 inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
538 {
539 u64 now = 0;
540
541 if (blk_mq_need_time_stamp(rq))
542 now = ktime_get_ns();
543
544 if (rq->rq_flags & RQF_STATS) {
545 blk_mq_poll_stats_start(rq->q);
546 blk_stat_add(rq, now);
547 }
548
549 blk_mq_sched_completed_request(rq, now);
550
551 blk_account_io_done(rq, now);
552
553 if (rq->end_io) {
554 rq_qos_done(rq->q, rq);
555 rq->end_io(rq, error);
556 } else {
557 blk_mq_free_request(rq);
558 }
559 }
560 EXPORT_SYMBOL(__blk_mq_end_request);
561
562 void blk_mq_end_request(struct request *rq, blk_status_t error)
563 {
564 if (blk_update_request(rq, error, blk_rq_bytes(rq)))
565 BUG();
566 __blk_mq_end_request(rq, error);
567 }
568 EXPORT_SYMBOL(blk_mq_end_request);
569
570 /*
571 * Softirq action handler - move entries to local list and loop over them
572 * while passing them to the queue registered handler.
573 */
574 static __latent_entropy void blk_done_softirq(struct softirq_action *h)
575 {
576 struct list_head *cpu_list, local_list;
577
578 local_irq_disable();
579 cpu_list = this_cpu_ptr(&blk_cpu_done);
580 list_replace_init(cpu_list, &local_list);
581 local_irq_enable();
582
583 while (!list_empty(&local_list)) {
584 struct request *rq;
585
586 rq = list_entry(local_list.next, struct request, ipi_list);
587 list_del_init(&rq->ipi_list);
588 rq->q->mq_ops->complete(rq);
589 }
590 }
591
592 static void blk_mq_trigger_softirq(struct request *rq)
593 {
594 struct list_head *list;
595 unsigned long flags;
596
597 local_irq_save(flags);
598 list = this_cpu_ptr(&blk_cpu_done);
599 list_add_tail(&rq->ipi_list, list);
600
601 /*
602 * If the list only contains our just added request, signal a raise of
603 * the softirq. If there are already entries there, someone already
604 * raised the irq but it hasn't run yet.
605 */
606 if (list->next == &rq->ipi_list)
607 raise_softirq_irqoff(BLOCK_SOFTIRQ);
608 local_irq_restore(flags);
609 }
610
611 static int blk_softirq_cpu_dead(unsigned int cpu)
612 {
613 /*
614 * If a CPU goes away, splice its entries to the current CPU
615 * and trigger a run of the softirq
616 */
617 local_irq_disable();
618 list_splice_init(&per_cpu(blk_cpu_done, cpu),
619 this_cpu_ptr(&blk_cpu_done));
620 raise_softirq_irqoff(BLOCK_SOFTIRQ);
621 local_irq_enable();
622
623 return 0;
624 }
625
626
627 static void __blk_mq_complete_request_remote(void *data)
628 {
629 struct request *rq = data;
630
631 /*
632 	 * For most single queue controllers there is only one irq vector
633 	 * for handling I/O completion, and that irq's affinity is set to
634 	 * all possible CPUs. On most architectures this means the irq is
635 	 * handled on one specific CPU.
636 	 *
637 	 * So complete I/O requests in softirq context for single queue
638 	 * devices to avoid degrading I/O performance due to irqsoff latency.
639 */
640 if (rq->q->nr_hw_queues == 1)
641 blk_mq_trigger_softirq(rq);
642 else
643 rq->q->mq_ops->complete(rq);
644 }
645
646 static inline bool blk_mq_complete_need_ipi(struct request *rq)
647 {
648 int cpu = raw_smp_processor_id();
649
650 if (!IS_ENABLED(CONFIG_SMP) ||
651 !test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags))
652 return false;
653 /*
654 * With force threaded interrupts enabled, raising softirq from an SMP
655 * function call will always result in waking the ksoftirqd thread.
656 * This is probably worse than completing the request on a different
657 * cache domain.
658 */
659 if (force_irqthreads)
660 return false;
661
662 /* same CPU or cache domain? Complete locally */
663 if (cpu == rq->mq_ctx->cpu ||
664 (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags) &&
665 cpus_share_cache(cpu, rq->mq_ctx->cpu)))
666 return false;
667
668 /* don't try to IPI to an offline CPU */
669 return cpu_online(rq->mq_ctx->cpu);
670 }
671
672 bool blk_mq_complete_request_remote(struct request *rq)
673 {
674 WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
675
676 /*
677 	 * For a polled request, always complete locally; it's pointless
678 * to redirect the completion.
679 */
680 if (rq->cmd_flags & REQ_HIPRI)
681 return false;
682
683 if (blk_mq_complete_need_ipi(rq)) {
684 INIT_CSD(&rq->csd, __blk_mq_complete_request_remote, rq);
685 smp_call_function_single_async(rq->mq_ctx->cpu, &rq->csd);
686 } else {
687 if (rq->q->nr_hw_queues > 1)
688 return false;
689 blk_mq_trigger_softirq(rq);
690 }
691
692 return true;
693 }
694 EXPORT_SYMBOL_GPL(blk_mq_complete_request_remote);
695
696 /**
697 * blk_mq_complete_request - end I/O on a request
698 * @rq: the request being processed
699 *
700 * Description:
701 * Complete a request by scheduling the ->complete_rq operation.
702 **/
703 void blk_mq_complete_request(struct request *rq)
704 {
705 if (!blk_mq_complete_request_remote(rq))
706 rq->q->mq_ops->complete(rq);
707 }
708 EXPORT_SYMBOL(blk_mq_complete_request);
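
/*
 * Completion flow sketch (hypothetical driver code, not part of this file):
 * the driver's interrupt handler calls blk_mq_complete_request(), which may
 * redirect the completion to another CPU, and the driver's blk_mq_ops
 * ->complete() callback then finishes the request with blk_mq_end_request().
 *
 *	static void mydrv_irq_handler(struct mydrv_queue *mq)
 *	{
 *		struct request *rq = mydrv_next_completed(mq);
 *
 *		blk_mq_complete_request(rq);
 *	}
 *
 *	static void mydrv_complete_rq(struct request *rq)
 *	{
 *		blk_mq_end_request(rq, mydrv_result_to_blk_status(rq));
 *	}
 */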
709
710 static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx)
711 __releases(hctx->srcu)
712 {
713 if (!(hctx->flags & BLK_MQ_F_BLOCKING))
714 rcu_read_unlock();
715 else
716 srcu_read_unlock(hctx->srcu, srcu_idx);
717 }
718
719 static void hctx_lock(struct blk_mq_hw_ctx *hctx, int *srcu_idx)
720 __acquires(hctx->srcu)
721 {
722 if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
723 /* shut up gcc false positive */
724 *srcu_idx = 0;
725 rcu_read_lock();
726 } else
727 *srcu_idx = srcu_read_lock(hctx->srcu);
728 }
729
730 /**
731 * blk_mq_start_request - Start processing a request
732 * @rq: Pointer to request to be started
733 *
734 * Function used by device drivers to notify the block layer that a request
735 * is going to be processed now, so blk layer can do proper initializations
736 * such as starting the timeout timer.
737 */
738 void blk_mq_start_request(struct request *rq)
739 {
740 struct request_queue *q = rq->q;
741
742 trace_block_rq_issue(rq);
743
744 if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
745 rq->io_start_time_ns = ktime_get_ns();
746 rq->stats_sectors = blk_rq_sectors(rq);
747 rq->rq_flags |= RQF_STATS;
748 rq_qos_issue(q, rq);
749 }
750
751 WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE);
752
753 blk_add_timer(rq);
754 WRITE_ONCE(rq->state, MQ_RQ_IN_FLIGHT);
755
756 #ifdef CONFIG_BLK_DEV_INTEGRITY
757 if (blk_integrity_rq(rq) && req_op(rq) == REQ_OP_WRITE)
758 q->integrity.profile->prepare_fn(rq);
759 #endif
760 }
761 EXPORT_SYMBOL(blk_mq_start_request);
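
/*
 * Usage sketch (hypothetical driver code, not part of this file): a driver's
 * ->queue_rq() implementation calls blk_mq_start_request() before handing the
 * request to hardware, and returns BLK_STS_RESOURCE when it runs out of
 * device resources so the request is retried later.
 *
 *	static blk_status_t mydrv_queue_rq(struct blk_mq_hw_ctx *hctx,
 *					   const struct blk_mq_queue_data *bd)
 *	{
 *		struct request *rq = bd->rq;
 *
 *		if (!mydrv_have_resources(hctx->driver_data))
 *			return BLK_STS_RESOURCE;
 *
 *		blk_mq_start_request(rq);
 *		mydrv_submit_to_hw(hctx->driver_data, rq);
 *		return BLK_STS_OK;
 *	}
 */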
762
763 static void __blk_mq_requeue_request(struct request *rq)
764 {
765 struct request_queue *q = rq->q;
766
767 blk_mq_put_driver_tag(rq);
768
769 trace_block_rq_requeue(rq);
770 rq_qos_requeue(q, rq);
771
772 if (blk_mq_request_started(rq)) {
773 WRITE_ONCE(rq->state, MQ_RQ_IDLE);
774 rq->rq_flags &= ~RQF_TIMED_OUT;
775 }
776 }
777
778 void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
779 {
780 __blk_mq_requeue_request(rq);
781
782 	/* this request will be re-inserted into the io scheduler queue */
783 blk_mq_sched_requeue_request(rq);
784
785 BUG_ON(!list_empty(&rq->queuelist));
786 blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
787 }
788 EXPORT_SYMBOL(blk_mq_requeue_request);
789
790 static void blk_mq_requeue_work(struct work_struct *work)
791 {
792 struct request_queue *q =
793 container_of(work, struct request_queue, requeue_work.work);
794 LIST_HEAD(rq_list);
795 struct request *rq, *next;
796
797 spin_lock_irq(&q->requeue_lock);
798 list_splice_init(&q->requeue_list, &rq_list);
799 spin_unlock_irq(&q->requeue_lock);
800
801 list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
802 if (!(rq->rq_flags & (RQF_SOFTBARRIER | RQF_DONTPREP)))
803 continue;
804
805 rq->rq_flags &= ~RQF_SOFTBARRIER;
806 list_del_init(&rq->queuelist);
807 /*
808 		 * If RQF_DONTPREP is set, the request contains driver specific
809 		 * data, so insert it into the hctx dispatch list to avoid any
810 		 * merge.
811 */
812 if (rq->rq_flags & RQF_DONTPREP)
813 blk_mq_request_bypass_insert(rq, false, false);
814 else
815 blk_mq_sched_insert_request(rq, true, false, false);
816 }
817
818 while (!list_empty(&rq_list)) {
819 rq = list_entry(rq_list.next, struct request, queuelist);
820 list_del_init(&rq->queuelist);
821 blk_mq_sched_insert_request(rq, false, false, false);
822 }
823
824 blk_mq_run_hw_queues(q, false);
825 }
826
827 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
828 bool kick_requeue_list)
829 {
830 struct request_queue *q = rq->q;
831 unsigned long flags;
832
833 /*
834 * We abuse this flag that is otherwise used by the I/O scheduler to
835 * request head insertion from the workqueue.
836 */
837 BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);
838
839 spin_lock_irqsave(&q->requeue_lock, flags);
840 if (at_head) {
841 rq->rq_flags |= RQF_SOFTBARRIER;
842 list_add(&rq->queuelist, &q->requeue_list);
843 } else {
844 list_add_tail(&rq->queuelist, &q->requeue_list);
845 }
846 spin_unlock_irqrestore(&q->requeue_lock, flags);
847
848 if (kick_requeue_list)
849 blk_mq_kick_requeue_list(q);
850 }
851
852 void blk_mq_kick_requeue_list(struct request_queue *q)
853 {
854 kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, 0);
855 }
856 EXPORT_SYMBOL(blk_mq_kick_requeue_list);
857
858 void blk_mq_delay_kick_requeue_list(struct request_queue *q,
859 unsigned long msecs)
860 {
861 kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
862 msecs_to_jiffies(msecs));
863 }
864 EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
865
866 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
867 {
868 if (tag < tags->nr_tags) {
869 prefetch(tags->rqs[tag]);
870 return tags->rqs[tag];
871 }
872
873 return NULL;
874 }
875 EXPORT_SYMBOL(blk_mq_tag_to_rq);
876
877 static bool blk_mq_rq_inflight(struct blk_mq_hw_ctx *hctx, struct request *rq,
878 void *priv, bool reserved)
879 {
880 /*
881 * If we find a request that isn't idle and the queue matches,
882 * we know the queue is busy. Return false to stop the iteration.
883 */
884 if (blk_mq_request_started(rq) && rq->q == hctx->queue) {
885 bool *busy = priv;
886
887 *busy = true;
888 return false;
889 }
890
891 return true;
892 }
893
894 bool blk_mq_queue_inflight(struct request_queue *q)
895 {
896 bool busy = false;
897
898 blk_mq_queue_tag_busy_iter(q, blk_mq_rq_inflight, &busy);
899 return busy;
900 }
901 EXPORT_SYMBOL_GPL(blk_mq_queue_inflight);
902
903 static void blk_mq_rq_timed_out(struct request *req, bool reserved)
904 {
905 req->rq_flags |= RQF_TIMED_OUT;
906 if (req->q->mq_ops->timeout) {
907 enum blk_eh_timer_return ret;
908
909 ret = req->q->mq_ops->timeout(req, reserved);
910 if (ret == BLK_EH_DONE)
911 return;
912 WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER);
913 }
914
915 blk_add_timer(req);
916 }
917
918 static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
919 {
920 unsigned long deadline;
921
922 if (blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT)
923 return false;
924 if (rq->rq_flags & RQF_TIMED_OUT)
925 return false;
926
927 deadline = READ_ONCE(rq->deadline);
928 if (time_after_eq(jiffies, deadline))
929 return true;
930
931 if (*next == 0)
932 *next = deadline;
933 else if (time_after(*next, deadline))
934 *next = deadline;
935 return false;
936 }
937
938 static bool blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
939 struct request *rq, void *priv, bool reserved)
940 {
941 unsigned long *next = priv;
942
943 /*
944 	 * Just do a quick check whether the request has expired before locking
945 	 * it, so we're not unnecessarily synchronizing across CPUs.
946 */
947 if (!blk_mq_req_expired(rq, next))
948 return true;
949
950 /*
951 * We have reason to believe the request may be expired. Take a
952 * reference on the request to lock this request lifetime into its
953 * currently allocated context to prevent it from being reallocated in
954 * the event the completion by-passes this timeout handler.
955 *
956 * If the reference was already released, then the driver beat the
957 * timeout handler to posting a natural completion.
958 */
959 if (!refcount_inc_not_zero(&rq->ref))
960 return true;
961
962 /*
963 * The request is now locked and cannot be reallocated underneath the
964 * timeout handler's processing. Re-verify this exact request is truly
965 * expired; if it is not expired, then the request was completed and
966 * reallocated as a new request.
967 */
968 if (blk_mq_req_expired(rq, next))
969 blk_mq_rq_timed_out(rq, reserved);
970
971 if (is_flush_rq(rq, hctx))
972 rq->end_io(rq, 0);
973 else if (refcount_dec_and_test(&rq->ref))
974 __blk_mq_free_request(rq);
975
976 return true;
977 }
978
979 static void blk_mq_timeout_work(struct work_struct *work)
980 {
981 struct request_queue *q =
982 container_of(work, struct request_queue, timeout_work);
983 unsigned long next = 0;
984 struct blk_mq_hw_ctx *hctx;
985 int i;
986
987 /* A deadlock might occur if a request is stuck requiring a
988 * timeout at the same time a queue freeze is waiting
989 * completion, since the timeout code would not be able to
990 * acquire the queue reference here.
991 *
992 * That's why we don't use blk_queue_enter here; instead, we use
993 * percpu_ref_tryget directly, because we need to be able to
994 * obtain a reference even in the short window between the queue
995 * starting to freeze, by dropping the first reference in
996 * blk_freeze_queue_start, and the moment the last request is
997 * consumed, marked by the instant q_usage_counter reaches
998 * zero.
999 */
1000 if (!percpu_ref_tryget(&q->q_usage_counter))
1001 return;
1002
1003 blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &next);
1004
1005 if (next != 0) {
1006 mod_timer(&q->timeout, next);
1007 } else {
1008 /*
1009 * Request timeouts are handled as a forward rolling timer. If
1010 * we end up here it means that no requests are pending and
1011 * also that no request has been pending for a while. Mark
1012 * each hctx as idle.
1013 */
1014 queue_for_each_hw_ctx(q, hctx, i) {
1015 /* the hctx may be unmapped, so check it here */
1016 if (blk_mq_hw_queue_mapped(hctx))
1017 blk_mq_tag_idle(hctx);
1018 }
1019 }
1020 blk_queue_exit(q);
1021 }
1022
1023 struct flush_busy_ctx_data {
1024 struct blk_mq_hw_ctx *hctx;
1025 struct list_head *list;
1026 };
1027
1028 static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
1029 {
1030 struct flush_busy_ctx_data *flush_data = data;
1031 struct blk_mq_hw_ctx *hctx = flush_data->hctx;
1032 struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
1033 enum hctx_type type = hctx->type;
1034
1035 spin_lock(&ctx->lock);
1036 list_splice_tail_init(&ctx->rq_lists[type], flush_data->list);
1037 sbitmap_clear_bit(sb, bitnr);
1038 spin_unlock(&ctx->lock);
1039 return true;
1040 }
1041
1042 /*
1043 * Process software queues that have been marked busy, splicing them
1044  * to the for-dispatch list.
1045 */
1046 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
1047 {
1048 struct flush_busy_ctx_data data = {
1049 .hctx = hctx,
1050 .list = list,
1051 };
1052
1053 sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
1054 }
1055 EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs);
1056
1057 struct dispatch_rq_data {
1058 struct blk_mq_hw_ctx *hctx;
1059 struct request *rq;
1060 };
1061
1062 static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr,
1063 void *data)
1064 {
1065 struct dispatch_rq_data *dispatch_data = data;
1066 struct blk_mq_hw_ctx *hctx = dispatch_data->hctx;
1067 struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
1068 enum hctx_type type = hctx->type;
1069
1070 spin_lock(&ctx->lock);
1071 if (!list_empty(&ctx->rq_lists[type])) {
1072 dispatch_data->rq = list_entry_rq(ctx->rq_lists[type].next);
1073 list_del_init(&dispatch_data->rq->queuelist);
1074 if (list_empty(&ctx->rq_lists[type]))
1075 sbitmap_clear_bit(sb, bitnr);
1076 }
1077 spin_unlock(&ctx->lock);
1078
1079 return !dispatch_data->rq;
1080 }
1081
1082 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
1083 struct blk_mq_ctx *start)
1084 {
1085 unsigned off = start ? start->index_hw[hctx->type] : 0;
1086 struct dispatch_rq_data data = {
1087 .hctx = hctx,
1088 .rq = NULL,
1089 };
1090
1091 __sbitmap_for_each_set(&hctx->ctx_map, off,
1092 dispatch_rq_from_ctx, &data);
1093
1094 return data.rq;
1095 }
1096
1097 static inline unsigned int queued_to_index(unsigned int queued)
1098 {
1099 if (!queued)
1100 return 0;
1101
1102 return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
1103 }
1104
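/*
 * Allocate a driver tag for @rq: reserved scheduler tags are satisfied from
 * the reserved tag bitmap, everything else from the regular bitmap, subject
 * to the per-hctx limit enforced by hctx_may_queue().
 */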
1105 static bool __blk_mq_get_driver_tag(struct request *rq)
1106 {
1107 struct sbitmap_queue *bt = rq->mq_hctx->tags->bitmap_tags;
1108 unsigned int tag_offset = rq->mq_hctx->tags->nr_reserved_tags;
1109 int tag;
1110
1111 blk_mq_tag_busy(rq->mq_hctx);
1112
1113 if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) {
1114 bt = rq->mq_hctx->tags->breserved_tags;
1115 tag_offset = 0;
1116 } else {
1117 if (!hctx_may_queue(rq->mq_hctx, bt))
1118 return false;
1119 }
1120
1121 tag = __sbitmap_queue_get(bt);
1122 if (tag == BLK_MQ_NO_TAG)
1123 return false;
1124
1125 rq->tag = tag + tag_offset;
1126 return true;
1127 }
1128
1129 static bool blk_mq_get_driver_tag(struct request *rq)
1130 {
1131 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
1132
1133 if (rq->tag == BLK_MQ_NO_TAG && !__blk_mq_get_driver_tag(rq))
1134 return false;
1135
1136 if ((hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) &&
1137 !(rq->rq_flags & RQF_MQ_INFLIGHT)) {
1138 rq->rq_flags |= RQF_MQ_INFLIGHT;
1139 __blk_mq_inc_active_requests(hctx);
1140 }
1141 hctx->tags->rqs[rq->tag] = rq;
1142 return true;
1143 }
1144
1145 static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
1146 int flags, void *key)
1147 {
1148 struct blk_mq_hw_ctx *hctx;
1149
1150 hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);
1151
1152 spin_lock(&hctx->dispatch_wait_lock);
1153 if (!list_empty(&wait->entry)) {
1154 struct sbitmap_queue *sbq;
1155
1156 list_del_init(&wait->entry);
1157 sbq = hctx->tags->bitmap_tags;
1158 atomic_dec(&sbq->ws_active);
1159 }
1160 spin_unlock(&hctx->dispatch_wait_lock);
1161
1162 blk_mq_run_hw_queue(hctx, true);
1163 return 1;
1164 }
1165
1166 /*
1167 * Mark us waiting for a tag. For shared tags, this involves hooking us into
1168 * the tag wakeups. For non-shared tags, we can simply mark us needing a
1169 * restart. For both cases, take care to check the condition again after
1170 * marking us as waiting.
1171 */
1172 static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
1173 struct request *rq)
1174 {
1175 struct sbitmap_queue *sbq = hctx->tags->bitmap_tags;
1176 struct wait_queue_head *wq;
1177 wait_queue_entry_t *wait;
1178 bool ret;
1179
1180 if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
1181 blk_mq_sched_mark_restart_hctx(hctx);
1182
1183 /*
1184 * It's possible that a tag was freed in the window between the
1185 * allocation failure and adding the hardware queue to the wait
1186 * queue.
1187 *
1188 * Don't clear RESTART here, someone else could have set it.
1189 * At most this will cost an extra queue run.
1190 */
1191 return blk_mq_get_driver_tag(rq);
1192 }
1193
1194 wait = &hctx->dispatch_wait;
1195 if (!list_empty_careful(&wait->entry))
1196 return false;
1197
1198 wq = &bt_wait_ptr(sbq, hctx)->wait;
1199
1200 spin_lock_irq(&wq->lock);
1201 spin_lock(&hctx->dispatch_wait_lock);
1202 if (!list_empty(&wait->entry)) {
1203 spin_unlock(&hctx->dispatch_wait_lock);
1204 spin_unlock_irq(&wq->lock);
1205 return false;
1206 }
1207
1208 atomic_inc(&sbq->ws_active);
1209 wait->flags &= ~WQ_FLAG_EXCLUSIVE;
1210 __add_wait_queue(wq, wait);
1211
1212 /*
1213 * It's possible that a tag was freed in the window between the
1214 * allocation failure and adding the hardware queue to the wait
1215 * queue.
1216 */
1217 ret = blk_mq_get_driver_tag(rq);
1218 if (!ret) {
1219 spin_unlock(&hctx->dispatch_wait_lock);
1220 spin_unlock_irq(&wq->lock);
1221 return false;
1222 }
1223
1224 /*
1225 * We got a tag, remove ourselves from the wait queue to ensure
1226 * someone else gets the wakeup.
1227 */
1228 list_del_init(&wait->entry);
1229 atomic_dec(&sbq->ws_active);
1230 spin_unlock(&hctx->dispatch_wait_lock);
1231 spin_unlock_irq(&wq->lock);
1232
1233 return true;
1234 }
1235
1236 #define BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT 8
1237 #define BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR 4
1238 /*
1239  * Update dispatch busy with an Exponential Weighted Moving Average (EWMA):
1240  * - EWMA is a simple way to compute a running average value
1241  * - weights of 7/8 and 1/8 are applied so that old samples decay exponentially
1242  * - a factor of 4 is used to avoid results that are too small (0); the exact
1243  *   factor doesn't matter because EWMA decays exponentially
1244 */
1245 static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy)
1246 {
1247 unsigned int ewma;
1248
1249 if (hctx->queue->elevator)
1250 return;
1251
1252 ewma = hctx->dispatch_busy;
1253
1254 if (!ewma && !busy)
1255 return;
1256
1257 ewma *= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT - 1;
1258 if (busy)
1259 ewma += 1 << BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR;
1260 ewma /= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT;
1261
1262 hctx->dispatch_busy = ewma;
1263 }
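
/*
 * Illustrative trace of the update above with WEIGHT 8 and FACTOR 4: starting
 * from ewma = 0, a busy sample gives (0 * 7 + 16) / 8 = 2, a second busy
 * sample gives (2 * 7 + 16) / 8 = 3, and a following idle sample decays it to
 * (3 * 7) / 8 = 2 (integer arithmetic).
 */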
1264
1265 #define BLK_MQ_RESOURCE_DELAY 3 /* ms units */
1266
1267 static void blk_mq_handle_dev_resource(struct request *rq,
1268 struct list_head *list)
1269 {
1270 struct request *next =
1271 list_first_entry_or_null(list, struct request, queuelist);
1272
1273 /*
1274 * If an I/O scheduler has been configured and we got a driver tag for
1275 * the next request already, free it.
1276 */
1277 if (next)
1278 blk_mq_put_driver_tag(next);
1279
1280 list_add(&rq->queuelist, list);
1281 __blk_mq_requeue_request(rq);
1282 }
1283
1284 static void blk_mq_handle_zone_resource(struct request *rq,
1285 struct list_head *zone_list)
1286 {
1287 /*
1288 * If we end up here it is because we cannot dispatch a request to a
1289 * specific zone due to LLD level zone-write locking or other zone
1290 * related resource not being available. In this case, set the request
1291 * aside in zone_list for retrying it later.
1292 */
1293 list_add(&rq->queuelist, zone_list);
1294 __blk_mq_requeue_request(rq);
1295 }
1296
1297 enum prep_dispatch {
1298 PREP_DISPATCH_OK,
1299 PREP_DISPATCH_NO_TAG,
1300 PREP_DISPATCH_NO_BUDGET,
1301 };
1302
1303 static enum prep_dispatch blk_mq_prep_dispatch_rq(struct request *rq,
1304 bool need_budget)
1305 {
1306 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
1307
1308 if (need_budget && !blk_mq_get_dispatch_budget(rq->q)) {
1309 blk_mq_put_driver_tag(rq);
1310 return PREP_DISPATCH_NO_BUDGET;
1311 }
1312
1313 if (!blk_mq_get_driver_tag(rq)) {
1314 /*
1315 * The initial allocation attempt failed, so we need to
1316 * rerun the hardware queue when a tag is freed. The
1317 * waitqueue takes care of that. If the queue is run
1318 * before we add this entry back on the dispatch list,
1319 * we'll re-run it below.
1320 */
1321 if (!blk_mq_mark_tag_wait(hctx, rq)) {
1322 /*
1323 			 * Any budget not obtained here will be released together
1324 			 * when the partial dispatch is handled.
1325 */
1326 if (need_budget)
1327 blk_mq_put_dispatch_budget(rq->q);
1328 return PREP_DISPATCH_NO_TAG;
1329 }
1330 }
1331
1332 return PREP_DISPATCH_OK;
1333 }
1334
1335 /* release all allocated budgets before calling to blk_mq_dispatch_rq_list */
1336 static void blk_mq_release_budgets(struct request_queue *q,
1337 unsigned int nr_budgets)
1338 {
1339 int i;
1340
1341 for (i = 0; i < nr_budgets; i++)
1342 blk_mq_put_dispatch_budget(q);
1343 }
1344
1345 /*
1346 * Returns true if we did some work AND can potentially do more.
1347 */
1348 bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
1349 unsigned int nr_budgets)
1350 {
1351 enum prep_dispatch prep;
1352 struct request_queue *q = hctx->queue;
1353 struct request *rq, *nxt;
1354 int errors, queued;
1355 blk_status_t ret = BLK_STS_OK;
1356 LIST_HEAD(zone_list);
1357
1358 if (list_empty(list))
1359 return false;
1360
1361 /*
1362 * Now process all the entries, sending them to the driver.
1363 */
1364 errors = queued = 0;
1365 do {
1366 struct blk_mq_queue_data bd;
1367
1368 rq = list_first_entry(list, struct request, queuelist);
1369
1370 WARN_ON_ONCE(hctx != rq->mq_hctx);
1371 prep = blk_mq_prep_dispatch_rq(rq, !nr_budgets);
1372 if (prep != PREP_DISPATCH_OK)
1373 break;
1374
1375 list_del_init(&rq->queuelist);
1376
1377 bd.rq = rq;
1378
1379 /*
1380 * Flag last if we have no more requests, or if we have more
1381 * but can't assign a driver tag to it.
1382 */
1383 if (list_empty(list))
1384 bd.last = true;
1385 else {
1386 nxt = list_first_entry(list, struct request, queuelist);
1387 bd.last = !blk_mq_get_driver_tag(nxt);
1388 }
1389
1390 /*
1391 		 * Once the request is queued to the LLD, there is no need to
1392 		 * account for the budget any more.
1393 */
1394 if (nr_budgets)
1395 nr_budgets--;
1396 ret = q->mq_ops->queue_rq(hctx, &bd);
1397 switch (ret) {
1398 case BLK_STS_OK:
1399 queued++;
1400 break;
1401 case BLK_STS_RESOURCE:
1402 case BLK_STS_DEV_RESOURCE:
1403 blk_mq_handle_dev_resource(rq, list);
1404 goto out;
1405 case BLK_STS_ZONE_RESOURCE:
1406 /*
1407 * Move the request to zone_list and keep going through
1408 * the dispatch list to find more requests the drive can
1409 * accept.
1410 */
1411 blk_mq_handle_zone_resource(rq, &zone_list);
1412 break;
1413 default:
1414 errors++;
1415 blk_mq_end_request(rq, ret);
1416 }
1417 } while (!list_empty(list));
1418 out:
1419 if (!list_empty(&zone_list))
1420 list_splice_tail_init(&zone_list, list);
1421
1422 hctx->dispatched[queued_to_index(queued)]++;
1423
1424 /* If we didn't flush the entire list, we could have told the driver
1425 * there was more coming, but that turned out to be a lie.
1426 */
1427 if ((!list_empty(list) || errors) && q->mq_ops->commit_rqs && queued)
1428 q->mq_ops->commit_rqs(hctx);
1429 /*
1430 * Any items that need requeuing? Stuff them into hctx->dispatch,
1431 * that is where we will continue on next queue run.
1432 */
1433 if (!list_empty(list)) {
1434 bool needs_restart;
1435 /* For non-shared tags, the RESTART check will suffice */
1436 bool no_tag = prep == PREP_DISPATCH_NO_TAG &&
1437 (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED);
1438 bool no_budget_avail = prep == PREP_DISPATCH_NO_BUDGET;
1439
1440 blk_mq_release_budgets(q, nr_budgets);
1441
1442 spin_lock(&hctx->lock);
1443 list_splice_tail_init(list, &hctx->dispatch);
1444 spin_unlock(&hctx->lock);
1445
1446 /*
1447 * Order adding requests to hctx->dispatch and checking
1448 * SCHED_RESTART flag. The pair of this smp_mb() is the one
1449 * in blk_mq_sched_restart(). Avoid restart code path to
1450 * miss the new added requests to hctx->dispatch, meantime
1451 * SCHED_RESTART is observed here.
1452 */
1453 smp_mb();
1454
1455 /*
1456 * If SCHED_RESTART was set by the caller of this function and
1457 * it is no longer set that means that it was cleared by another
1458 * thread and hence that a queue rerun is needed.
1459 *
1460 * If 'no_tag' is set, that means that we failed getting
1461 * a driver tag with an I/O scheduler attached. If our dispatch
1462 * waitqueue is no longer active, ensure that we run the queue
1463 * AFTER adding our entries back to the list.
1464 *
1465 * If no I/O scheduler has been configured it is possible that
1466 * the hardware queue got stopped and restarted before requests
1467 * were pushed back onto the dispatch list. Rerun the queue to
1468 * avoid starvation. Notes:
1469 * - blk_mq_run_hw_queue() checks whether or not a queue has
1470 * been stopped before rerunning a queue.
1471 * - Some but not all block drivers stop a queue before
1472 * returning BLK_STS_RESOURCE. Two exceptions are scsi-mq
1473 * and dm-rq.
1474 *
1475 * If driver returns BLK_STS_RESOURCE and SCHED_RESTART
1476 * bit is set, run queue after a delay to avoid IO stalls
1477 * that could otherwise occur if the queue is idle. We'll do
1478 * similar if we couldn't get budget and SCHED_RESTART is set.
1479 */
1480 needs_restart = blk_mq_sched_needs_restart(hctx);
1481 if (!needs_restart ||
1482 (no_tag && list_empty_careful(&hctx->dispatch_wait.entry)))
1483 blk_mq_run_hw_queue(hctx, true);
1484 else if (needs_restart && (ret == BLK_STS_RESOURCE ||
1485 no_budget_avail))
1486 blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY);
1487
1488 blk_mq_update_dispatch_busy(hctx, true);
1489 return false;
1490 } else
1491 blk_mq_update_dispatch_busy(hctx, false);
1492
1493 return (queued + errors) != 0;
1494 }
1495
1496 /**
1497 * __blk_mq_run_hw_queue - Run a hardware queue.
1498 * @hctx: Pointer to the hardware queue to run.
1499 *
1500 * Send pending requests to the hardware.
1501 */
1502 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
1503 {
1504 int srcu_idx;
1505
1506 /*
1507 * We can't run the queue inline with ints disabled. Ensure that
1508 * we catch bad users of this early.
1509 */
1510 WARN_ON_ONCE(in_interrupt());
1511
1512 might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
1513
1514 hctx_lock(hctx, &srcu_idx);
1515 blk_mq_sched_dispatch_requests(hctx);
1516 hctx_unlock(hctx, srcu_idx);
1517 }
1518
1519 static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)
1520 {
1521 int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);
1522
1523 if (cpu >= nr_cpu_ids)
1524 cpu = cpumask_first(hctx->cpumask);
1525 return cpu;
1526 }
1527
1528 /*
1529 * It'd be great if the workqueue API had a way to pass
1530 * in a mask and had some smarts for more clever placement.
1531 * For now we just round-robin here, switching for every
1532 * BLK_MQ_CPU_WORK_BATCH queued items.
1533 */
1534 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
1535 {
1536 bool tried = false;
1537 int next_cpu = hctx->next_cpu;
1538
1539 if (hctx->queue->nr_hw_queues == 1)
1540 return WORK_CPU_UNBOUND;
1541
1542 if (--hctx->next_cpu_batch <= 0) {
1543 select_cpu:
1544 next_cpu = cpumask_next_and(next_cpu, hctx->cpumask,
1545 cpu_online_mask);
1546 if (next_cpu >= nr_cpu_ids)
1547 next_cpu = blk_mq_first_mapped_cpu(hctx);
1548 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
1549 }
1550
1551 /*
1552 	 * Do unbound schedule if we can't find an online CPU for this hctx,
1553 	 * which should only happen when handling the CPU DEAD hotplug event.
1554 */
1555 if (!cpu_online(next_cpu)) {
1556 if (!tried) {
1557 tried = true;
1558 goto select_cpu;
1559 }
1560
1561 /*
1562 * Make sure to re-select CPU next time once after CPUs
1563 * in hctx->cpumask become online again.
1564 */
1565 hctx->next_cpu = next_cpu;
1566 hctx->next_cpu_batch = 1;
1567 return WORK_CPU_UNBOUND;
1568 }
1569
1570 hctx->next_cpu = next_cpu;
1571 return next_cpu;
1572 }
1573
1574 /**
1575 * __blk_mq_delay_run_hw_queue - Run (or schedule to run) a hardware queue.
1576 * @hctx: Pointer to the hardware queue to run.
1577 * @async: If we want to run the queue asynchronously.
1578 * @msecs: Milliseconds of delay to wait before running the queue.
1579 *
1580 * If !@async, try to run the queue now. Else, run the queue asynchronously and
1581 * with a delay of @msecs.
1582 */
1583 static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
1584 unsigned long msecs)
1585 {
1586 if (unlikely(blk_mq_hctx_stopped(hctx)))
1587 return;
1588
1589 if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
1590 int cpu = get_cpu();
1591 if (cpumask_test_cpu(cpu, hctx->cpumask)) {
1592 __blk_mq_run_hw_queue(hctx);
1593 put_cpu();
1594 return;
1595 }
1596
1597 put_cpu();
1598 }
1599
1600 kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work,
1601 msecs_to_jiffies(msecs));
1602 }
1603
1604 /**
1605 * blk_mq_delay_run_hw_queue - Run a hardware queue asynchronously.
1606 * @hctx: Pointer to the hardware queue to run.
1607 * @msecs: Milliseconds of delay to wait before running the queue.
1608 *
1609 * Run a hardware queue asynchronously with a delay of @msecs.
1610 */
1611 void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
1612 {
1613 __blk_mq_delay_run_hw_queue(hctx, true, msecs);
1614 }
1615 EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
1616
1617 /**
1618 * blk_mq_run_hw_queue - Start to run a hardware queue.
1619 * @hctx: Pointer to the hardware queue to run.
1620 * @async: If we want to run the queue asynchronously.
1621 *
1622 * Check if the request queue is not in a quiesced state and if there are
1623 * pending requests to be sent. If this is true, run the queue to send requests
1624 * to hardware.
1625 */
1626 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
1627 {
1628 int srcu_idx;
1629 bool need_run;
1630
1631 /*
1632 	 * When the queue is quiesced, we may be switching io schedulers,
1633 	 * updating nr_hw_queues, or doing other things, and we can't run the
1634 	 * queue any more; even __blk_mq_hctx_has_pending() can't be called safely.
1635 *
1636 * And queue will be rerun in blk_mq_unquiesce_queue() if it is
1637 * quiesced.
1638 */
1639 hctx_lock(hctx, &srcu_idx);
1640 need_run = !blk_queue_quiesced(hctx->queue) &&
1641 blk_mq_hctx_has_pending(hctx);
1642 hctx_unlock(hctx, srcu_idx);
1643
1644 if (need_run)
1645 __blk_mq_delay_run_hw_queue(hctx, async, 0);
1646 }
1647 EXPORT_SYMBOL(blk_mq_run_hw_queue);
1648
1649 /*
1650 * Is the request queue handled by an IO scheduler that does not respect
1651 * hardware queues when dispatching?
1652 */
1653 static bool blk_mq_has_sqsched(struct request_queue *q)
1654 {
1655 struct elevator_queue *e = q->elevator;
1656
1657 if (e && e->type->ops.dispatch_request &&
1658 !(e->type->elevator_features & ELEVATOR_F_MQ_AWARE))
1659 return true;
1660 return false;
1661 }
1662
1663 /*
1664  * Return the preferred queue to dispatch from (if any) for a non-mq aware IO
1665 * scheduler.
1666 */
1667 static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q)
1668 {
1669 struct blk_mq_hw_ctx *hctx;
1670
1671 /*
1672 * If the IO scheduler does not respect hardware queues when
1673 * dispatching, we just don't bother with multiple HW queues and
1674 * dispatch from hctx for the current CPU since running multiple queues
1675 * just causes lock contention inside the scheduler and pointless cache
1676 * bouncing.
1677 */
1678 hctx = blk_mq_map_queue_type(q, HCTX_TYPE_DEFAULT,
1679 raw_smp_processor_id());
1680 if (!blk_mq_hctx_stopped(hctx))
1681 return hctx;
1682 return NULL;
1683 }
1684
1685 /**
1686 * blk_mq_run_hw_queues - Run all hardware queues in a request queue.
1687 * @q: Pointer to the request queue to run.
1688 * @async: If we want to run the queue asynchronously.
1689 */
1690 void blk_mq_run_hw_queues(struct request_queue *q, bool async)
1691 {
1692 struct blk_mq_hw_ctx *hctx, *sq_hctx;
1693 int i;
1694
1695 sq_hctx = NULL;
1696 if (blk_mq_has_sqsched(q))
1697 sq_hctx = blk_mq_get_sq_hctx(q);
1698 queue_for_each_hw_ctx(q, hctx, i) {
1699 if (blk_mq_hctx_stopped(hctx))
1700 continue;
1701 /*
1702 * Dispatch from this hctx either if there's no hctx preferred
1703 * by IO scheduler or if it has requests that bypass the
1704 * scheduler.
1705 */
1706 if (!sq_hctx || sq_hctx == hctx ||
1707 !list_empty_careful(&hctx->dispatch))
1708 blk_mq_run_hw_queue(hctx, async);
1709 }
1710 }
1711 EXPORT_SYMBOL(blk_mq_run_hw_queues);
1712
1713 /**
1714 * blk_mq_delay_run_hw_queues - Run all hardware queues asynchronously.
1715 * @q: Pointer to the request queue to run.
1716 * @msecs: Milliseconds of delay to wait before running the queues.
1717 */
1718 void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs)
1719 {
1720 struct blk_mq_hw_ctx *hctx, *sq_hctx;
1721 int i;
1722
1723 sq_hctx = NULL;
1724 if (blk_mq_has_sqsched(q))
1725 sq_hctx = blk_mq_get_sq_hctx(q);
1726 queue_for_each_hw_ctx(q, hctx, i) {
1727 if (blk_mq_hctx_stopped(hctx))
1728 continue;
1729 /*
1730 * Dispatch from this hctx either if there's no hctx preferred
1731 * by IO scheduler or if it has requests that bypass the
1732 * scheduler.
1733 */
1734 if (!sq_hctx || sq_hctx == hctx ||
1735 !list_empty_careful(&hctx->dispatch))
1736 blk_mq_delay_run_hw_queue(hctx, msecs);
1737 }
1738 }
1739 EXPORT_SYMBOL(blk_mq_delay_run_hw_queues);
1740
1741 /**
1742 * blk_mq_queue_stopped() - check whether one or more hctxs have been stopped
1743 * @q: request queue.
1744 *
1745 * The caller is responsible for serializing this function against
1746 * blk_mq_{start,stop}_hw_queue().
1747 */
1748 bool blk_mq_queue_stopped(struct request_queue *q)
1749 {
1750 struct blk_mq_hw_ctx *hctx;
1751 int i;
1752
1753 queue_for_each_hw_ctx(q, hctx, i)
1754 if (blk_mq_hctx_stopped(hctx))
1755 return true;
1756
1757 return false;
1758 }
1759 EXPORT_SYMBOL(blk_mq_queue_stopped);
1760
1761 /*
1762  * This function is often used by a driver to pause .queue_rq() when
1763  * there aren't enough resources or some other condition isn't satisfied,
1764  * in which case BLK_STS_RESOURCE is usually returned.
1765 *
1766 * We do not guarantee that dispatch can be drained or blocked
1767 * after blk_mq_stop_hw_queue() returns. Please use
1768 * blk_mq_quiesce_queue() for that requirement.
1769 */
1770 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
1771 {
1772 cancel_delayed_work(&hctx->run_work);
1773
1774 set_bit(BLK_MQ_S_STOPPED, &hctx->state);
1775 }
1776 EXPORT_SYMBOL(blk_mq_stop_hw_queue);
1777
1778 /*
1779  * This function is often used by a driver to pause .queue_rq() when
1780  * there aren't enough resources or some other condition isn't satisfied,
1781  * in which case BLK_STS_RESOURCE is usually returned.
1782 *
1783 * We do not guarantee that dispatch can be drained or blocked
1784 * after blk_mq_stop_hw_queues() returns. Please use
1785 * blk_mq_quiesce_queue() for that requirement.
1786 */
1787 void blk_mq_stop_hw_queues(struct request_queue *q)
1788 {
1789 struct blk_mq_hw_ctx *hctx;
1790 int i;
1791
1792 queue_for_each_hw_ctx(q, hctx, i)
1793 blk_mq_stop_hw_queue(hctx);
1794 }
1795 EXPORT_SYMBOL(blk_mq_stop_hw_queues);
1796
1797 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
1798 {
1799 clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
1800
1801 blk_mq_run_hw_queue(hctx, false);
1802 }
1803 EXPORT_SYMBOL(blk_mq_start_hw_queue);
1804
1805 void blk_mq_start_hw_queues(struct request_queue *q)
1806 {
1807 struct blk_mq_hw_ctx *hctx;
1808 int i;
1809
1810 queue_for_each_hw_ctx(q, hctx, i)
1811 blk_mq_start_hw_queue(hctx);
1812 }
1813 EXPORT_SYMBOL(blk_mq_start_hw_queues);
1814
1815 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
1816 {
1817 if (!blk_mq_hctx_stopped(hctx))
1818 return;
1819
1820 clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
1821 blk_mq_run_hw_queue(hctx, async);
1822 }
1823 EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);
1824
1825 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
1826 {
1827 struct blk_mq_hw_ctx *hctx;
1828 int i;
1829
1830 queue_for_each_hw_ctx(q, hctx, i)
1831 blk_mq_start_stopped_hw_queue(hctx, async);
1832 }
1833 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
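
/*
 * Usage sketch (hypothetical driver code, not part of this file): a driver
 * that runs out of device resources can stop the hardware queue from its
 * ->queue_rq() handler and restart the stopped queues once resources become
 * available again, e.g. from its completion path.
 *
 *	if (mydrv_out_of_resources(dev)) {
 *		blk_mq_stop_hw_queue(hctx);
 *		return BLK_STS_RESOURCE;
 *	}
 *
 *	... later, when resources have been freed ...
 *	blk_mq_start_stopped_hw_queues(dev->queue, true);
 */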
1834
1835 static void blk_mq_run_work_fn(struct work_struct *work)
1836 {
1837 struct blk_mq_hw_ctx *hctx;
1838
1839 hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
1840
1841 /*
1842 * If we are stopped, don't run the queue.
1843 */
1844 if (blk_mq_hctx_stopped(hctx))
1845 return;
1846
1847 __blk_mq_run_hw_queue(hctx);
1848 }
1849
1850 static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
1851 struct request *rq,
1852 bool at_head)
1853 {
1854 struct blk_mq_ctx *ctx = rq->mq_ctx;
1855 enum hctx_type type = hctx->type;
1856
1857 lockdep_assert_held(&ctx->lock);
1858
1859 trace_block_rq_insert(rq);
1860
1861 if (at_head)
1862 list_add(&rq->queuelist, &ctx->rq_lists[type]);
1863 else
1864 list_add_tail(&rq->queuelist, &ctx->rq_lists[type]);
1865 }
1866
1867 void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
1868 bool at_head)
1869 {
1870 struct blk_mq_ctx *ctx = rq->mq_ctx;
1871
1872 lockdep_assert_held(&ctx->lock);
1873
1874 __blk_mq_insert_req_list(hctx, rq, at_head);
1875 blk_mq_hctx_mark_pending(hctx, ctx);
1876 }
1877
1878 /**
1879  * blk_mq_request_bypass_insert - Insert a request into the dispatch list.
1880 * @rq: Pointer to request to be inserted.
1881 * @at_head: true if the request should be inserted at the head of the list.
1882 * @run_queue: If we should run the hardware queue after inserting the request.
1883 *
1884 * Should only be used carefully, when the caller knows we want to
1885 * bypass a potential IO scheduler on the target device.
1886 */
1887 void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
1888 bool run_queue)
1889 {
1890 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
1891
1892 spin_lock(&hctx->lock);
1893 if (at_head)
1894 list_add(&rq->queuelist, &hctx->dispatch);
1895 else
1896 list_add_tail(&rq->queuelist, &hctx->dispatch);
1897 spin_unlock(&hctx->lock);
1898
1899 if (run_queue)
1900 blk_mq_run_hw_queue(hctx, false);
1901 }
1902
1903 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
1904 struct list_head *list)
1905
1906 {
1907 struct request *rq;
1908 enum hctx_type type = hctx->type;
1909
1910 /*
1911 	 * preemption doesn't flush the plug list, so it's possible that
1912 	 * ctx->cpu is offline now
1913 */
1914 list_for_each_entry(rq, list, queuelist) {
1915 BUG_ON(rq->mq_ctx != ctx);
1916 trace_block_rq_insert(rq);
1917 }
1918
1919 spin_lock(&ctx->lock);
1920 list_splice_tail_init(list, &ctx->rq_lists[type]);
1921 blk_mq_hctx_mark_pending(hctx, ctx);
1922 spin_unlock(&ctx->lock);
1923 }
1924
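/*
 * Sort plugged requests by software queue, then hardware queue, then sector,
 * so that requests destined for the same queues end up adjacent and can be
 * inserted (and possibly merged) in batches.
 */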
1925 static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
1926 {
1927 struct request *rqa = container_of(a, struct request, queuelist);
1928 struct request *rqb = container_of(b, struct request, queuelist);
1929
1930 if (rqa->mq_ctx != rqb->mq_ctx)
1931 return rqa->mq_ctx > rqb->mq_ctx;
1932 if (rqa->mq_hctx != rqb->mq_hctx)
1933 return rqa->mq_hctx > rqb->mq_hctx;
1934
1935 return blk_rq_pos(rqa) > blk_rq_pos(rqb);
1936 }
1937
1938 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1939 {
1940 LIST_HEAD(list);
1941
1942 if (list_empty(&plug->mq_list))
1943 return;
1944 list_splice_init(&plug->mq_list, &list);
1945
1946 if (plug->rq_count > 2 && plug->multiple_queues)
1947 list_sort(NULL, &list, plug_rq_cmp);
1948
1949 plug->rq_count = 0;
1950
1951 do {
1952 struct list_head rq_list;
1953 struct request *rq, *head_rq = list_entry_rq(list.next);
1954 struct list_head *pos = &head_rq->queuelist; /* skip first */
1955 struct blk_mq_hw_ctx *this_hctx = head_rq->mq_hctx;
1956 struct blk_mq_ctx *this_ctx = head_rq->mq_ctx;
1957 unsigned int depth = 1;
1958
1959 list_for_each_continue(pos, &list) {
1960 rq = list_entry_rq(pos);
1961 BUG_ON(!rq->q);
1962 if (rq->mq_hctx != this_hctx || rq->mq_ctx != this_ctx)
1963 break;
1964 depth++;
1965 }
1966
1967 list_cut_before(&rq_list, &list, pos);
1968 trace_block_unplug(head_rq->q, depth, !from_schedule);
1969 blk_mq_sched_insert_requests(this_hctx, this_ctx, &rq_list,
1970 from_schedule);
1971 } while(!list_empty(&list));
1972 }
1973
1974 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
1975 unsigned int nr_segs)
1976 {
1977 int err;
1978
1979 if (bio->bi_opf & REQ_RAHEAD)
1980 rq->cmd_flags |= REQ_FAILFAST_MASK;
1981
1982 rq->__sector = bio->bi_iter.bi_sector;
1983 rq->write_hint = bio->bi_write_hint;
1984 blk_rq_bio_prep(rq, bio, nr_segs);
1985
1986 /* This can't fail, since GFP_NOIO includes __GFP_DIRECT_RECLAIM. */
1987 err = blk_crypto_rq_bio_prep(rq, bio, GFP_NOIO);
1988 WARN_ON_ONCE(err);
1989
1990 blk_account_io_start(rq);
1991 }
1992
1993 static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
1994 struct request *rq,
1995 blk_qc_t *cookie, bool last)
1996 {
1997 struct request_queue *q = rq->q;
1998 struct blk_mq_queue_data bd = {
1999 .rq = rq,
2000 .last = last,
2001 };
2002 blk_qc_t new_cookie;
2003 blk_status_t ret;
2004
2005 new_cookie = request_to_qc_t(hctx, rq);
2006
2007 /*
2008 * If ->queue_rq() returns OK, we are done. On a hard error, the caller
2009 * may kill the request. For a busy return (out of resources), put the
2010 * request back so it is dispatched again later, as we previously would have done.
2011 */
2012 ret = q->mq_ops->queue_rq(hctx, &bd);
2013 switch (ret) {
2014 case BLK_STS_OK:
2015 blk_mq_update_dispatch_busy(hctx, false);
2016 *cookie = new_cookie;
2017 break;
2018 case BLK_STS_RESOURCE:
2019 case BLK_STS_DEV_RESOURCE:
2020 blk_mq_update_dispatch_busy(hctx, true);
2021 __blk_mq_requeue_request(rq);
2022 break;
2023 default:
2024 blk_mq_update_dispatch_busy(hctx, false);
2025 *cookie = BLK_QC_T_NONE;
2026 break;
2027 }
2028
2029 return ret;
2030 }
2031
2032 static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
2033 struct request *rq,
2034 blk_qc_t *cookie,
2035 bool bypass_insert, bool last)
2036 {
2037 struct request_queue *q = rq->q;
2038 bool run_queue = true;
2039
2040 /*
2041 * RCU or SRCU read lock is needed before checking quiesced flag.
2042 *
2043 * When the queue is stopped or quiesced, ignore 'bypass_insert' from
2044 * blk_mq_request_issue_directly(), return BLK_STS_OK to the caller,
2045 * and avoid having the driver try to dispatch again.
2046 */
2047 if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
2048 run_queue = false;
2049 bypass_insert = false;
2050 goto insert;
2051 }
2052
2053 if (q->elevator && !bypass_insert)
2054 goto insert;
2055
2056 if (!blk_mq_get_dispatch_budget(q))
2057 goto insert;
2058
2059 if (!blk_mq_get_driver_tag(rq)) {
2060 blk_mq_put_dispatch_budget(q);
2061 goto insert;
2062 }
2063
2064 return __blk_mq_issue_directly(hctx, rq, cookie, last);
2065 insert:
2066 if (bypass_insert)
2067 return BLK_STS_RESOURCE;
2068
2069 blk_mq_sched_insert_request(rq, false, run_queue, false);
2070
2071 return BLK_STS_OK;
2072 }
2073
2074 /**
2075 * blk_mq_try_issue_directly - Try to send a request directly to device driver.
2076 * @hctx: Pointer of the associated hardware queue.
2077 * @rq: Pointer to request to be sent.
2078 * @cookie: Request queue cookie.
2079 *
2080 * If the device has enough resources to accept a new request now, send the
2081 * request directly to the device driver. Otherwise, insert it into the
2082 * hctx->dispatch list, so we can try to send it again in the future. Requests
2083 * inserted into this list have higher priority.
2084 */
2085 static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
2086 struct request *rq, blk_qc_t *cookie)
2087 {
2088 blk_status_t ret;
2089 int srcu_idx;
2090
2091 might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
2092
2093 hctx_lock(hctx, &srcu_idx);
2094
2095 ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true);
2096 if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
2097 blk_mq_request_bypass_insert(rq, false, true);
2098 else if (ret != BLK_STS_OK)
2099 blk_mq_end_request(rq, ret);
2100
2101 hctx_unlock(hctx, srcu_idx);
2102 }
2103
2104 blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
2105 {
2106 blk_status_t ret;
2107 int srcu_idx;
2108 blk_qc_t unused_cookie;
2109 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
2110
2111 hctx_lock(hctx, &srcu_idx);
2112 ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true, last);
2113 hctx_unlock(hctx, srcu_idx);
2114
2115 return ret;
2116 }
2117
2118 void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
2119 struct list_head *list)
2120 {
2121 int queued = 0;
2122 int errors = 0;
2123
2124 while (!list_empty(list)) {
2125 blk_status_t ret;
2126 struct request *rq = list_first_entry(list, struct request,
2127 queuelist);
2128
2129 list_del_init(&rq->queuelist);
2130 ret = blk_mq_request_issue_directly(rq, list_empty(list));
2131 if (ret != BLK_STS_OK) {
2132 if (ret == BLK_STS_RESOURCE ||
2133 ret == BLK_STS_DEV_RESOURCE) {
2134 blk_mq_request_bypass_insert(rq, false,
2135 list_empty(list));
2136 break;
2137 }
2138 blk_mq_end_request(rq, ret);
2139 errors++;
2140 } else
2141 queued++;
2142 }
2143
2144 /*
2145 * If we didn't flush the entire list, we could have told
2146 * the driver there was more coming, but that turned out to
2147 * be a lie.
2148 */
2149 if ((!list_empty(list) || errors) &&
2150 hctx->queue->mq_ops->commit_rqs && queued)
2151 hctx->queue->mq_ops->commit_rqs(hctx);
2152 }
2153
2154 static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
2155 {
2156 list_add_tail(&rq->queuelist, &plug->mq_list);
2157 plug->rq_count++;
2158 if (!plug->multiple_queues && !list_is_singular(&plug->mq_list)) {
2159 struct request *tmp;
2160
2161 tmp = list_first_entry(&plug->mq_list, struct request,
2162 queuelist);
2163 if (tmp->q != rq->q)
2164 plug->multiple_queues = true;
2165 }
2166 }
2167
2168 /**
2169 * blk_mq_submit_bio - Create and send a request to block device.
2170 * @bio: Bio pointer.
2171 *
2172 * Builds up a request structure from @q and @bio and sends it to the device.
2173 * The request may not be queued directly to hardware if:
2174 * * This request can be merged with another one
2175 * * We want to place the request in the plug queue for possible future merging
2176 * * There is an IO scheduler active on this queue
2177 *
2178 * It will not queue the request if there is an error with the bio, or at
2179 * request creation.
2180 *
2181 * Returns: Request queue cookie.
2182 */
2183 blk_qc_t blk_mq_submit_bio(struct bio *bio)
2184 {
2185 struct request_queue *q = bio->bi_bdev->bd_disk->queue;
2186 const int is_sync = op_is_sync(bio->bi_opf);
2187 const int is_flush_fua = op_is_flush(bio->bi_opf);
2188 struct blk_mq_alloc_data data = {
2189 .q = q,
2190 };
2191 struct request *rq;
2192 struct blk_plug *plug;
2193 struct request *same_queue_rq = NULL;
2194 unsigned int nr_segs;
2195 blk_qc_t cookie;
2196 blk_status_t ret;
2197 bool hipri;
2198
2199 blk_queue_bounce(q, &bio);
2200 __blk_queue_split(&bio, &nr_segs);
2201
2202 if (!bio_integrity_prep(bio))
2203 goto queue_exit;
2204
2205 if (!is_flush_fua && !blk_queue_nomerges(q) &&
2206 blk_attempt_plug_merge(q, bio, nr_segs, &same_queue_rq))
2207 goto queue_exit;
2208
2209 if (blk_mq_sched_bio_merge(q, bio, nr_segs))
2210 goto queue_exit;
2211
2212 rq_qos_throttle(q, bio);
2213
2214 hipri = bio->bi_opf & REQ_HIPRI;
2215
2216 data.cmd_flags = bio->bi_opf;
2217 rq = __blk_mq_alloc_request(&data);
2218 if (unlikely(!rq)) {
2219 rq_qos_cleanup(q, bio);
2220 if (bio->bi_opf & REQ_NOWAIT)
2221 bio_wouldblock_error(bio);
2222 goto queue_exit;
2223 }
2224
2225 trace_block_getrq(bio);
2226
2227 rq_qos_track(q, rq, bio);
2228
2229 cookie = request_to_qc_t(data.hctx, rq);
2230
2231 blk_mq_bio_to_request(rq, bio, nr_segs);
2232
2233 ret = blk_crypto_init_request(rq);
2234 if (ret != BLK_STS_OK) {
2235 bio->bi_status = ret;
2236 bio_endio(bio);
2237 blk_mq_free_request(rq);
2238 return BLK_QC_T_NONE;
2239 }
2240
2241 plug = blk_mq_plug(q, bio);
2242 if (unlikely(is_flush_fua)) {
2243 /* Bypass scheduler for flush requests */
2244 blk_insert_flush(rq);
2245 blk_mq_run_hw_queue(data.hctx, true);
2246 } else if (plug && (q->nr_hw_queues == 1 || q->mq_ops->commit_rqs ||
2247 !blk_queue_nonrot(q))) {
2248 /*
2249 * Use plugging if we have a ->commit_rqs() hook as well, as
2250 * we know the driver uses bd->last in a smart fashion.
2251 *
2252 * Use normal plugging if this disk is a slow HDD, as sequential
2253 * IO may benefit a lot from plug merging.
2254 */
2255 unsigned int request_count = plug->rq_count;
2256 struct request *last = NULL;
2257
2258 if (!request_count)
2259 trace_block_plug(q);
2260 else
2261 last = list_entry_rq(plug->mq_list.prev);
2262
2263 if (request_count >= BLK_MAX_REQUEST_COUNT || (last &&
2264 blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
2265 blk_flush_plug_list(plug, false);
2266 trace_block_plug(q);
2267 }
2268
2269 blk_add_rq_to_plug(plug, rq);
2270 } else if (q->elevator) {
2271 /* Insert the request at the IO scheduler queue */
2272 blk_mq_sched_insert_request(rq, false, true, true);
2273 } else if (plug && !blk_queue_nomerges(q)) {
2274 /*
2275 * We do limited plugging. If the bio can be merged, do that.
2276 * Otherwise the existing request in the plug list will be
2277 * issued. So the plug list will have one request at most.
2278 * The plug list might get flushed before this. If that happens,
2279 * the plug list is empty, and same_queue_rq is invalid.
2280 */
2281 if (list_empty(&plug->mq_list))
2282 same_queue_rq = NULL;
2283 if (same_queue_rq) {
2284 list_del_init(&same_queue_rq->queuelist);
2285 plug->rq_count--;
2286 }
2287 blk_add_rq_to_plug(plug, rq);
2288 trace_block_plug(q);
2289
2290 if (same_queue_rq) {
2291 data.hctx = same_queue_rq->mq_hctx;
2292 trace_block_unplug(q, 1, true);
2293 blk_mq_try_issue_directly(data.hctx, same_queue_rq,
2294 &cookie);
2295 }
2296 } else if ((q->nr_hw_queues > 1 && is_sync) ||
2297 !data.hctx->dispatch_busy) {
2298 /*
2299 * There is no scheduler and we can try to send directly
2300 * to the hardware.
2301 */
2302 blk_mq_try_issue_directly(data.hctx, rq, &cookie);
2303 } else {
2304 /* Default case. */
2305 blk_mq_sched_insert_request(rq, false, true, true);
2306 }
2307
2308 if (!hipri)
2309 return BLK_QC_T_NONE;
2310 return cookie;
2311 queue_exit:
2312 blk_queue_exit(q);
2313 return BLK_QC_T_NONE;
2314 }
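/*
 * Example (illustrative sketch, not part of this file): the usual way a
 * bio reaches blk_mq_submit_bio() is via submit_bio(); the cookie it
 * returns is the value produced above and can later be passed to
 * blk_poll(). bdev, page and my_end_io() are placeholder names.
 *
 *	struct bio *bio = bio_alloc(GFP_NOIO, 1);
 *	blk_qc_t cookie;
 *
 *	bio_set_dev(bio, bdev);
 *	bio->bi_iter.bi_sector = 0;
 *	bio->bi_opf = REQ_OP_READ | REQ_HIPRI;	// ask for a pollable cookie
 *	bio->bi_end_io = my_end_io;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	cookie = submit_bio(bio);		// ends up in blk_mq_submit_bio()
 */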
2315
2316 void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
2317 unsigned int hctx_idx)
2318 {
2319 struct page *page;
2320
2321 if (tags->rqs && set->ops->exit_request) {
2322 int i;
2323
2324 for (i = 0; i < tags->nr_tags; i++) {
2325 struct request *rq = tags->static_rqs[i];
2326
2327 if (!rq)
2328 continue;
2329 set->ops->exit_request(set, rq, hctx_idx);
2330 tags->static_rqs[i] = NULL;
2331 }
2332 }
2333
2334 while (!list_empty(&tags->page_list)) {
2335 page = list_first_entry(&tags->page_list, struct page, lru);
2336 list_del_init(&page->lru);
2337 /*
2338 * Remove kmemleak object previously allocated in
2339 * blk_mq_alloc_rqs().
2340 */
2341 kmemleak_free(page_address(page));
2342 __free_pages(page, page->private);
2343 }
2344 }
2345
2346 void blk_mq_free_rq_map(struct blk_mq_tags *tags, unsigned int flags)
2347 {
2348 kfree(tags->rqs);
2349 tags->rqs = NULL;
2350 kfree(tags->static_rqs);
2351 tags->static_rqs = NULL;
2352
2353 blk_mq_free_tags(tags, flags);
2354 }
2355
2356 struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
2357 unsigned int hctx_idx,
2358 unsigned int nr_tags,
2359 unsigned int reserved_tags,
2360 unsigned int flags)
2361 {
2362 struct blk_mq_tags *tags;
2363 int node;
2364
2365 node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], hctx_idx);
2366 if (node == NUMA_NO_NODE)
2367 node = set->numa_node;
2368
2369 tags = blk_mq_init_tags(nr_tags, reserved_tags, node, flags);
2370 if (!tags)
2371 return NULL;
2372
2373 tags->rqs = kcalloc_node(nr_tags, sizeof(struct request *),
2374 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
2375 node);
2376 if (!tags->rqs) {
2377 blk_mq_free_tags(tags, flags);
2378 return NULL;
2379 }
2380
2381 tags->static_rqs = kcalloc_node(nr_tags, sizeof(struct request *),
2382 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
2383 node);
2384 if (!tags->static_rqs) {
2385 kfree(tags->rqs);
2386 blk_mq_free_tags(tags, flags);
2387 return NULL;
2388 }
2389
2390 return tags;
2391 }
2392
2393 static size_t order_to_size(unsigned int order)
2394 {
2395 return (size_t)PAGE_SIZE << order;
2396 }
2397
2398 static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
2399 unsigned int hctx_idx, int node)
2400 {
2401 int ret;
2402
2403 if (set->ops->init_request) {
2404 ret = set->ops->init_request(set, rq, hctx_idx, node);
2405 if (ret)
2406 return ret;
2407 }
2408
2409 WRITE_ONCE(rq->state, MQ_RQ_IDLE);
2410 return 0;
2411 }
2412
2413 int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
2414 unsigned int hctx_idx, unsigned int depth)
2415 {
2416 unsigned int i, j, entries_per_page, max_order = 4;
2417 size_t rq_size, left;
2418 int node;
2419
2420 node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], hctx_idx);
2421 if (node == NUMA_NO_NODE)
2422 node = set->numa_node;
2423
2424 INIT_LIST_HEAD(&tags->page_list);
2425
2426 /*
2427 * rq_size is the size of the request plus driver payload, rounded
2428 * to the cacheline size
2429 */
2430 rq_size = round_up(sizeof(struct request) + set->cmd_size,
2431 cache_line_size());
2432 left = rq_size * depth;
2433
2434 for (i = 0; i < depth; ) {
2435 int this_order = max_order;
2436 struct page *page;
2437 int to_do;
2438 void *p;
2439
2440 while (this_order && left < order_to_size(this_order - 1))
2441 this_order--;
2442
2443 do {
2444 page = alloc_pages_node(node,
2445 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
2446 this_order);
2447 if (page)
2448 break;
2449 if (!this_order--)
2450 break;
2451 if (order_to_size(this_order) < rq_size)
2452 break;
2453 } while (1);
2454
2455 if (!page)
2456 goto fail;
2457
2458 page->private = this_order;
2459 list_add_tail(&page->lru, &tags->page_list);
2460
2461 p = page_address(page);
2462 /*
2463 * Allow kmemleak to scan these pages as they contain pointers
2464 * to additional allocations made via ops->init_request().
2465 */
2466 kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
2467 entries_per_page = order_to_size(this_order) / rq_size;
2468 to_do = min(entries_per_page, depth - i);
2469 left -= to_do * rq_size;
2470 for (j = 0; j < to_do; j++) {
2471 struct request *rq = p;
2472
2473 tags->static_rqs[i] = rq;
2474 if (blk_mq_init_request(set, rq, hctx_idx, node)) {
2475 tags->static_rqs[i] = NULL;
2476 goto fail;
2477 }
2478
2479 p += rq_size;
2480 i++;
2481 }
2482 }
2483 return 0;
2484
2485 fail:
2486 blk_mq_free_rqs(set, tags, hctx_idx);
2487 return -ENOMEM;
2488 }
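/*
 * Worked example of the sizing above (all numbers are illustrative):
 * suppose sizeof(struct request) + set->cmd_size comes to 440 bytes and
 * cache lines are 64 bytes, so rq_size rounds up to 448. With 4 KiB
 * pages, an order-2 chunk is 16384 bytes and holds 16384 / 448 = 36
 * requests; 'left' then shrinks by 36 * 448 bytes per chunk until all
 * 'depth' requests have been carved out.
 */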
2489
2490 struct rq_iter_data {
2491 struct blk_mq_hw_ctx *hctx;
2492 bool has_rq;
2493 };
2494
2495 static bool blk_mq_has_request(struct request *rq, void *data, bool reserved)
2496 {
2497 struct rq_iter_data *iter_data = data;
2498
2499 if (rq->mq_hctx != iter_data->hctx)
2500 return true;
2501 iter_data->has_rq = true;
2502 return false;
2503 }
2504
2505 static bool blk_mq_hctx_has_requests(struct blk_mq_hw_ctx *hctx)
2506 {
2507 struct blk_mq_tags *tags = hctx->sched_tags ?
2508 hctx->sched_tags : hctx->tags;
2509 struct rq_iter_data data = {
2510 .hctx = hctx,
2511 };
2512
2513 blk_mq_all_tag_iter(tags, blk_mq_has_request, &data);
2514 return data.has_rq;
2515 }
2516
2517 static inline bool blk_mq_last_cpu_in_hctx(unsigned int cpu,
2518 struct blk_mq_hw_ctx *hctx)
2519 {
2520 if (cpumask_next_and(-1, hctx->cpumask, cpu_online_mask) != cpu)
2521 return false;
2522 if (cpumask_next_and(cpu, hctx->cpumask, cpu_online_mask) < nr_cpu_ids)
2523 return false;
2524 return true;
2525 }
2526
2527 static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node)
2528 {
2529 struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
2530 struct blk_mq_hw_ctx, cpuhp_online);
2531
2532 if (!cpumask_test_cpu(cpu, hctx->cpumask) ||
2533 !blk_mq_last_cpu_in_hctx(cpu, hctx))
2534 return 0;
2535
2536 /*
2537 * Prevent new requests from being allocated on the current hctx.
2538 *
2539 * The smp_mb__after_atomic() pairs with the implied barrier in
2540 * test_and_set_bit_lock() in sbitmap_get(), ensuring the inactive flag is
2541 * seen once we return from the tag allocator.
2542 */
2543 set_bit(BLK_MQ_S_INACTIVE, &hctx->state);
2544 smp_mb__after_atomic();
2545
2546 /*
2547 * Try to grab a reference to the queue and wait for any outstanding
2548 * requests. If we could not grab a reference the queue has been
2549 * frozen and there are no requests.
2550 */
2551 if (percpu_ref_tryget(&hctx->queue->q_usage_counter)) {
2552 while (blk_mq_hctx_has_requests(hctx))
2553 msleep(5);
2554 percpu_ref_put(&hctx->queue->q_usage_counter);
2555 }
2556
2557 return 0;
2558 }
2559
2560 static int blk_mq_hctx_notify_online(unsigned int cpu, struct hlist_node *node)
2561 {
2562 struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
2563 struct blk_mq_hw_ctx, cpuhp_online);
2564
2565 if (cpumask_test_cpu(cpu, hctx->cpumask))
2566 clear_bit(BLK_MQ_S_INACTIVE, &hctx->state);
2567 return 0;
2568 }
2569
2570 /*
2571 * 'cpu' is going away. Splice any existing rq_list entries from this
2572 * software queue to the hw queue dispatch list, and ensure that it
2573 * gets run.
2574 */
2575 static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
2576 {
2577 struct blk_mq_hw_ctx *hctx;
2578 struct blk_mq_ctx *ctx;
2579 LIST_HEAD(tmp);
2580 enum hctx_type type;
2581
2582 hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
2583 if (!cpumask_test_cpu(cpu, hctx->cpumask))
2584 return 0;
2585
2586 ctx = __blk_mq_get_ctx(hctx->queue, cpu);
2587 type = hctx->type;
2588
2589 spin_lock(&ctx->lock);
2590 if (!list_empty(&ctx->rq_lists[type])) {
2591 list_splice_init(&ctx->rq_lists[type], &tmp);
2592 blk_mq_hctx_clear_pending(hctx, ctx);
2593 }
2594 spin_unlock(&ctx->lock);
2595
2596 if (list_empty(&tmp))
2597 return 0;
2598
2599 spin_lock(&hctx->lock);
2600 list_splice_tail_init(&tmp, &hctx->dispatch);
2601 spin_unlock(&hctx->lock);
2602
2603 blk_mq_run_hw_queue(hctx, true);
2604 return 0;
2605 }
2606
2607 static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
2608 {
2609 if (!(hctx->flags & BLK_MQ_F_STACKING))
2610 cpuhp_state_remove_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
2611 &hctx->cpuhp_online);
2612 cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
2613 &hctx->cpuhp_dead);
2614 }
2615
2616 /* hctx->ctxs will be freed in queue's release handler */
2617 static void blk_mq_exit_hctx(struct request_queue *q,
2618 struct blk_mq_tag_set *set,
2619 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
2620 {
2621 if (blk_mq_hw_queue_mapped(hctx))
2622 blk_mq_tag_idle(hctx);
2623
2624 if (set->ops->exit_request)
2625 set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
2626
2627 if (set->ops->exit_hctx)
2628 set->ops->exit_hctx(hctx, hctx_idx);
2629
2630 blk_mq_remove_cpuhp(hctx);
2631
2632 spin_lock(&q->unused_hctx_lock);
2633 list_add(&hctx->hctx_list, &q->unused_hctx_list);
2634 spin_unlock(&q->unused_hctx_lock);
2635 }
2636
2637 static void blk_mq_exit_hw_queues(struct request_queue *q,
2638 struct blk_mq_tag_set *set, int nr_queue)
2639 {
2640 struct blk_mq_hw_ctx *hctx;
2641 unsigned int i;
2642
2643 queue_for_each_hw_ctx(q, hctx, i) {
2644 if (i == nr_queue)
2645 break;
2646 blk_mq_debugfs_unregister_hctx(hctx);
2647 blk_mq_exit_hctx(q, set, hctx, i);
2648 }
2649 }
2650
2651 static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set)
2652 {
2653 int hw_ctx_size = sizeof(struct blk_mq_hw_ctx);
2654
2655 BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, srcu),
2656 __alignof__(struct blk_mq_hw_ctx)) !=
2657 sizeof(struct blk_mq_hw_ctx));
2658
2659 if (tag_set->flags & BLK_MQ_F_BLOCKING)
2660 hw_ctx_size += sizeof(struct srcu_struct);
2661
2662 return hw_ctx_size;
2663 }
2664
2665 static int blk_mq_init_hctx(struct request_queue *q,
2666 struct blk_mq_tag_set *set,
2667 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
2668 {
2669 hctx->queue_num = hctx_idx;
2670
2671 if (!(hctx->flags & BLK_MQ_F_STACKING))
2672 cpuhp_state_add_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
2673 &hctx->cpuhp_online);
2674 cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
2675
2676 hctx->tags = set->tags[hctx_idx];
2677
2678 if (set->ops->init_hctx &&
2679 set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
2680 goto unregister_cpu_notifier;
2681
2682 if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx,
2683 hctx->numa_node))
2684 goto exit_hctx;
2685 return 0;
2686
2687 exit_hctx:
2688 if (set->ops->exit_hctx)
2689 set->ops->exit_hctx(hctx, hctx_idx);
2690 unregister_cpu_notifier:
2691 blk_mq_remove_cpuhp(hctx);
2692 return -1;
2693 }
2694
2695 static struct blk_mq_hw_ctx *
2696 blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
2697 int node)
2698 {
2699 struct blk_mq_hw_ctx *hctx;
2700 gfp_t gfp = GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY;
2701
2702 hctx = kzalloc_node(blk_mq_hw_ctx_size(set), gfp, node);
2703 if (!hctx)
2704 goto fail_alloc_hctx;
2705
2706 if (!zalloc_cpumask_var_node(&hctx->cpumask, gfp, node))
2707 goto free_hctx;
2708
2709 atomic_set(&hctx->nr_active, 0);
2710 if (node == NUMA_NO_NODE)
2711 node = set->numa_node;
2712 hctx->numa_node = node;
2713
2714 INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
2715 spin_lock_init(&hctx->lock);
2716 INIT_LIST_HEAD(&hctx->dispatch);
2717 hctx->queue = q;
2718 hctx->flags = set->flags & ~BLK_MQ_F_TAG_QUEUE_SHARED;
2719
2720 INIT_LIST_HEAD(&hctx->hctx_list);
2721
2722 /*
2723 * Allocate space for all possible cpus to avoid allocation at
2724 * runtime
2725 */
2726 hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *),
2727 gfp, node);
2728 if (!hctx->ctxs)
2729 goto free_cpumask;
2730
2731 if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8),
2732 gfp, node))
2733 goto free_ctxs;
2734 hctx->nr_ctx = 0;
2735
2736 spin_lock_init(&hctx->dispatch_wait_lock);
2737 init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
2738 INIT_LIST_HEAD(&hctx->dispatch_wait.entry);
2739
2740 hctx->fq = blk_alloc_flush_queue(hctx->numa_node, set->cmd_size, gfp);
2741 if (!hctx->fq)
2742 goto free_bitmap;
2743
2744 if (hctx->flags & BLK_MQ_F_BLOCKING)
2745 init_srcu_struct(hctx->srcu);
2746 blk_mq_hctx_kobj_init(hctx);
2747
2748 return hctx;
2749
2750 free_bitmap:
2751 sbitmap_free(&hctx->ctx_map);
2752 free_ctxs:
2753 kfree(hctx->ctxs);
2754 free_cpumask:
2755 free_cpumask_var(hctx->cpumask);
2756 free_hctx:
2757 kfree(hctx);
2758 fail_alloc_hctx:
2759 return NULL;
2760 }
2761
2762 static void blk_mq_init_cpu_queues(struct request_queue *q,
2763 unsigned int nr_hw_queues)
2764 {
2765 struct blk_mq_tag_set *set = q->tag_set;
2766 unsigned int i, j;
2767
2768 for_each_possible_cpu(i) {
2769 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
2770 struct blk_mq_hw_ctx *hctx;
2771 int k;
2772
2773 __ctx->cpu = i;
2774 spin_lock_init(&__ctx->lock);
2775 for (k = HCTX_TYPE_DEFAULT; k < HCTX_MAX_TYPES; k++)
2776 INIT_LIST_HEAD(&__ctx->rq_lists[k]);
2777
2778 __ctx->queue = q;
2779
2780 /*
2781 * Set local node, IFF we have more than one hw queue. If
2782 * not, we remain on the home node of the device
2783 */
2784 for (j = 0; j < set->nr_maps; j++) {
2785 hctx = blk_mq_map_queue_type(q, j, i);
2786 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
2787 hctx->numa_node = cpu_to_node(i);
2788 }
2789 }
2790 }
2791
2792 static bool __blk_mq_alloc_map_and_request(struct blk_mq_tag_set *set,
2793 int hctx_idx)
2794 {
2795 unsigned int flags = set->flags;
2796 int ret = 0;
2797
2798 set->tags[hctx_idx] = blk_mq_alloc_rq_map(set, hctx_idx,
2799 set->queue_depth, set->reserved_tags, flags);
2800 if (!set->tags[hctx_idx])
2801 return false;
2802
2803 ret = blk_mq_alloc_rqs(set, set->tags[hctx_idx], hctx_idx,
2804 set->queue_depth);
2805 if (!ret)
2806 return true;
2807
2808 blk_mq_free_rq_map(set->tags[hctx_idx], flags);
2809 set->tags[hctx_idx] = NULL;
2810 return false;
2811 }
2812
2813 static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
2814 unsigned int hctx_idx)
2815 {
2816 unsigned int flags = set->flags;
2817
2818 if (set->tags && set->tags[hctx_idx]) {
2819 blk_mq_free_rqs(set, set->tags[hctx_idx], hctx_idx);
2820 blk_mq_free_rq_map(set->tags[hctx_idx], flags);
2821 set->tags[hctx_idx] = NULL;
2822 }
2823 }
2824
2825 static void blk_mq_map_swqueue(struct request_queue *q)
2826 {
2827 unsigned int i, j, hctx_idx;
2828 struct blk_mq_hw_ctx *hctx;
2829 struct blk_mq_ctx *ctx;
2830 struct blk_mq_tag_set *set = q->tag_set;
2831
2832 queue_for_each_hw_ctx(q, hctx, i) {
2833 cpumask_clear(hctx->cpumask);
2834 hctx->nr_ctx = 0;
2835 hctx->dispatch_from = NULL;
2836 }
2837
2838 /*
2839 * Map software to hardware queues.
2840 *
2841 * If the cpu isn't present, the cpu is mapped to the first hctx.
2842 */
2843 for_each_possible_cpu(i) {
2844
2845 ctx = per_cpu_ptr(q->queue_ctx, i);
2846 for (j = 0; j < set->nr_maps; j++) {
2847 if (!set->map[j].nr_queues) {
2848 ctx->hctxs[j] = blk_mq_map_queue_type(q,
2849 HCTX_TYPE_DEFAULT, i);
2850 continue;
2851 }
2852 hctx_idx = set->map[j].mq_map[i];
2853 /* unmapped hw queue can be remapped after CPU topo changed */
2854 if (!set->tags[hctx_idx] &&
2855 !__blk_mq_alloc_map_and_request(set, hctx_idx)) {
2856 /*
2857 * If tags initialization fails for some hctx,
2858 * that hctx won't be brought online. In this
2859 * case, remap the current ctx to hctx[0], which
2860 * is guaranteed to always have tags allocated.
2861 */
2862 set->map[j].mq_map[i] = 0;
2863 }
2864
2865 hctx = blk_mq_map_queue_type(q, j, i);
2866 ctx->hctxs[j] = hctx;
2867 /*
2868 * If the CPU is already set in the mask, then we've
2869 * mapped this one already. This can happen if
2870 * devices share queues across queue maps.
2871 */
2872 if (cpumask_test_cpu(i, hctx->cpumask))
2873 continue;
2874
2875 cpumask_set_cpu(i, hctx->cpumask);
2876 hctx->type = j;
2877 ctx->index_hw[hctx->type] = hctx->nr_ctx;
2878 hctx->ctxs[hctx->nr_ctx++] = ctx;
2879
2880 /*
2881 * If the nr_ctx type overflows, we have exceeded the
2882 * number of sw queues we can support.
2883 */
2884 BUG_ON(!hctx->nr_ctx);
2885 }
2886
2887 for (; j < HCTX_MAX_TYPES; j++)
2888 ctx->hctxs[j] = blk_mq_map_queue_type(q,
2889 HCTX_TYPE_DEFAULT, i);
2890 }
2891
2892 queue_for_each_hw_ctx(q, hctx, i) {
2893 /*
2894 * If no software queues are mapped to this hardware queue,
2895 * disable it and free the request entries.
2896 */
2897 if (!hctx->nr_ctx) {
2898 /* Never unmap queue 0. We need it as a
2899 * fallback in case allocation fails during
2900 * a new remap
2901 */
2902 if (i && set->tags[i])
2903 blk_mq_free_map_and_requests(set, i);
2904
2905 hctx->tags = NULL;
2906 continue;
2907 }
2908
2909 hctx->tags = set->tags[i];
2910 WARN_ON(!hctx->tags);
2911
2912 /*
2913 * Set the map size to the number of mapped software queues.
2914 * This is more accurate and more efficient than looping
2915 * over all possibly mapped software queues.
2916 */
2917 sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);
2918
2919 /*
2920 * Initialize batch roundrobin counts
2921 */
2922 hctx->next_cpu = blk_mq_first_mapped_cpu(hctx);
2923 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
2924 }
2925 }
2926
2927 /*
2928 * Caller needs to ensure that we're either frozen/quiesced, or that
2929 * the queue isn't live yet.
2930 */
2931 static void queue_set_hctx_shared(struct request_queue *q, bool shared)
2932 {
2933 struct blk_mq_hw_ctx *hctx;
2934 int i;
2935
2936 queue_for_each_hw_ctx(q, hctx, i) {
2937 if (shared)
2938 hctx->flags |= BLK_MQ_F_TAG_QUEUE_SHARED;
2939 else
2940 hctx->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED;
2941 }
2942 }
2943
2944 static void blk_mq_update_tag_set_shared(struct blk_mq_tag_set *set,
2945 bool shared)
2946 {
2947 struct request_queue *q;
2948
2949 lockdep_assert_held(&set->tag_list_lock);
2950
2951 list_for_each_entry(q, &set->tag_list, tag_set_list) {
2952 blk_mq_freeze_queue(q);
2953 queue_set_hctx_shared(q, shared);
2954 blk_mq_unfreeze_queue(q);
2955 }
2956 }
2957
2958 static void blk_mq_del_queue_tag_set(struct request_queue *q)
2959 {
2960 struct blk_mq_tag_set *set = q->tag_set;
2961
2962 mutex_lock(&set->tag_list_lock);
2963 list_del(&q->tag_set_list);
2964 if (list_is_singular(&set->tag_list)) {
2965 /* just transitioned to unshared */
2966 set->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED;
2967 /* update existing queue */
2968 blk_mq_update_tag_set_shared(set, false);
2969 }
2970 mutex_unlock(&set->tag_list_lock);
2971 INIT_LIST_HEAD(&q->tag_set_list);
2972 }
2973
2974 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
2975 struct request_queue *q)
2976 {
2977 mutex_lock(&set->tag_list_lock);
2978
2979 /*
2980 * Check to see if we're transitioning to shared (from 1 to 2 queues).
2981 */
2982 if (!list_empty(&set->tag_list) &&
2983 !(set->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
2984 set->flags |= BLK_MQ_F_TAG_QUEUE_SHARED;
2985 /* update existing queue */
2986 blk_mq_update_tag_set_shared(set, true);
2987 }
2988 if (set->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
2989 queue_set_hctx_shared(q, true);
2990 list_add_tail(&q->tag_set_list, &set->tag_list);
2991
2992 mutex_unlock(&set->tag_list_lock);
2993 }
2994
2995 /* All allocations will be freed in release handler of q->mq_kobj */
2996 static int blk_mq_alloc_ctxs(struct request_queue *q)
2997 {
2998 struct blk_mq_ctxs *ctxs;
2999 int cpu;
3000
3001 ctxs = kzalloc(sizeof(*ctxs), GFP_KERNEL);
3002 if (!ctxs)
3003 return -ENOMEM;
3004
3005 ctxs->queue_ctx = alloc_percpu(struct blk_mq_ctx);
3006 if (!ctxs->queue_ctx)
3007 goto fail;
3008
3009 for_each_possible_cpu(cpu) {
3010 struct blk_mq_ctx *ctx = per_cpu_ptr(ctxs->queue_ctx, cpu);
3011 ctx->ctxs = ctxs;
3012 }
3013
3014 q->mq_kobj = &ctxs->kobj;
3015 q->queue_ctx = ctxs->queue_ctx;
3016
3017 return 0;
3018 fail:
3019 kfree(ctxs);
3020 return -ENOMEM;
3021 }
3022
3023 /*
3024 * This is the actual release handler for mq, but we do it from the
3025 * request queue's release handler to avoid use-after-free issues.
3026 * Ideally q->mq_kobj wouldn't have been introduced, but we can't
3027 * group the ctx/kctx kobjects without it.
3028 */
3029 void blk_mq_release(struct request_queue *q)
3030 {
3031 struct blk_mq_hw_ctx *hctx, *next;
3032 int i;
3033
3034 queue_for_each_hw_ctx(q, hctx, i)
3035 WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list));
3036
3037 /* all hctx are in .unused_hctx_list now */
3038 list_for_each_entry_safe(hctx, next, &q->unused_hctx_list, hctx_list) {
3039 list_del_init(&hctx->hctx_list);
3040 kobject_put(&hctx->kobj);
3041 }
3042
3043 kfree(q->queue_hw_ctx);
3044
3045 /*
3046 * release .mq_kobj and the sw queues' kobjects now because
3047 * both share their lifetime with the request queue.
3048 */
3049 blk_mq_sysfs_deinit(q);
3050 }
3051
3052 struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
3053 void *queuedata)
3054 {
3055 struct request_queue *uninit_q, *q;
3056
3057 uninit_q = blk_alloc_queue(set->numa_node);
3058 if (!uninit_q)
3059 return ERR_PTR(-ENOMEM);
3060 uninit_q->queuedata = queuedata;
3061
3062 /*
3063 * Initialize the queue without an elevator. device_add_disk() will do
3064 * the initialization.
3065 */
3066 q = blk_mq_init_allocated_queue(set, uninit_q, false);
3067 if (IS_ERR(q))
3068 blk_cleanup_queue(uninit_q);
3069
3070 return q;
3071 }
3072 EXPORT_SYMBOL_GPL(blk_mq_init_queue_data);
3073
3074 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
3075 {
3076 return blk_mq_init_queue_data(set, NULL);
3077 }
3078 EXPORT_SYMBOL(blk_mq_init_queue);
3079
3080 /*
3081 * Helper for setting up a queue with mq ops, given queue depth, and
3082 * the passed in mq ops flags.
3083 */
3084 struct request_queue *blk_mq_init_sq_queue(struct blk_mq_tag_set *set,
3085 const struct blk_mq_ops *ops,
3086 unsigned int queue_depth,
3087 unsigned int set_flags)
3088 {
3089 struct request_queue *q;
3090 int ret;
3091
3092 memset(set, 0, sizeof(*set));
3093 set->ops = ops;
3094 set->nr_hw_queues = 1;
3095 set->nr_maps = 1;
3096 set->queue_depth = queue_depth;
3097 set->numa_node = NUMA_NO_NODE;
3098 set->flags = set_flags;
3099
3100 ret = blk_mq_alloc_tag_set(set);
3101 if (ret)
3102 return ERR_PTR(ret);
3103
3104 q = blk_mq_init_queue(set);
3105 if (IS_ERR(q)) {
3106 blk_mq_free_tag_set(set);
3107 return q;
3108 }
3109
3110 return q;
3111 }
3112 EXPORT_SYMBOL(blk_mq_init_sq_queue);
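/*
 * Example (illustrative sketch, not part of this file): a simple driver
 * with a single hardware queue can use this helper instead of open-coding
 * the tag set setup. my_dev, my_mq_ops and MY_QUEUE_DEPTH are placeholder
 * names.
 *
 *	struct request_queue *q;
 *
 *	q = blk_mq_init_sq_queue(&my_dev->tag_set, &my_mq_ops,
 *				 MY_QUEUE_DEPTH, BLK_MQ_F_SHOULD_MERGE);
 *	if (IS_ERR(q))
 *		return PTR_ERR(q);
 *	my_dev->queue = q;
 *	q->queuedata = my_dev;
 */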
3113
3114 static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
3115 struct blk_mq_tag_set *set, struct request_queue *q,
3116 int hctx_idx, int node)
3117 {
3118 struct blk_mq_hw_ctx *hctx = NULL, *tmp;
3119
3120 /* reuse dead hctx first */
3121 spin_lock(&q->unused_hctx_lock);
3122 list_for_each_entry(tmp, &q->unused_hctx_list, hctx_list) {
3123 if (tmp->numa_node == node) {
3124 hctx = tmp;
3125 break;
3126 }
3127 }
3128 if (hctx)
3129 list_del_init(&hctx->hctx_list);
3130 spin_unlock(&q->unused_hctx_lock);
3131
3132 if (!hctx)
3133 hctx = blk_mq_alloc_hctx(q, set, node);
3134 if (!hctx)
3135 goto fail;
3136
3137 if (blk_mq_init_hctx(q, set, hctx, hctx_idx))
3138 goto free_hctx;
3139
3140 return hctx;
3141
3142 free_hctx:
3143 kobject_put(&hctx->kobj);
3144 fail:
3145 return NULL;
3146 }
3147
3148 static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
3149 struct request_queue *q)
3150 {
3151 int i, j, end;
3152 struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
3153
3154 if (q->nr_hw_queues < set->nr_hw_queues) {
3155 struct blk_mq_hw_ctx **new_hctxs;
3156
3157 new_hctxs = kcalloc_node(set->nr_hw_queues,
3158 sizeof(*new_hctxs), GFP_KERNEL,
3159 set->numa_node);
3160 if (!new_hctxs)
3161 return;
3162 if (hctxs)
3163 memcpy(new_hctxs, hctxs, q->nr_hw_queues *
3164 sizeof(*hctxs));
3165 q->queue_hw_ctx = new_hctxs;
3166 kfree(hctxs);
3167 hctxs = new_hctxs;
3168 }
3169
3170 /* protect against switching io scheduler */
3171 mutex_lock(&q->sysfs_lock);
3172 for (i = 0; i < set->nr_hw_queues; i++) {
3173 int node;
3174 struct blk_mq_hw_ctx *hctx;
3175
3176 node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], i);
3177 /*
3178 * If the hw queue has been mapped to another numa node,
3179 * we need to realloc the hctx. If allocation fails, fall back
3180 * to using the previous one.
3181 */
3182 if (hctxs[i] && (hctxs[i]->numa_node == node))
3183 continue;
3184
3185 hctx = blk_mq_alloc_and_init_hctx(set, q, i, node);
3186 if (hctx) {
3187 if (hctxs[i])
3188 blk_mq_exit_hctx(q, set, hctxs[i], i);
3189 hctxs[i] = hctx;
3190 } else {
3191 if (hctxs[i])
3192 pr_warn("Allocate new hctx on node %d fails,\
3193 fallback to previous one on node %d\n",
3194 node, hctxs[i]->numa_node);
3195 else
3196 break;
3197 }
3198 }
3199 /*
3200 * Increasing nr_hw_queues failed. Free the newly allocated
3201 * hctxs and keep the previous q->nr_hw_queues.
3202 */
3203 if (i != set->nr_hw_queues) {
3204 j = q->nr_hw_queues;
3205 end = i;
3206 } else {
3207 j = i;
3208 end = q->nr_hw_queues;
3209 q->nr_hw_queues = set->nr_hw_queues;
3210 }
3211
3212 for (; j < end; j++) {
3213 struct blk_mq_hw_ctx *hctx = hctxs[j];
3214
3215 if (hctx) {
3216 if (hctx->tags)
3217 blk_mq_free_map_and_requests(set, j);
3218 blk_mq_exit_hctx(q, set, hctx, j);
3219 hctxs[j] = NULL;
3220 }
3221 }
3222 mutex_unlock(&q->sysfs_lock);
3223 }
3224
3225 struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
3226 struct request_queue *q,
3227 bool elevator_init)
3228 {
3229 /* mark the queue as mq asap */
3230 q->mq_ops = set->ops;
3231
3232 q->poll_cb = blk_stat_alloc_callback(blk_mq_poll_stats_fn,
3233 blk_mq_poll_stats_bkt,
3234 BLK_MQ_POLL_STATS_BKTS, q);
3235 if (!q->poll_cb)
3236 goto err_exit;
3237
3238 if (blk_mq_alloc_ctxs(q))
3239 goto err_poll;
3240
3241 /* init q->mq_kobj and sw queues' kobjects */
3242 blk_mq_sysfs_init(q);
3243
3244 INIT_LIST_HEAD(&q->unused_hctx_list);
3245 spin_lock_init(&q->unused_hctx_lock);
3246
3247 blk_mq_realloc_hw_ctxs(set, q);
3248 if (!q->nr_hw_queues)
3249 goto err_hctxs;
3250
3251 INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
3252 blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
3253
3254 q->tag_set = set;
3255
3256 q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
3257 if (set->nr_maps > HCTX_TYPE_POLL &&
3258 set->map[HCTX_TYPE_POLL].nr_queues)
3259 blk_queue_flag_set(QUEUE_FLAG_POLL, q);
3260
3261 q->sg_reserved_size = INT_MAX;
3262
3263 INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
3264 INIT_LIST_HEAD(&q->requeue_list);
3265 spin_lock_init(&q->requeue_lock);
3266
3267 q->nr_requests = set->queue_depth;
3268
3269 /*
3270 * Default to classic polling
3271 */
3272 q->poll_nsec = BLK_MQ_POLL_CLASSIC;
3273
3274 blk_mq_init_cpu_queues(q, set->nr_hw_queues);
3275 blk_mq_add_queue_tag_set(set, q);
3276 blk_mq_map_swqueue(q);
3277
3278 if (elevator_init)
3279 elevator_init_mq(q);
3280
3281 return q;
3282
3283 err_hctxs:
3284 kfree(q->queue_hw_ctx);
3285 q->nr_hw_queues = 0;
3286 blk_mq_sysfs_deinit(q);
3287 err_poll:
3288 blk_stat_free_callback(q->poll_cb);
3289 q->poll_cb = NULL;
3290 err_exit:
3291 q->mq_ops = NULL;
3292 return ERR_PTR(-ENOMEM);
3293 }
3294 EXPORT_SYMBOL(blk_mq_init_allocated_queue);
3295
3296 /* tags can _not_ be used after returning from blk_mq_exit_queue */
3297 void blk_mq_exit_queue(struct request_queue *q)
3298 {
3299 struct blk_mq_tag_set *set = q->tag_set;
3300
3301 blk_mq_del_queue_tag_set(q);
3302 blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
3303 }
3304
3305 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
3306 {
3307 int i;
3308
3309 for (i = 0; i < set->nr_hw_queues; i++) {
3310 if (!__blk_mq_alloc_map_and_request(set, i))
3311 goto out_unwind;
3312 cond_resched();
3313 }
3314
3315 return 0;
3316
3317 out_unwind:
3318 while (--i >= 0)
3319 blk_mq_free_map_and_requests(set, i);
3320
3321 return -ENOMEM;
3322 }
3323
3324 /*
3325 * Allocate the request maps associated with this tag_set. Note that this
3326 * may reduce the depth asked for, if memory is tight. set->queue_depth
3327 * will be updated to reflect the allocated depth.
3328 */
3329 static int blk_mq_alloc_map_and_requests(struct blk_mq_tag_set *set)
3330 {
3331 unsigned int depth;
3332 int err;
3333
3334 depth = set->queue_depth;
3335 do {
3336 err = __blk_mq_alloc_rq_maps(set);
3337 if (!err)
3338 break;
3339
3340 set->queue_depth >>= 1;
3341 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
3342 err = -ENOMEM;
3343 break;
3344 }
3345 } while (set->queue_depth);
3346
3347 if (!set->queue_depth || err) {
3348 pr_err("blk-mq: failed to allocate request map\n");
3349 return -ENOMEM;
3350 }
3351
3352 if (depth != set->queue_depth)
3353 pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
3354 depth, set->queue_depth);
3355
3356 return 0;
3357 }
3358
3359 static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
3360 {
3361 /*
3362 * blk_mq_map_queues() and multiple .map_queues() implementations
3363 * expect that set->map[HCTX_TYPE_DEFAULT].nr_queues is set to the
3364 * number of hardware queues.
3365 */
3366 if (set->nr_maps == 1)
3367 set->map[HCTX_TYPE_DEFAULT].nr_queues = set->nr_hw_queues;
3368
3369 if (set->ops->map_queues && !is_kdump_kernel()) {
3370 int i;
3371
3372 /*
3373 * A transport's .map_queues callback is usually implemented in
3374 * the following way:
3375 *
3376 * for (queue = 0; queue < set->nr_hw_queues; queue++) {
3377 * mask = get_cpu_mask(queue)
3378 * for_each_cpu(cpu, mask)
3379 * set->map[x].mq_map[cpu] = queue;
3380 * }
3381 *
3382 * When we need to remap, the table has to be cleared to
3383 * kill stale mappings, since one CPU may not be mapped
3384 * to any hw queue.
3385 */
3386 for (i = 0; i < set->nr_maps; i++)
3387 blk_mq_clear_mq_map(&set->map[i]);
3388
3389 return set->ops->map_queues(set);
3390 } else {
3391 BUG_ON(set->nr_maps > 1);
3392 return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
3393 }
3394 }
3395
3396 static int blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set *set,
3397 int cur_nr_hw_queues, int new_nr_hw_queues)
3398 {
3399 struct blk_mq_tags **new_tags;
3400
3401 if (cur_nr_hw_queues >= new_nr_hw_queues)
3402 return 0;
3403
3404 new_tags = kcalloc_node(new_nr_hw_queues, sizeof(struct blk_mq_tags *),
3405 GFP_KERNEL, set->numa_node);
3406 if (!new_tags)
3407 return -ENOMEM;
3408
3409 if (set->tags)
3410 memcpy(new_tags, set->tags, cur_nr_hw_queues *
3411 sizeof(*set->tags));
3412 kfree(set->tags);
3413 set->tags = new_tags;
3414 set->nr_hw_queues = new_nr_hw_queues;
3415
3416 return 0;
3417 }
3418
3419 static int blk_mq_alloc_tag_set_tags(struct blk_mq_tag_set *set,
3420 int new_nr_hw_queues)
3421 {
3422 return blk_mq_realloc_tag_set_tags(set, 0, new_nr_hw_queues);
3423 }
3424
3425 /*
3426 * Allocate a tag set to be associated with one or more request queues.
3427 * May fail with -EINVAL for various error conditions. May adjust the
3428 * requested depth down, if it's too large. In that case, the set
3429 * value will be stored in set->queue_depth.
3430 */
3431 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
3432 {
3433 int i, ret;
3434
3435 BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
3436
3437 if (!set->nr_hw_queues)
3438 return -EINVAL;
3439 if (!set->queue_depth)
3440 return -EINVAL;
3441 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
3442 return -EINVAL;
3443
3444 if (!set->ops->queue_rq)
3445 return -EINVAL;
3446
3447 if (!set->ops->get_budget ^ !set->ops->put_budget)
3448 return -EINVAL;
3449
3450 if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
3451 pr_info("blk-mq: reduced tag depth to %u\n",
3452 BLK_MQ_MAX_DEPTH);
3453 set->queue_depth = BLK_MQ_MAX_DEPTH;
3454 }
3455
3456 if (!set->nr_maps)
3457 set->nr_maps = 1;
3458 else if (set->nr_maps > HCTX_MAX_TYPES)
3459 return -EINVAL;
3460
3461 /*
3462 * If a crashdump is active, then we are potentially in a very
3463 * memory constrained environment. Limit us to 1 queue and
3464 * 64 tags to prevent using too much memory.
3465 */
3466 if (is_kdump_kernel()) {
3467 set->nr_hw_queues = 1;
3468 set->nr_maps = 1;
3469 set->queue_depth = min(64U, set->queue_depth);
3470 }
3471 /*
3472 * There is no use for more h/w queues than cpus if we just have
3473 * a single map
3474 */
3475 if (set->nr_maps == 1 && set->nr_hw_queues > nr_cpu_ids)
3476 set->nr_hw_queues = nr_cpu_ids;
3477
3478 if (blk_mq_alloc_tag_set_tags(set, set->nr_hw_queues) < 0)
3479 return -ENOMEM;
3480
3481 ret = -ENOMEM;
3482 for (i = 0; i < set->nr_maps; i++) {
3483 set->map[i].mq_map = kcalloc_node(nr_cpu_ids,
3484 sizeof(set->map[i].mq_map[0]),
3485 GFP_KERNEL, set->numa_node);
3486 if (!set->map[i].mq_map)
3487 goto out_free_mq_map;
3488 set->map[i].nr_queues = is_kdump_kernel() ? 1 : set->nr_hw_queues;
3489 }
3490
3491 ret = blk_mq_update_queue_map(set);
3492 if (ret)
3493 goto out_free_mq_map;
3494
3495 ret = blk_mq_alloc_map_and_requests(set);
3496 if (ret)
3497 goto out_free_mq_map;
3498
3499 if (blk_mq_is_sbitmap_shared(set->flags)) {
3500 atomic_set(&set->active_queues_shared_sbitmap, 0);
3501
3502 if (blk_mq_init_shared_sbitmap(set, set->flags)) {
3503 ret = -ENOMEM;
3504 goto out_free_mq_rq_maps;
3505 }
3506 }
3507
3508 mutex_init(&set->tag_list_lock);
3509 INIT_LIST_HEAD(&set->tag_list);
3510
3511 return 0;
3512
3513 out_free_mq_rq_maps:
3514 for (i = 0; i < set->nr_hw_queues; i++)
3515 blk_mq_free_map_and_requests(set, i);
3516 out_free_mq_map:
3517 for (i = 0; i < set->nr_maps; i++) {
3518 kfree(set->map[i].mq_map);
3519 set->map[i].mq_map = NULL;
3520 }
3521 kfree(set->tags);
3522 set->tags = NULL;
3523 return ret;
3524 }
3525 EXPORT_SYMBOL(blk_mq_alloc_tag_set);
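/*
 * Example (illustrative sketch, not part of this file): a multi-queue
 * driver typically fills in one tag set per device and then creates its
 * request queue(s) from it. my_dev, my_mq_ops and struct my_cmd are
 * placeholder names.
 *
 *	struct blk_mq_tag_set *set = &my_dev->tag_set;
 *	int ret;
 *
 *	memset(set, 0, sizeof(*set));
 *	set->ops = &my_mq_ops;
 *	set->nr_hw_queues = num_online_cpus();	// one hctx per CPU
 *	set->queue_depth = 128;			// tags per hctx
 *	set->numa_node = NUMA_NO_NODE;
 *	set->cmd_size = sizeof(struct my_cmd);	// per-request driver payload
 *	set->flags = BLK_MQ_F_SHOULD_MERGE;
 *	ret = blk_mq_alloc_tag_set(set);
 *	if (ret)
 *		return ret;
 *
 *	my_dev->queue = blk_mq_init_queue(set);
 *	if (IS_ERR(my_dev->queue)) {
 *		blk_mq_free_tag_set(set);
 *		return PTR_ERR(my_dev->queue);
 *	}
 */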
3526
3527 void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
3528 {
3529 int i, j;
3530
3531 for (i = 0; i < set->nr_hw_queues; i++)
3532 blk_mq_free_map_and_requests(set, i);
3533
3534 if (blk_mq_is_sbitmap_shared(set->flags))
3535 blk_mq_exit_shared_sbitmap(set);
3536
3537 for (j = 0; j < set->nr_maps; j++) {
3538 kfree(set->map[j].mq_map);
3539 set->map[j].mq_map = NULL;
3540 }
3541
3542 kfree(set->tags);
3543 set->tags = NULL;
3544 }
3545 EXPORT_SYMBOL(blk_mq_free_tag_set);
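/*
 * Example (illustrative sketch, not part of this file): teardown mirrors
 * the setup order above; the request queue is cleaned up before the tag
 * set that backs it. my_dev is a placeholder name.
 *
 *	blk_cleanup_queue(my_dev->queue);
 *	blk_mq_free_tag_set(&my_dev->tag_set);
 */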
3546
3547 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
3548 {
3549 struct blk_mq_tag_set *set = q->tag_set;
3550 struct blk_mq_hw_ctx *hctx;
3551 int i, ret;
3552
3553 if (!set)
3554 return -EINVAL;
3555
3556 if (q->nr_requests == nr)
3557 return 0;
3558
3559 blk_mq_freeze_queue(q);
3560 blk_mq_quiesce_queue(q);
3561
3562 ret = 0;
3563 queue_for_each_hw_ctx(q, hctx, i) {
3564 if (!hctx->tags)
3565 continue;
3566 /*
3567 * If we're using an MQ scheduler, just update the scheduler
3568 * queue depth. This is similar to what the old code would do.
3569 */
3570 if (!hctx->sched_tags) {
3571 ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr,
3572 false);
3573 if (!ret && blk_mq_is_sbitmap_shared(set->flags))
3574 blk_mq_tag_resize_shared_sbitmap(set, nr);
3575 } else {
3576 ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
3577 nr, true);
3578 }
3579 if (ret)
3580 break;
3581 if (q->elevator && q->elevator->type->ops.depth_updated)
3582 q->elevator->type->ops.depth_updated(hctx);
3583 }
3584
3585 if (!ret)
3586 q->nr_requests = nr;
3587
3588 blk_mq_unquiesce_queue(q);
3589 blk_mq_unfreeze_queue(q);
3590
3591 return ret;
3592 }
3593
3594 /*
3595 * request_queue and elevator_type pair.
3596 * It is just used by __blk_mq_update_nr_hw_queues to cache
3597 * the elevator_type associated with a request_queue.
3598 */
3599 struct blk_mq_qe_pair {
3600 struct list_head node;
3601 struct request_queue *q;
3602 struct elevator_type *type;
3603 };
3604
3605 /*
3606 * Cache the elevator_type in qe pair list and switch the
3607 * io scheduler to 'none'
3608 */
3609 static bool blk_mq_elv_switch_none(struct list_head *head,
3610 struct request_queue *q)
3611 {
3612 struct blk_mq_qe_pair *qe;
3613
3614 if (!q->elevator)
3615 return true;
3616
3617 qe = kmalloc(sizeof(*qe), GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
3618 if (!qe)
3619 return false;
3620
3621 INIT_LIST_HEAD(&qe->node);
3622 qe->q = q;
3623 qe->type = q->elevator->type;
3624 list_add(&qe->node, head);
3625
3626 mutex_lock(&q->sysfs_lock);
3627 /*
3628 * After elevator_switch_mq, the previous elevator_queue will be
3629 * released by elevator_release. The reference to the io scheduler
3630 * module obtained by elevator_get will also be put. So we need to take
3631 * a reference to the io scheduler module here to prevent it from being
3632 * removed.
3633 */
3634 __module_get(qe->type->elevator_owner);
3635 elevator_switch_mq(q, NULL);
3636 mutex_unlock(&q->sysfs_lock);
3637
3638 return true;
3639 }
3640
3641 static void blk_mq_elv_switch_back(struct list_head *head,
3642 struct request_queue *q)
3643 {
3644 struct blk_mq_qe_pair *qe;
3645 struct elevator_type *t = NULL;
3646
3647 list_for_each_entry(qe, head, node)
3648 if (qe->q == q) {
3649 t = qe->type;
3650 break;
3651 }
3652
3653 if (!t)
3654 return;
3655
3656 list_del(&qe->node);
3657 kfree(qe);
3658
3659 mutex_lock(&q->sysfs_lock);
3660 elevator_switch_mq(q, t);
3661 mutex_unlock(&q->sysfs_lock);
3662 }
3663
3664 static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
3665 int nr_hw_queues)
3666 {
3667 struct request_queue *q;
3668 LIST_HEAD(head);
3669 int prev_nr_hw_queues;
3670
3671 lockdep_assert_held(&set->tag_list_lock);
3672
3673 if (set->nr_maps == 1 && nr_hw_queues > nr_cpu_ids)
3674 nr_hw_queues = nr_cpu_ids;
3675 if (nr_hw_queues < 1)
3676 return;
3677 if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues)
3678 return;
3679
3680 list_for_each_entry(q, &set->tag_list, tag_set_list)
3681 blk_mq_freeze_queue(q);
3682 /*
3683 * Switch IO scheduler to 'none', cleaning up the data associated
3684 * with the previous scheduler. We will switch back once we are done
3685 * updating the new sw to hw queue mappings.
3686 */
3687 list_for_each_entry(q, &set->tag_list, tag_set_list)
3688 if (!blk_mq_elv_switch_none(&head, q))
3689 goto switch_back;
3690
3691 list_for_each_entry(q, &set->tag_list, tag_set_list) {
3692 blk_mq_debugfs_unregister_hctxs(q);
3693 blk_mq_sysfs_unregister(q);
3694 }
3695
3696 prev_nr_hw_queues = set->nr_hw_queues;
3697 if (blk_mq_realloc_tag_set_tags(set, set->nr_hw_queues, nr_hw_queues) <
3698 0)
3699 goto reregister;
3700
3701 set->nr_hw_queues = nr_hw_queues;
3702 fallback:
3703 blk_mq_update_queue_map(set);
3704 list_for_each_entry(q, &set->tag_list, tag_set_list) {
3705 blk_mq_realloc_hw_ctxs(set, q);
3706 if (q->nr_hw_queues != set->nr_hw_queues) {
3707 pr_warn("Increasing nr_hw_queues to %d fails, fallback to %d\n",
3708 nr_hw_queues, prev_nr_hw_queues);
3709 set->nr_hw_queues = prev_nr_hw_queues;
3710 blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
3711 goto fallback;
3712 }
3713 blk_mq_map_swqueue(q);
3714 }
3715
3716 reregister:
3717 list_for_each_entry(q, &set->tag_list, tag_set_list) {
3718 blk_mq_sysfs_register(q);
3719 blk_mq_debugfs_register_hctxs(q);
3720 }
3721
3722 switch_back:
3723 list_for_each_entry(q, &set->tag_list, tag_set_list)
3724 blk_mq_elv_switch_back(&head, q);
3725
3726 list_for_each_entry(q, &set->tag_list, tag_set_list)
3727 blk_mq_unfreeze_queue(q);
3728 }
3729
3730 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
3731 {
3732 mutex_lock(&set->tag_list_lock);
3733 __blk_mq_update_nr_hw_queues(set, nr_hw_queues);
3734 mutex_unlock(&set->tag_list_lock);
3735 }
3736 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
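/*
 * Example (illustrative sketch, not part of this file): a driver whose
 * controller returns from reset with a different number of usable queues
 * can reshape every queue sharing the tag set in one call. my_dev and
 * my_count_hw_queues() are placeholder names.
 *
 *	unsigned int nr = my_count_hw_queues(my_dev);
 *
 *	blk_mq_update_nr_hw_queues(&my_dev->tag_set, nr);
 */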
3737
3738 /* Enable polling stats and return whether they were already enabled. */
3739 static bool blk_poll_stats_enable(struct request_queue *q)
3740 {
3741 if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
3742 blk_queue_flag_test_and_set(QUEUE_FLAG_POLL_STATS, q))
3743 return true;
3744 blk_stat_add_callback(q, q->poll_cb);
3745 return false;
3746 }
3747
3748 static void blk_mq_poll_stats_start(struct request_queue *q)
3749 {
3750 /*
3751 * We don't arm the callback if polling stats are not enabled or the
3752 * callback is already active.
3753 */
3754 if (!test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
3755 blk_stat_is_active(q->poll_cb))
3756 return;
3757
3758 blk_stat_activate_msecs(q->poll_cb, 100);
3759 }
3760
3761 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb)
3762 {
3763 struct request_queue *q = cb->data;
3764 int bucket;
3765
3766 for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS; bucket++) {
3767 if (cb->stat[bucket].nr_samples)
3768 q->poll_stat[bucket] = cb->stat[bucket];
3769 }
3770 }
3771
3772 static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
3773 struct request *rq)
3774 {
3775 unsigned long ret = 0;
3776 int bucket;
3777
3778 /*
3779 * If stats collection isn't on, don't sleep but turn it on for
3780 * future users
3781 */
3782 if (!blk_poll_stats_enable(q))
3783 return 0;
3784
3785 /*
3786 * As an optimistic guess, use half of the mean service time
3787 * for this type of request. We can (and should) make this smarter.
3788 * For instance, if the completion latencies are tight, we can
3789 * get closer than just half the mean. This is especially
3790 * important on devices where the completion latencies are longer
3791 * than ~10 usec. We do use the stats for the relevant IO size
3792 * if available, which does lead to better estimates.
3793 */
3794 bucket = blk_mq_poll_stats_bkt(rq);
3795 if (bucket < 0)
3796 return ret;
3797
3798 if (q->poll_stat[bucket].nr_samples)
3799 ret = (q->poll_stat[bucket].mean + 1) / 2;
3800
3801 return ret;
3802 }
3803
3804 static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
3805 struct request *rq)
3806 {
3807 struct hrtimer_sleeper hs;
3808 enum hrtimer_mode mode;
3809 unsigned int nsecs;
3810 ktime_t kt;
3811
3812 if (rq->rq_flags & RQF_MQ_POLL_SLEPT)
3813 return false;
3814
3815 /*
3816 * If we get here, hybrid polling is enabled. Hence poll_nsec can be:
3817 *
3818 * 0: use half of prev avg
3819 * >0: use this specific value
3820 */
3821 if (q->poll_nsec > 0)
3822 nsecs = q->poll_nsec;
3823 else
3824 nsecs = blk_mq_poll_nsecs(q, rq);
3825
3826 if (!nsecs)
3827 return false;
3828
3829 rq->rq_flags |= RQF_MQ_POLL_SLEPT;
3830
3831 /*
3832 * This will be replaced with the stats tracking code, using
3833 * 'avg_completion_time / 2' as the pre-sleep target.
3834 */
3835 kt = nsecs;
3836
3837 mode = HRTIMER_MODE_REL;
3838 hrtimer_init_sleeper_on_stack(&hs, CLOCK_MONOTONIC, mode);
3839 hrtimer_set_expires(&hs.timer, kt);
3840
3841 do {
3842 if (blk_mq_rq_state(rq) == MQ_RQ_COMPLETE)
3843 break;
3844 set_current_state(TASK_UNINTERRUPTIBLE);
3845 hrtimer_sleeper_start_expires(&hs, mode);
3846 if (hs.task)
3847 io_schedule();
3848 hrtimer_cancel(&hs.timer);
3849 mode = HRTIMER_MODE_ABS;
3850 } while (hs.task && !signal_pending(current));
3851
3852 __set_current_state(TASK_RUNNING);
3853 destroy_hrtimer_on_stack(&hs.timer);
3854 return true;
3855 }
3856
3857 static bool blk_mq_poll_hybrid(struct request_queue *q,
3858 struct blk_mq_hw_ctx *hctx, blk_qc_t cookie)
3859 {
3860 struct request *rq;
3861
3862 if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
3863 return false;
3864
3865 if (!blk_qc_t_is_internal(cookie))
3866 rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
3867 else {
3868 rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie));
3869 /*
3870 * With scheduling, if the request has completed, we'll
3871 * get a NULL return here, as we clear the sched tag when
3872 * that happens. The request still remains valid, like always,
3873 * so we should be safe with just the NULL check.
3874 */
3875 if (!rq)
3876 return false;
3877 }
3878
3879 return blk_mq_poll_hybrid_sleep(q, rq);
3880 }
3881
3882 /**
3883 * blk_poll - poll for IO completions
3884 * @q: the queue
3885 * @cookie: cookie passed back at IO submission time
3886 * @spin: whether to spin for completions
3887 *
3888 * Description:
3889 * Poll for completions on the passed in queue. Returns number of
3890 * completed entries found. If @spin is true, then blk_poll will continue
3891 * looping until at least one completion is found, unless the task is
3892 * otherwise marked running (or we need to reschedule).
3893 */
3894 int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
3895 {
3896 struct blk_mq_hw_ctx *hctx;
3897 long state;
3898
3899 if (!blk_qc_t_valid(cookie) ||
3900 !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
3901 return 0;
3902
3903 if (current->plug)
3904 blk_flush_plug_list(current->plug, false);
3905
3906 hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
3907
3908 /*
3909 * If we sleep, have the caller restart the poll loop to reset
3910 * the state. Like for the other success return cases, the
3911 * caller is responsible for checking if the IO completed. If
3912 * the IO isn't complete, we'll get called again and will go
3913 * straight to the busy poll loop. If specified not to spin,
3914 * we also should not sleep.
3915 */
3916 if (spin && blk_mq_poll_hybrid(q, hctx, cookie))
3917 return 1;
3918
3919 hctx->poll_considered++;
3920
3921 state = current->state;
3922 do {
3923 int ret;
3924
3925 hctx->poll_invoked++;
3926
3927 ret = q->mq_ops->poll(hctx);
3928 if (ret > 0) {
3929 hctx->poll_success++;
3930 __set_current_state(TASK_RUNNING);
3931 return ret;
3932 }
3933
3934 if (signal_pending_state(state, current))
3935 __set_current_state(TASK_RUNNING);
3936
3937 if (current->state == TASK_RUNNING)
3938 return 1;
3939 if (ret < 0 || !spin)
3940 break;
3941 cpu_relax();
3942 } while (!need_resched());
3943
3944 __set_current_state(TASK_RUNNING);
3945 return 0;
3946 }
3947 EXPORT_SYMBOL_GPL(blk_poll);
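/*
 * Example (illustrative sketch, not part of this file): a caller that
 * submitted a REQ_HIPRI bio can busy-poll for its completion using the
 * cookie returned at submission time. 'done' is a placeholder flag set
 * by the bio's end_io handler.
 *
 *	struct request_queue *q = bdev_get_queue(bdev);
 *	blk_qc_t cookie = submit_bio(bio);	// bio marked REQ_HIPRI
 *
 *	while (!READ_ONCE(done))
 *		blk_poll(q, cookie, true);	// spin until a completion is found
 */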
3948
3949 unsigned int blk_mq_rq_cpu(struct request *rq)
3950 {
3951 return rq->mq_ctx->cpu;
3952 }
3953 EXPORT_SYMBOL(blk_mq_rq_cpu);
3954
3955 static int __init blk_mq_init(void)
3956 {
3957 int i;
3958
3959 for_each_possible_cpu(i)
3960 INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
3961 open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
3962
3963 cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD,
3964 "block/softirq:dead", NULL,
3965 blk_softirq_cpu_dead);
3966 cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
3967 blk_mq_hctx_notify_dead);
3968 cpuhp_setup_state_multi(CPUHP_AP_BLK_MQ_ONLINE, "block/mq:online",
3969 blk_mq_hctx_notify_online,
3970 blk_mq_hctx_notify_offline);
3971 return 0;
3972 }
3973 subsys_initcall(blk_mq_init);