/*
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"

#include <linux/elevator.h> /* for rq_end_sector() */
#include <linux/blk-mq.h>

#define DM_MSG_PREFIX "core-rq"

#define DM_MQ_NR_HW_QUEUES 1
#define DM_MQ_QUEUE_DEPTH 2048
static unsigned dm_mq_nr_hw_queues = DM_MQ_NR_HW_QUEUES;
static unsigned dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH;

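/*
 * Both knobs may be overridden at module load time (see the
 * module_param() declarations at the bottom of this file); the
 * effective values are read back, clamped to sane bounds, through
 * dm_get_blk_mq_nr_hw_queues() and dm_get_blk_mq_queue_depth() below.
 */
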
/*
 * Request-based DM's mempools' reserved IOs set by the user.
 */
#define RESERVED_REQUEST_BASED_IOS	256
static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;

#ifdef CONFIG_DM_MQ_DEFAULT
static bool use_blk_mq = true;
#else
static bool use_blk_mq = false;
#endif

bool dm_use_blk_mq_default(void)
{
        return use_blk_mq;
}

bool dm_use_blk_mq(struct mapped_device *md)
{
        return md->use_blk_mq;
}
EXPORT_SYMBOL_GPL(dm_use_blk_mq);

unsigned dm_get_reserved_rq_based_ios(void)
{
        return __dm_get_module_param(&reserved_rq_based_ios,
                                     RESERVED_REQUEST_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);

static unsigned dm_get_blk_mq_nr_hw_queues(void)
{
        return __dm_get_module_param(&dm_mq_nr_hw_queues, 1, 32);
}

static unsigned dm_get_blk_mq_queue_depth(void)
{
        return __dm_get_module_param(&dm_mq_queue_depth,
                                     DM_MQ_QUEUE_DEPTH, BLK_MQ_MAX_DEPTH);
}

int dm_request_based(struct mapped_device *md)
{
        return blk_queue_stackable(md->queue);
}

static void dm_old_start_queue(struct request_queue *q)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        if (blk_queue_stopped(q))
                blk_start_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

void dm_start_queue(struct request_queue *q)
{
        if (!q->mq_ops)
                dm_old_start_queue(q);
        else {
                queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, q);
                blk_mq_start_stopped_hw_queues(q, true);
                blk_mq_kick_requeue_list(q);
        }
}

static void dm_old_stop_queue(struct request_queue *q)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        if (blk_queue_stopped(q)) {
                spin_unlock_irqrestore(q->queue_lock, flags);
                return;
        }

        blk_stop_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

void dm_stop_queue(struct request_queue *q)
{
        if (!q->mq_ops)
                dm_old_stop_queue(q);
        else {
                spin_lock_irq(q->queue_lock);
                queue_flag_set(QUEUE_FLAG_STOPPED, q);
                spin_unlock_irq(q->queue_lock);

                blk_mq_cancel_requeue_work(q);
                blk_mq_stop_hw_queues(q);
        }
}

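/*
 * The tio and clone allocation helpers below serve only the old
 * .request_fn path: tios come from md->io_pool and clone requests
 * from md->rq_pool. On the blk-mq path the tio instead lives in the
 * request PDU sized via tag_set->cmd_size (see dm_mq_init_request()
 * and dm_mq_init_request_queue()).
 */
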
static struct dm_rq_target_io *alloc_old_rq_tio(struct mapped_device *md,
                                                gfp_t gfp_mask)
{
        return mempool_alloc(md->io_pool, gfp_mask);
}

static void free_old_rq_tio(struct dm_rq_target_io *tio)
{
        mempool_free(tio, tio->md->io_pool);
}

static struct request *alloc_old_clone_request(struct mapped_device *md,
                                               gfp_t gfp_mask)
{
        return mempool_alloc(md->rq_pool, gfp_mask);
}

static void free_old_clone_request(struct mapped_device *md, struct request *rq)
{
        mempool_free(rq, md->rq_pool);
}

/*
 * Partial completion handling for request-based dm
 */
static void end_clone_bio(struct bio *clone)
{
        struct dm_rq_clone_bio_info *info =
                container_of(clone, struct dm_rq_clone_bio_info, clone);
        struct dm_rq_target_io *tio = info->tio;
        struct bio *bio = info->orig;
        unsigned int nr_bytes = info->orig->bi_iter.bi_size;
        int error = clone->bi_error;

        bio_put(clone);

        if (tio->error)
                /*
                 * An error has already been detected on the request.
                 * Once error occurred, just let clone->end_io() handle
                 * the remainder.
                 */
                return;
        else if (error) {
                /*
                 * Don't notice the error to the upper layer yet.
                 * The error handling decision is made by the target driver,
                 * when the request is completed.
                 */
                tio->error = error;
                return;
        }

        /*
         * I/O for the bio successfully completed.
         * Notice the data completion to the upper layer.
         */

        /*
         * bios are processed from the head of the list.
         * So the completing bio should always be rq->bio.
         * If it's not, something wrong is happening.
         */
        if (tio->orig->bio != bio)
                DMERR("bio completion is going in the middle of the request");

        /*
         * Update the original request.
         * Do not use blk_end_request() here, because it may complete
         * the original request before the clone, and break the ordering.
         */
        blk_update_request(tio->orig, 0, nr_bytes);
}

static struct dm_rq_target_io *tio_from_request(struct request *rq)
{
        return (rq->q->mq_ops ? blk_mq_rq_to_pdu(rq) : rq->special);
}

static void rq_end_stats(struct mapped_device *md, struct request *orig)
{
        if (unlikely(dm_stats_used(&md->stats))) {
                struct dm_rq_target_io *tio = tio_from_request(orig);
                tio->duration_jiffies = jiffies - tio->duration_jiffies;
                dm_stats_account_io(&md->stats, rq_data_dir(orig),
                                    blk_rq_pos(orig), tio->n_sectors, true,
                                    tio->duration_jiffies, &tio->stats_aux);
        }
}

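/*
 * Note the bookkeeping trick above: dm_start_request() seeds
 * tio->duration_jiffies with the start time (jiffies), and
 * rq_end_stats() overwrites it with the elapsed time before
 * handing it to dm_stats_account_io().
 */
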
/*
 * Don't touch any member of the md after calling this function because
 * the md may be freed in dm_put() at the end of this function.
 * Or do dm_get() before calling this function and dm_put() later.
 */
static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
{
        atomic_dec(&md->pending[rw]);

        /* nudge anyone waiting on suspend queue */
        if (!md_in_flight(md))
                wake_up(&md->wait);

        /*
         * Run this off this callpath, as drivers could invoke end_io while
         * inside their request_fn (and holding the queue lock). Calling
         * back into ->request_fn() could deadlock attempting to grab the
         * queue lock again.
         */
        if (!md->queue->mq_ops && run_queue)
                blk_run_queue_async(md->queue);

        /*
         * dm_put() must be at the end of this function. See the comment above
         */
        dm_put(md);
}

static void free_rq_clone(struct request *clone)
{
        struct dm_rq_target_io *tio = clone->end_io_data;
        struct mapped_device *md = tio->md;

        blk_rq_unprep_clone(clone);

        /*
         * It is possible for a clone_old_rq() allocated clone to
         * get passed in -- it may not yet have a request_queue.
         * This is known to occur if the error target replaces
         * a multipath target that has a request_fn queue stacked
         * on blk-mq queue(s).
         */
        if (clone->q && clone->q->mq_ops)
                /* stacked on blk-mq queue(s) */
                tio->ti->type->release_clone_rq(clone);
        else if (!md->queue->mq_ops)
                /* request_fn queue stacked on request_fn queue(s) */
                free_old_clone_request(md, clone);

        if (!md->queue->mq_ops)
                free_old_rq_tio(tio);
}

/*
 * Complete the clone and the original request.
 * Must be called without clone's queue lock held,
 * see end_clone_request() for more details.
 */
static void dm_end_request(struct request *clone, int error)
{
        int rw = rq_data_dir(clone);
        struct dm_rq_target_io *tio = clone->end_io_data;
        struct mapped_device *md = tio->md;
        struct request *rq = tio->orig;

        if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
                rq->errors = clone->errors;
                rq->resid_len = clone->resid_len;

                if (rq->sense)
                        /*
                         * We are using the sense buffer of the original
                         * request.
                         * So setting the length of the sense data is enough.
                         */
                        rq->sense_len = clone->sense_len;
        }

        free_rq_clone(clone);
        rq_end_stats(md, rq);
        if (!rq->q->mq_ops)
                blk_end_request_all(rq, error);
        else
                blk_mq_end_request(rq, error);
        rq_completed(md, rw, true);
}

static void dm_unprep_request(struct request *rq)
{
        struct dm_rq_target_io *tio = tio_from_request(rq);
        struct request *clone = tio->clone;

        if (!rq->q->mq_ops) {
                rq->special = NULL;
                rq->cmd_flags &= ~REQ_DONTPREP;
        }

        if (clone)
                free_rq_clone(clone);
        else if (!tio->md->queue->mq_ops)
                free_old_rq_tio(tio);
}

/*
 * Requeue the original request of a clone.
 */
static void dm_old_requeue_request(struct request *rq)
{
        struct request_queue *q = rq->q;
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        blk_requeue_request(q, rq);
        blk_run_queue_async(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

static void dm_mq_requeue_request(struct request *rq)
{
        struct request_queue *q = rq->q;
        unsigned long flags;

        blk_mq_requeue_request(rq);
        spin_lock_irqsave(q->queue_lock, flags);
        if (!blk_queue_stopped(q))
                blk_mq_kick_requeue_list(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

static void dm_requeue_original_request(struct mapped_device *md,
                                        struct request *rq)
{
        int rw = rq_data_dir(rq);

        rq_end_stats(md, rq);
        dm_unprep_request(rq);

        if (!rq->q->mq_ops)
                dm_old_requeue_request(rq);
        else
                dm_mq_requeue_request(rq);

        rq_completed(md, rw, false);
}

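/*
 * Note that requeueing passes run_queue=false to rq_completed(): both
 * requeue paths above already kick their own queue (blk_run_queue_async()
 * and blk_mq_kick_requeue_list() respectively).
 */
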
static void dm_done(struct request *clone, int error, bool mapped)
{
        int r = error;
        struct dm_rq_target_io *tio = clone->end_io_data;
        dm_request_endio_fn rq_end_io = NULL;

        if (tio->ti) {
                rq_end_io = tio->ti->type->rq_end_io;

                if (mapped && rq_end_io)
                        r = rq_end_io(tio->ti, clone, error, &tio->info);
        }

        if (unlikely(r == -EREMOTEIO && (req_op(clone) == REQ_OP_WRITE_SAME) &&
                     !clone->q->limits.max_write_same_sectors))
                disable_write_same(tio->md);

        if (r <= 0)
                /* The target wants to complete the I/O */
                dm_end_request(clone, r);
        else if (r == DM_ENDIO_INCOMPLETE)
                /* The target will handle the I/O */
                return;
        else if (r == DM_ENDIO_REQUEUE)
                /* The target wants to requeue the I/O */
                dm_requeue_original_request(tio->md, tio->orig);
        else {
                DMWARN("unimplemented target endio return value: %d", r);
                BUG();
        }
}

/*
 * Request completion handler for request-based dm
 */
static void dm_softirq_done(struct request *rq)
{
        bool mapped = true;
        struct dm_rq_target_io *tio = tio_from_request(rq);
        struct request *clone = tio->clone;
        int rw;

        if (!clone) {
                rq_end_stats(tio->md, rq);
                rw = rq_data_dir(rq);
                if (!rq->q->mq_ops) {
                        blk_end_request_all(rq, tio->error);
                        rq_completed(tio->md, rw, false);
                        free_old_rq_tio(tio);
                } else {
                        blk_mq_end_request(rq, tio->error);
                        rq_completed(tio->md, rw, false);
                }
                return;
        }

        if (rq->cmd_flags & REQ_FAILED)
                mapped = false;

        dm_done(clone, tio->error, mapped);
}

/*
 * Complete the clone and the original request with the error status
 * through softirq context.
 */
static void dm_complete_request(struct request *rq, int error)
{
        struct dm_rq_target_io *tio = tio_from_request(rq);

        tio->error = error;
        if (!rq->q->mq_ops)
                blk_complete_request(rq);
        else
                blk_mq_complete_request(rq, error);
}

/*
 * Complete the not-mapped clone and the original request with the error status
 * through softirq context.
 * Target's rq_end_io() function isn't called.
 * This may be used when the target's map_rq() or clone_and_map_rq() functions fail.
 */
static void dm_kill_unmapped_request(struct request *rq, int error)
{
        rq->cmd_flags |= REQ_FAILED;
        dm_complete_request(rq, error);
}

/*
 * Called with the clone's queue lock held (in the case of .request_fn)
 */
static void end_clone_request(struct request *clone, int error)
{
        struct dm_rq_target_io *tio = clone->end_io_data;

        if (!clone->q->mq_ops) {
                /*
                 * For just cleaning up the information of the queue in which
                 * the clone was dispatched.
                 * The clone is *NOT* freed actually here because it is alloced
                 * from dm own mempool (REQ_ALLOCED isn't set).
                 */
                __blk_put_request(clone->q, clone);
        }

        /*
         * Actual request completion is done in a softirq context which doesn't
         * hold the clone's queue lock.  Otherwise, deadlock could occur because:
         *     - another request may be submitted by the upper level driver
         *       of the stacking during the completion
         *     - the submission which requires queue lock may be done
         *       against this clone's queue
         */
        dm_complete_request(tio->orig, error);
}

static void dm_dispatch_clone_request(struct request *clone, struct request *rq)
{
        int r;

        if (blk_queue_io_stat(clone->q))
                clone->cmd_flags |= REQ_IO_STAT;

        clone->start_time = jiffies;
        r = blk_insert_cloned_request(clone->q, clone);
        if (r)
                /* must complete clone in terms of original request */
                dm_complete_request(rq, r);
}

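/*
 * blk_insert_cloned_request() is the block layer's submission helper for
 * request stacking drivers: the clone is dispatched directly on the
 * underlying device's queue and is not subject to further merging there.
 * A non-zero return means the clone was never queued, so it must be
 * completed here in terms of the original request.
 */
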
static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
                                 void *data)
{
        struct dm_rq_target_io *tio = data;
        struct dm_rq_clone_bio_info *info =
                container_of(bio, struct dm_rq_clone_bio_info, clone);

        info->orig = bio_orig;
        info->tio = tio;
        bio->bi_end_io = end_clone_bio;

        return 0;
}

static int setup_clone(struct request *clone, struct request *rq,
                       struct dm_rq_target_io *tio, gfp_t gfp_mask)
{
        int r;

        r = blk_rq_prep_clone(clone, rq, tio->md->bs, gfp_mask,
                              dm_rq_bio_constructor, tio);
        if (r)
                return r;

        clone->cmd = rq->cmd;
        clone->cmd_len = rq->cmd_len;
        clone->sense = rq->sense;
        clone->end_io = end_clone_request;
        clone->end_io_data = tio;

        tio->clone = clone;

        return 0;
}

static struct request *clone_old_rq(struct request *rq, struct mapped_device *md,
                                    struct dm_rq_target_io *tio, gfp_t gfp_mask)
{
        /*
         * Create clone for use with .request_fn request_queue
         */
        struct request *clone;

        clone = alloc_old_clone_request(md, gfp_mask);
        if (!clone)
                return NULL;

        blk_rq_init(NULL, clone);
        if (setup_clone(clone, rq, tio, gfp_mask)) {
                /* -ENOMEM */
                free_old_clone_request(md, clone);
                return NULL;
        }

        return clone;
}

static void map_tio_request(struct kthread_work *work);

static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
                     struct mapped_device *md)
{
        tio->md = md;
        tio->ti = NULL;
        tio->clone = NULL;
        tio->orig = rq;
        tio->error = 0;
        /*
         * Avoid initializing info for blk-mq; it passes
         * target-specific data through info.ptr
         * (see: dm_mq_init_request)
         */
        if (!md->init_tio_pdu)
                memset(&tio->info, 0, sizeof(tio->info));
        if (md->kworker_task)
                init_kthread_work(&tio->work, map_tio_request);
}

static struct dm_rq_target_io *dm_old_prep_tio(struct request *rq,
                                               struct mapped_device *md,
                                               gfp_t gfp_mask)
{
        struct dm_rq_target_io *tio;
        int srcu_idx;
        struct dm_table *table;

        tio = alloc_old_rq_tio(md, gfp_mask);
        if (!tio)
                return NULL;

        init_tio(tio, rq, md);

        table = dm_get_live_table(md, &srcu_idx);
        /*
         * Must clone a request if this .request_fn DM device
         * is stacked on .request_fn device(s).
         */
        if (!dm_table_all_blk_mq_devices(table)) {
                if (!clone_old_rq(rq, md, tio, gfp_mask)) {
                        dm_put_live_table(md, srcu_idx);
                        free_old_rq_tio(tio);
                        return NULL;
                }
        }
        dm_put_live_table(md, srcu_idx);

        return tio;
}

/*
 * Called with the queue lock held.
 */
static int dm_old_prep_fn(struct request_queue *q, struct request *rq)
{
        struct mapped_device *md = q->queuedata;
        struct dm_rq_target_io *tio;

        if (unlikely(rq->special)) {
                DMWARN("Already has something in rq->special.");
                return BLKPREP_KILL;
        }

        tio = dm_old_prep_tio(rq, md, GFP_ATOMIC);
        if (!tio)
                return BLKPREP_DEFER;

        rq->special = tio;
        rq->cmd_flags |= REQ_DONTPREP;

        return BLKPREP_OK;
}

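/*
 * BLKPREP_DEFER above leaves the request on the queue so prep is retried
 * later (e.g. once a mempool allocation can succeed), while REQ_DONTPREP
 * marks successfully prepped requests so a re-run of the queue does not
 * prep them twice.
 */
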
/*
 * Returns:
 * 0                : the request has been processed
 * DM_MAPIO_REQUEUE : the original request needs to be requeued
 * < 0              : the request was completed due to failure
 */
static int map_request(struct dm_rq_target_io *tio, struct request *rq,
                       struct mapped_device *md)
{
        int r;
        struct dm_target *ti = tio->ti;
        struct request *clone = NULL;

        if (tio->clone) {
                clone = tio->clone;
                r = ti->type->map_rq(ti, clone, &tio->info);
        } else {
                r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
                if (r < 0) {
                        /* The target wants to complete the I/O */
                        dm_kill_unmapped_request(rq, r);
                        return r;
                }
                if (r != DM_MAPIO_REMAPPED)
                        return r;
                if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
                        /* -ENOMEM */
                        ti->type->release_clone_rq(clone);
                        return DM_MAPIO_REQUEUE;
                }
        }

        switch (r) {
        case DM_MAPIO_SUBMITTED:
                /* The target has taken the I/O to submit by itself later */
                break;
        case DM_MAPIO_REMAPPED:
                /* The target has remapped the I/O so dispatch it */
                trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
                                     blk_rq_pos(rq));
                dm_dispatch_clone_request(clone, rq);
                break;
        case DM_MAPIO_REQUEUE:
                /* The target wants to requeue the I/O */
                dm_requeue_original_request(md, tio->orig);
                break;
        default:
                if (r > 0) {
                        DMWARN("unimplemented target map return value: %d", r);
                        BUG();
                }

                /* The target wants to complete the I/O */
                dm_kill_unmapped_request(rq, r);
                return r;
        }

        return 0;
}

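/*
 * Two mapping interfaces coexist above: targets stacked on .request_fn
 * devices get a DM-allocated clone via ->map_rq() (the tio->clone case),
 * whereas targets such as dm-multipath allocate the clone from the
 * underlying device themselves via ->clone_and_map_rq().
 */
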
static void dm_start_request(struct mapped_device *md, struct request *orig)
{
        if (!orig->q->mq_ops)
                blk_start_request(orig);
        else
                blk_mq_start_request(orig);
        atomic_inc(&md->pending[rq_data_dir(orig)]);

        if (md->seq_rq_merge_deadline_usecs) {
                md->last_rq_pos = rq_end_sector(orig);
                md->last_rq_rw = rq_data_dir(orig);
                md->last_rq_start_time = ktime_get();
        }

        if (unlikely(dm_stats_used(&md->stats))) {
                struct dm_rq_target_io *tio = tio_from_request(orig);
                tio->duration_jiffies = jiffies;
                tio->n_sectors = blk_rq_sectors(orig);
                dm_stats_account_io(&md->stats, rq_data_dir(orig),
                                    blk_rq_pos(orig), tio->n_sectors, false, 0,
                                    &tio->stats_aux);
        }

        /*
         * Hold the md reference here for the in-flight I/O.
         * We can't rely on the reference count by device opener,
         * because the device may be closed during the request completion
         * when all bios are completed.
         * See the comment in rq_completed() too.
         */
        dm_get(md);
}

static void map_tio_request(struct kthread_work *work)
{
        struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work);
        struct request *rq = tio->orig;
        struct mapped_device *md = tio->md;

        if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE)
                dm_requeue_original_request(md, rq);
}

ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
{
        return sprintf(buf, "%u\n", md->seq_rq_merge_deadline_usecs);
}

#define MAX_SEQ_RQ_MERGE_DEADLINE_USECS 100000

ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
                                                     const char *buf, size_t count)
{
        unsigned deadline;

        if (dm_get_md_type(md) != DM_TYPE_REQUEST_BASED)
                return count;

        if (kstrtouint(buf, 10, &deadline))
                return -EINVAL;

        if (deadline > MAX_SEQ_RQ_MERGE_DEADLINE_USECS)
                deadline = MAX_SEQ_RQ_MERGE_DEADLINE_USECS;

        md->seq_rq_merge_deadline_usecs = deadline;

        return count;
}

static bool dm_old_request_peeked_before_merge_deadline(struct mapped_device *md)
{
        ktime_t kt_deadline;

        if (!md->seq_rq_merge_deadline_usecs)
                return false;

        kt_deadline = ns_to_ktime((u64)md->seq_rq_merge_deadline_usecs * NSEC_PER_USEC);
        kt_deadline = ktime_add_safe(md->last_rq_start_time, kt_deadline);

        return !ktime_after(ktime_get(), kt_deadline);
}

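/*
 * The merge deadline heuristic: if the peeked request is sequential with
 * the last one dispatched (same data direction, starting where it ended,
 * single-bvec bio) and that request started less than
 * seq_rq_merge_deadline_usecs ago, dm_old_request_fn() below delays
 * dispatch briefly so the block layer gets a chance to merge more I/O.
 */
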
/*
 * q->request_fn for old request-based dm.
 * Called with the queue lock held.
 */
static void dm_old_request_fn(struct request_queue *q)
{
        struct mapped_device *md = q->queuedata;
        struct dm_target *ti = md->immutable_target;
        struct request *rq;
        struct dm_rq_target_io *tio;
        sector_t pos = 0;

        if (unlikely(!ti)) {
                int srcu_idx;
                struct dm_table *map = dm_get_live_table(md, &srcu_idx);

                ti = dm_table_find_target(map, pos);
                dm_put_live_table(md, srcu_idx);
        }

        /*
         * For suspend, check blk_queue_stopped() and increment
         * ->pending within a single queue_lock not to increment the
         * number of in-flight I/Os after the queue is stopped in
         * dm_suspend().
         */
        while (!blk_queue_stopped(q)) {
                rq = blk_peek_request(q);
                if (!rq)
                        return;

                /* always use block 0 to find the target for flushes for now */
                pos = 0;
                if (req_op(rq) != REQ_OP_FLUSH)
                        pos = blk_rq_pos(rq);

                if ((dm_old_request_peeked_before_merge_deadline(md) &&
                     md_in_flight(md) && rq->bio && rq->bio->bi_vcnt == 1 &&
                     md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq)) ||
                    (ti->type->busy && ti->type->busy(ti))) {
                        blk_delay_queue(q, 10);
                        return;
                }

                dm_start_request(md, rq);

                tio = tio_from_request(rq);
                /* Establish tio->ti before queuing work (map_tio_request) */
                tio->ti = ti;
                queue_kthread_work(&md->kworker, &tio->work);
                BUG_ON(!irqs_disabled());
        }
}

/*
 * Fully initialize a .request_fn request-based queue.
 */
int dm_old_init_request_queue(struct mapped_device *md)
{
        /* Fully initialize the queue */
        if (!blk_init_allocated_queue(md->queue, dm_old_request_fn, NULL))
                return -EINVAL;

        /* disable dm_old_request_fn's merge heuristic by default */
        md->seq_rq_merge_deadline_usecs = 0;

        dm_init_normal_md_queue(md);
        blk_queue_softirq_done(md->queue, dm_softirq_done);
        blk_queue_prep_rq(md->queue, dm_old_prep_fn);

        /* Initialize the request-based DM worker thread */
        init_kthread_worker(&md->kworker);
        md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
                                       "kdmwork-%s", dm_device_name(md));
        if (IS_ERR(md->kworker_task))
                return PTR_ERR(md->kworker_task);

        elv_register_queue(md->queue);

        return 0;
}

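/*
 * The kdmwork-<dev> kthread exists so that clone allocation and mapping
 * (map_request) happen in process context with interrupts enabled;
 * dm_old_request_fn() itself runs with the queue lock held and interrupts
 * disabled (note the BUG_ON(!irqs_disabled()) above), which is no place
 * to call into a target's map function.
 */
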
static int dm_mq_init_request(void *data, struct request *rq,
                              unsigned int hctx_idx, unsigned int request_idx,
                              unsigned int numa_node)
{
        struct mapped_device *md = data;
        struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);

        /*
         * Must initialize md member of tio, otherwise it won't
         * be available in dm_mq_queue_rq.
         */
        tio->md = md;

        if (md->init_tio_pdu) {
                /* target-specific per-io data is immediately after the tio */
                tio->info.ptr = tio + 1;
        }

        return 0;
}

static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
                          const struct blk_mq_queue_data *bd)
{
        struct request *rq = bd->rq;
        struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
        struct mapped_device *md = tio->md;
        struct dm_target *ti = md->immutable_target;

        if (unlikely(!ti)) {
                int srcu_idx;
                struct dm_table *map = dm_get_live_table(md, &srcu_idx);

                ti = dm_table_find_target(map, 0);
                dm_put_live_table(md, srcu_idx);
        }

        /*
         * On suspend dm_stop_queue() handles stopping the blk-mq
         * request_queue BUT: even though the hw_queues are marked
         * BLK_MQ_S_STOPPED at that point there is still a race that
         * is allowing block/blk-mq.c to call ->queue_rq against a
         * hctx that it really shouldn't. The following check guards
         * against this rarity (albeit _not_ race-free).
         */
        if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
                return BLK_MQ_RQ_QUEUE_BUSY;

        if (ti->type->busy && ti->type->busy(ti))
                return BLK_MQ_RQ_QUEUE_BUSY;

        dm_start_request(md, rq);

        /* Init tio using md established in .init_request */
        init_tio(tio, rq, md);

        /*
         * Establish tio->ti before calling map_request().
         */
        tio->ti = ti;

        /* Direct call is fine since .queue_rq allows allocations */
        if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) {
                /* Undo dm_start_request() before requeuing */
                rq_end_stats(md, rq);
                rq_completed(md, rq_data_dir(rq), false);
                return BLK_MQ_RQ_QUEUE_BUSY;
        }

        return BLK_MQ_RQ_QUEUE_OK;
}

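/*
 * Returning BLK_MQ_RQ_QUEUE_BUSY above makes blk-mq hold the request on
 * its dispatch list and retry it later, which doubles as flow control
 * for a busy target or an (almost) stopped queue.
 */
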
static struct blk_mq_ops dm_mq_ops = {
        .queue_rq = dm_mq_queue_rq,
        .map_queue = blk_mq_map_queue,
        .complete = dm_softirq_done,
        .init_request = dm_mq_init_request,
};

int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
{
        struct request_queue *q;
        struct dm_target *immutable_tgt;
        int err;

        if (!dm_table_all_blk_mq_devices(t)) {
                DMERR("request-based dm-mq may only be stacked on blk-mq device(s)");
                return -EINVAL;
        }

        md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id);
        if (!md->tag_set)
                return -ENOMEM;

        md->tag_set->ops = &dm_mq_ops;
        md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
        md->tag_set->numa_node = md->numa_node_id;
        md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
        md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();
        md->tag_set->driver_data = md;

        md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);
        immutable_tgt = dm_table_get_immutable_target(t);
        if (immutable_tgt && immutable_tgt->per_io_data_size) {
                /* any target-specific per-io data is immediately after the tio */
                md->tag_set->cmd_size += immutable_tgt->per_io_data_size;
                md->init_tio_pdu = true;
        }

        err = blk_mq_alloc_tag_set(md->tag_set);
        if (err)
                goto out_kfree_tag_set;

        q = blk_mq_init_allocated_queue(md->tag_set, md->queue);
        if (IS_ERR(q)) {
                err = PTR_ERR(q);
                goto out_tag_set;
        }
        dm_init_md_queue(md);

        /* backfill 'mq' sysfs registration normally done in blk_register_queue */
        blk_mq_register_dev(disk_to_dev(md->disk), q);

        return 0;

out_tag_set:
        blk_mq_free_tag_set(md->tag_set);
out_kfree_tag_set:
        kfree(md->tag_set);

        return err;
}

void dm_mq_cleanup_mapped_device(struct mapped_device *md)
{
        if (md->tag_set) {
                blk_mq_free_tag_set(md->tag_set);
                kfree(md->tag_set);
        }
}

module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");

module_param(use_blk_mq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices");

module_param(dm_mq_nr_hw_queues, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_mq_nr_hw_queues, "Number of hardware queues for request-based dm-mq devices");

module_param(dm_mq_queue_depth, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_mq_queue_depth, "Queue depth for request-based dm-mq devices");