/*
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"
#include <linux/elevator.h> /* for rq_end_sector() */
#include <linux/blk-mq.h>

#define DM_MSG_PREFIX "core-rq"

#define DM_MQ_NR_HW_QUEUES 1
#define DM_MQ_QUEUE_DEPTH 2048
static unsigned dm_mq_nr_hw_queues = DM_MQ_NR_HW_QUEUES;
static unsigned dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH;

/*
 * Request-based DM's mempools' reserved IOs set by the user.
 */
#define RESERVED_REQUEST_BASED_IOS	256
static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;

static bool use_blk_mq = IS_ENABLED(CONFIG_DM_MQ_DEFAULT);
bool dm_use_blk_mq_default(void)
{
	return use_blk_mq;
}

bool dm_use_blk_mq(struct mapped_device *md)
{
	return md->use_blk_mq;
}
EXPORT_SYMBOL_GPL(dm_use_blk_mq);
unsigned dm_get_reserved_rq_based_ios(void)
{
	return __dm_get_module_param(&reserved_rq_based_ios,
				     RESERVED_REQUEST_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);
static unsigned dm_get_blk_mq_nr_hw_queues(void)
{
	return __dm_get_module_param(&dm_mq_nr_hw_queues, 1, 32);
}

static unsigned dm_get_blk_mq_queue_depth(void)
{
	return __dm_get_module_param(&dm_mq_queue_depth,
				     DM_MQ_QUEUE_DEPTH, BLK_MQ_MAX_DEPTH);
}
int dm_request_based(struct mapped_device *md)
{
	return blk_queue_stackable(md->queue);
}
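/*
 * Queue start/stop comes in two flavors: dm_old_*() for the legacy
 * .request_fn path (which must take the queue lock) and dm_mq_*() for
 * blk-mq. The dm_start_queue()/dm_stop_queue() wrappers below dispatch
 * on q->mq_ops.
 */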
static void dm_old_start_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	if (blk_queue_stopped(q))
		blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
static void dm_mq_start_queue(struct request_queue *q)
{
	blk_mq_start_stopped_hw_queues(q, true);
	blk_mq_kick_requeue_list(q);
}
void dm_start_queue(struct request_queue *q)
{
	if (!q->mq_ops)
		dm_old_start_queue(q);
	else
		dm_mq_start_queue(q);
}
static void dm_old_stop_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	if (!blk_queue_stopped(q))
		blk_stop_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
static void dm_mq_stop_queue(struct request_queue *q)
{
	if (blk_mq_queue_stopped(q))
		return;

	blk_mq_quiesce_queue(q);
}
void dm_stop_queue(struct request_queue *q)
{
	if (!q->mq_ops)
		dm_old_stop_queue(q);
	else
		dm_mq_stop_queue(q);
}
/*
 * Partial completion handling for request-based dm
 */
static void end_clone_bio(struct bio *clone)
{
	struct dm_rq_clone_bio_info *info =
		container_of(clone, struct dm_rq_clone_bio_info, clone);
	struct dm_rq_target_io *tio = info->tio;
	struct bio *bio = info->orig;
	unsigned int nr_bytes = info->orig->bi_iter.bi_size;
	int error = clone->bi_error;

	bio_put(clone);

	if (tio->error)
		/*
		 * An error has already been detected on the request.
		 * Once an error occurred, just let clone->end_io() handle
		 * the remainder.
		 */
		return;
	else if (error) {
		/*
		 * Don't report the error to the upper layer yet.
		 * The error handling decision is made by the target driver
		 * when the request is completed.
		 */
		tio->error = error;
		return;
	}

	/*
	 * I/O for the bio successfully completed.
	 * Report the data completion to the upper layer.
	 */

	/*
	 * bios are processed from the head of the list,
	 * so the completing bio should always be rq->bio.
	 * If it's not, something wrong is happening.
	 */
	if (tio->orig->bio != bio)
		DMERR("bio completion is going in the middle of the request");

	/*
	 * Update the original request.
	 * Do not use blk_end_request() here, because it may complete
	 * the original request before the clone, and break the ordering.
	 */
	blk_update_request(tio->orig, 0, nr_bytes);
}
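/*
 * The dm_rq_target_io lives in the request's per-driver payload (PDU)
 * on both the .request_fn and blk-mq paths, so it can be recovered
 * directly from the original request.
 */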
static struct dm_rq_target_io *tio_from_request(struct request *rq)
{
	return blk_mq_rq_to_pdu(rq);
}
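/*
 * Close the statistics window opened in dm_start_request():
 * tio->duration_jiffies holds the start time until completion, when it
 * is converted into the elapsed duration.
 */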
static void rq_end_stats(struct mapped_device *md, struct request *orig)
{
	if (unlikely(dm_stats_used(&md->stats))) {
		struct dm_rq_target_io *tio = tio_from_request(orig);
		tio->duration_jiffies = jiffies - tio->duration_jiffies;
		dm_stats_account_io(&md->stats, rq_data_dir(orig),
				    blk_rq_pos(orig), tio->n_sectors, true,
				    tio->duration_jiffies, &tio->stats_aux);
	}
}
/*
 * Don't touch any member of the md after calling this function because
 * the md may be freed in dm_put() at the end of this function.
 * Or do dm_get() before calling this function and dm_put() later.
 */
static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
{
	struct request_queue *q = md->queue;
	unsigned long flags;

	atomic_dec(&md->pending[rw]);

	/* nudge anyone waiting on suspend queue */
	if (!md_in_flight(md))
		wake_up(&md->wait);

	/*
	 * Run this off this callpath, as drivers could invoke end_io while
	 * inside their request_fn (and holding the queue lock). Calling
	 * back into ->request_fn() could deadlock attempting to grab the
	 * queue lock again.
	 */
	if (!q->mq_ops && run_queue) {
		spin_lock_irqsave(q->queue_lock, flags);
		blk_run_queue_async(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}

	/*
	 * dm_put() must be at the end of this function. See the comment above.
	 */
	dm_put(md);
}
/*
 * Complete the clone and the original request.
 * Must be called without the clone's queue lock held;
 * see end_clone_request() for more details.
 */
static void dm_end_request(struct request *clone, int error)
{
	int rw = rq_data_dir(clone);
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;

	blk_rq_unprep_clone(clone);
	tio->ti->type->release_clone_rq(clone);

	rq_end_stats(md, rq);
	if (!rq->q->mq_ops)
		blk_end_request_all(rq, error);
	else
		blk_mq_end_request(rq, error);
	rq_completed(md, rw, true);
}
/*
 * Requeue the original request of a clone.
 */
static void dm_old_requeue_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, rq);
	blk_run_queue_async(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs)
{
	blk_mq_delay_kick_requeue_list(q, msecs);
}

void dm_mq_kick_requeue_list(struct mapped_device *md)
{
	__dm_mq_kick_requeue_list(dm_get_md_queue(md), 0);
}
EXPORT_SYMBOL(dm_mq_kick_requeue_list);
static void dm_mq_delay_requeue_request(struct request *rq, unsigned long msecs)
{
	blk_mq_requeue_request(rq, false);
	__dm_mq_kick_requeue_list(rq->q, msecs);
}
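/*
 * Requeue the original request on its queue (releasing any clone
 * first) and drop the md reference taken in dm_start_request().
 */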
static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_requeue)
{
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	int rw = rq_data_dir(rq);

	rq_end_stats(md, rq);
	if (tio->clone) {
		blk_rq_unprep_clone(tio->clone);
		tio->ti->type->release_clone_rq(tio->clone);
	}

	if (!rq->q->mq_ops)
		dm_old_requeue_request(rq);
	else
		dm_mq_delay_requeue_request(rq, delay_requeue ? 100/*ms*/ : 0);

	rq_completed(md, rw, false);
}
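/*
 * Consult the target's rq_end_io hook, if any, and act on its verdict:
 * complete the request, leave it to the target, or requeue it.
 */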
static void dm_done(struct request *clone, int error, bool mapped)
{
	int r = DM_ENDIO_DONE;
	struct dm_rq_target_io *tio = clone->end_io_data;
	dm_request_endio_fn rq_end_io = NULL;

	if (tio->ti) {
		rq_end_io = tio->ti->type->rq_end_io;

		if (mapped && rq_end_io)
			r = rq_end_io(tio->ti, clone, error, &tio->info);
	}

	if (unlikely(error == -EREMOTEIO)) {
		if (req_op(clone) == REQ_OP_WRITE_SAME &&
		    !clone->q->limits.max_write_same_sectors)
			disable_write_same(tio->md);
		if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
		    !clone->q->limits.max_write_zeroes_sectors)
			disable_write_zeroes(tio->md);
	}

	switch (r) {
	case DM_ENDIO_DONE:
		/* The target wants to complete the I/O */
		dm_end_request(clone, error);
		break;
	case DM_ENDIO_INCOMPLETE:
		/* The target will handle the I/O */
		return;
	case DM_ENDIO_REQUEUE:
		/* The target wants to requeue the I/O */
		dm_requeue_original_request(tio, false);
		break;
	default:
		DMWARN("unimplemented target endio return value: %d", r);
		BUG();
	}
}
/*
 * Request completion handler for request-based dm
 */
static void dm_softirq_done(struct request *rq)
{
	bool mapped = true;
	struct dm_rq_target_io *tio = tio_from_request(rq);
	struct request *clone = tio->clone;
	int rw;

	if (!clone) {
		struct mapped_device *md = tio->md;

		rq_end_stats(md, rq);
		rw = rq_data_dir(rq);
		if (!rq->q->mq_ops)
			blk_end_request_all(rq, tio->error);
		else
			blk_mq_end_request(rq, tio->error);
		rq_completed(md, rw, false);
		return;
	}

	if (rq->rq_flags & RQF_FAILED)
		mapped = false;

	dm_done(clone, tio->error, mapped);
}
/*
 * Complete the clone and the original request with the error status
 * through softirq context.
 */
static void dm_complete_request(struct request *rq, int error)
{
	struct dm_rq_target_io *tio = tio_from_request(rq);

	tio->error = error;
	if (!rq->q->mq_ops)
		blk_complete_request(rq);
	else
		blk_mq_complete_request(rq);
}
/*
 * Complete the not-mapped clone and the original request with the error status
 * through softirq context.
 * The target's rq_end_io() function isn't called.
 * This may be used when the target's clone_and_map_rq() function fails.
 */
static void dm_kill_unmapped_request(struct request *rq, int error)
{
	rq->rq_flags |= RQF_FAILED;
	dm_complete_request(rq, error);
}
/*
 * Called with the clone's queue lock held (in the case of .request_fn)
 */
static void end_clone_request(struct request *clone, int error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;

	/*
	 * Actual request completion is done in a softirq context which doesn't
	 * hold the clone's queue lock. Otherwise, deadlock could occur because:
	 *   - another request may be submitted by the upper level driver
	 *     of the stacking during the completion
	 *   - the submission which requires queue lock may be done
	 *     against this clone's queue
	 */
	dm_complete_request(tio->orig, error);
}
static void dm_dispatch_clone_request(struct request *clone, struct request *rq)
{
	int r;

	if (blk_queue_io_stat(clone->q))
		clone->rq_flags |= RQF_IO_STAT;

	clone->start_time = jiffies;
	r = blk_insert_cloned_request(clone->q, clone);
	if (r)
		/* must complete clone in terms of original request */
		dm_complete_request(rq, r);
}
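/*
 * Called by blk_rq_prep_clone() for each cloned bio: link the clone's
 * bio_info back to the tio and the original bio, and hook partial
 * completion handling via end_clone_bio().
 */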
static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
				 void *data)
{
	struct dm_rq_target_io *tio = data;
	struct dm_rq_clone_bio_info *info =
		container_of(bio, struct dm_rq_clone_bio_info, clone);

	info->orig = bio_orig;
	info->tio = tio;
	bio->bi_end_io = end_clone_bio;

	return 0;
}
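/*
 * Prepare the clone to mirror the original request's bios and route
 * its completion back through end_clone_request().
 */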
static int setup_clone(struct request *clone, struct request *rq,
		       struct dm_rq_target_io *tio, gfp_t gfp_mask)
{
	int r;

	r = blk_rq_prep_clone(clone, rq, tio->md->bs, gfp_mask,
			      dm_rq_bio_constructor, tio);
	if (r)
		return r;

	clone->end_io = end_clone_request;
	clone->end_io_data = tio;

	tio->clone = clone;

	return 0;
}
static void map_tio_request(struct kthread_work *work);
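/*
 * Reset the per-request tio before (re)use; tio->ti is established
 * separately by the caller once the target is known.
 */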
static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
		     struct mapped_device *md)
{
	tio->md = md;
	tio->ti = NULL;
	tio->clone = NULL;
	tio->orig = rq;
	tio->error = 0;
	/*
	 * Avoid initializing info for blk-mq; it passes
	 * target-specific data through info.ptr
	 * (see: dm_mq_init_request)
	 */
	if (!md->init_tio_pdu)
		memset(&tio->info, 0, sizeof(tio->info));
	if (md->kworker_task)
		kthread_init_work(&tio->work, map_tio_request);
}
/*
 * Returns:
 * DM_MAPIO_*       : the request has been processed as indicated
 * DM_MAPIO_REQUEUE : the original request needs to be immediately requeued
 * < 0              : the request was completed due to failure
 */
static int map_request(struct dm_rq_target_io *tio)
{
	int r;
	struct dm_target *ti = tio->ti;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	struct request *clone = NULL;

	r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
	switch (r) {
	case DM_MAPIO_SUBMITTED:
		/* The target has taken the I/O to submit by itself later */
		break;
	case DM_MAPIO_REMAPPED:
		if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
			/* -ENOMEM */
			ti->type->release_clone_rq(clone);
			return DM_MAPIO_REQUEUE;
		}

		/* The target has remapped the I/O so dispatch it */
		trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
				     blk_rq_pos(rq));
		dm_dispatch_clone_request(clone, rq);
		break;
	case DM_MAPIO_REQUEUE:
		/* The target wants to requeue the I/O */
		break;
	case DM_MAPIO_DELAY_REQUEUE:
		/* The target wants to requeue the I/O after a delay */
		dm_requeue_original_request(tio, true);
		break;
	case DM_MAPIO_KILL:
		/* The target wants to complete the I/O */
		dm_kill_unmapped_request(rq, -EIO);
		break;
	default:
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}

	return r;
}
static void dm_start_request(struct mapped_device *md, struct request *orig)
{
	if (!orig->q->mq_ops)
		blk_start_request(orig);
	else
		blk_mq_start_request(orig);
	atomic_inc(&md->pending[rq_data_dir(orig)]);

	if (md->seq_rq_merge_deadline_usecs) {
		md->last_rq_pos = rq_end_sector(orig);
		md->last_rq_rw = rq_data_dir(orig);
		md->last_rq_start_time = ktime_get();
	}

	if (unlikely(dm_stats_used(&md->stats))) {
		struct dm_rq_target_io *tio = tio_from_request(orig);
		tio->duration_jiffies = jiffies;
		tio->n_sectors = blk_rq_sectors(orig);
		dm_stats_account_io(&md->stats, rq_data_dir(orig),
				    blk_rq_pos(orig), tio->n_sectors, false, 0,
				    &tio->stats_aux);
	}

	/*
	 * Hold the md reference here for the in-flight I/O.
	 * We can't rely on the reference count by device opener,
	 * because the device may be closed during the request completion
	 * when all bios are completed.
	 * See the comment in rq_completed() too.
	 */
	dm_get(md);
}
static int __dm_rq_init_rq(struct mapped_device *md, struct request *rq)
{
	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);

	/*
	 * Must initialize md member of tio, otherwise it won't
	 * be available in dm_mq_queue_rq.
	 */
	tio->md = md;

	if (md->init_tio_pdu) {
		/* target-specific per-io data is immediately after the tio */
		tio->info.ptr = tio + 1;
	}

	return 0;
}

static int dm_rq_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp)
{
	return __dm_rq_init_rq(q->rq_alloc_data, rq);
}
static void map_tio_request(struct kthread_work *work)
{
	struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work);

	if (map_request(tio) == DM_MAPIO_REQUEUE)
		dm_requeue_original_request(tio, false);
}
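/*
 * sysfs interface for the .request_fn path's sequential-I/O merge
 * deadline, in microseconds; see
 * dm_old_request_peeked_before_merge_deadline() for how it is used.
 */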
ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
{
	return sprintf(buf, "%u\n", md->seq_rq_merge_deadline_usecs);
}
#define MAX_SEQ_RQ_MERGE_DEADLINE_USECS 100000
ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
						     const char *buf, size_t count)
{
	unsigned deadline;

	if (dm_get_md_type(md) != DM_TYPE_REQUEST_BASED)
		return count;

	if (kstrtouint(buf, 10, &deadline))
		return -EINVAL;

	if (deadline > MAX_SEQ_RQ_MERGE_DEADLINE_USECS)
		deadline = MAX_SEQ_RQ_MERGE_DEADLINE_USECS;

	md->seq_rq_merge_deadline_usecs = deadline;

	return count;
}
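/*
 * Returns true while the configured merge deadline has not yet
 * expired, i.e. dispatch should be delayed a little longer to give
 * sequential I/O a chance to merge.
 */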
static bool dm_old_request_peeked_before_merge_deadline(struct mapped_device *md)
{
	ktime_t kt_deadline;

	if (!md->seq_rq_merge_deadline_usecs)
		return false;

	kt_deadline = ns_to_ktime((u64)md->seq_rq_merge_deadline_usecs * NSEC_PER_USEC);
	kt_deadline = ktime_add_safe(md->last_rq_start_time, kt_deadline);

	return !ktime_after(ktime_get(), kt_deadline);
}
/*
 * q->request_fn for old request-based dm.
 * Called with the queue lock held.
 */
static void dm_old_request_fn(struct request_queue *q)
{
	struct mapped_device *md = q->queuedata;
	struct dm_target *ti = md->immutable_target;
	struct request *rq;
	struct dm_rq_target_io *tio;
	sector_t pos = 0;

	if (unlikely(!ti)) {
		int srcu_idx;
		struct dm_table *map = dm_get_live_table(md, &srcu_idx);

		if (unlikely(!map)) {
			dm_put_live_table(md, srcu_idx);
			return;
		}
		ti = dm_table_find_target(map, pos);
		dm_put_live_table(md, srcu_idx);
	}

	/*
	 * For suspend, check blk_queue_stopped() and increment
	 * ->pending within a single queue_lock not to increment the
	 * number of in-flight I/Os after the queue is stopped in
	 * dm_suspend().
	 */
	while (!blk_queue_stopped(q)) {
		rq = blk_peek_request(q);
		if (!rq)
			return;

		/* always use block 0 to find the target for flushes for now */
		pos = 0;
		if (req_op(rq) != REQ_OP_FLUSH)
			pos = blk_rq_pos(rq);

		if ((dm_old_request_peeked_before_merge_deadline(md) &&
		     md_in_flight(md) && rq->bio && !bio_multiple_segments(rq->bio) &&
		     md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq)) ||
		    (ti->type->busy && ti->type->busy(ti))) {
			blk_delay_queue(q, 10);
			return;
		}

		dm_start_request(md, rq);

		tio = tio_from_request(rq);
		init_tio(tio, rq, md);
		/* Establish tio->ti before queuing work (map_tio_request) */
		tio->ti = ti;
		kthread_queue_work(&md->kworker, &tio->work);
		BUG_ON(!irqs_disabled());
	}
}
/*
 * Fully initialize a .request_fn request-based queue.
 */
int dm_old_init_request_queue(struct mapped_device *md, struct dm_table *t)
{
	struct dm_target *immutable_tgt;

	/* Fully initialize the queue */
	md->queue->cmd_size = sizeof(struct dm_rq_target_io);
	md->queue->rq_alloc_data = md;
	md->queue->request_fn = dm_old_request_fn;
	md->queue->init_rq_fn = dm_rq_init_rq;

	immutable_tgt = dm_table_get_immutable_target(t);
	if (immutable_tgt && immutable_tgt->per_io_data_size) {
		/* any target-specific per-io data is immediately after the tio */
		md->queue->cmd_size += immutable_tgt->per_io_data_size;
		md->init_tio_pdu = true;
	}
	if (blk_init_allocated_queue(md->queue) < 0)
		return -EINVAL;

	/* disable dm_old_request_fn's merge heuristic by default */
	md->seq_rq_merge_deadline_usecs = 0;

	dm_init_normal_md_queue(md);
	blk_queue_softirq_done(md->queue, dm_softirq_done);

	/* Initialize the request-based DM worker thread */
	kthread_init_worker(&md->kworker);
	md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
				       "kdmwork-%s", dm_device_name(md));
	if (IS_ERR(md->kworker_task)) {
		int error = PTR_ERR(md->kworker_task);
		md->kworker_task = NULL;
		return error;
	}

	elv_register_queue(md->queue);

	return 0;
}
static int dm_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
			      unsigned int hctx_idx, unsigned int numa_node)
{
	return __dm_rq_init_rq(set->driver_data, rq);
}
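/*
 * blk-mq .queue_rq entry point: map the request through the (usually
 * immutable) target and dispatch the clone, or return
 * BLK_MQ_RQ_QUEUE_BUSY so blk-mq retries later.
 */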
static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
			  const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
	struct mapped_device *md = tio->md;
	struct dm_target *ti = md->immutable_target;

	if (unlikely(!ti)) {
		int srcu_idx;
		struct dm_table *map = dm_get_live_table(md, &srcu_idx);

		ti = dm_table_find_target(map, 0);
		dm_put_live_table(md, srcu_idx);
	}

	if (ti->type->busy && ti->type->busy(ti))
		return BLK_MQ_RQ_QUEUE_BUSY;

	dm_start_request(md, rq);

	/* Init tio using md established in .init_request */
	init_tio(tio, rq, md);

	/*
	 * Establish tio->ti before calling map_request().
	 */
	tio->ti = ti;

	/* Direct call is fine since .queue_rq allows allocations */
	if (map_request(tio) == DM_MAPIO_REQUEUE) {
		/* Undo dm_start_request() before requeuing */
		rq_end_stats(md, rq);
		rq_completed(md, rq_data_dir(rq), false);
		blk_mq_delay_run_hw_queue(hctx, 100/*ms*/);
		return BLK_MQ_RQ_QUEUE_BUSY;
	}

	return BLK_MQ_RQ_QUEUE_OK;
}
static const struct blk_mq_ops dm_mq_ops = {
	.queue_rq = dm_mq_queue_rq,
	.complete = dm_softirq_done,
	.init_request = dm_mq_init_request,
};
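/*
 * Allocate a blk-mq tag set and initialize md->queue for dm-mq use;
 * dm-mq may only be stacked on all-blk-mq devices, which is validated
 * first.
 */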
int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
{
	struct request_queue *q;
	struct dm_target *immutable_tgt;
	int err;

	if (!dm_table_all_blk_mq_devices(t)) {
		DMERR("request-based dm-mq may only be stacked on blk-mq device(s)");
		return -EINVAL;
	}

	md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id);
	if (!md->tag_set)
		return -ENOMEM;

	md->tag_set->ops = &dm_mq_ops;
	md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
	md->tag_set->numa_node = md->numa_node_id;
	md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
	md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();
	md->tag_set->driver_data = md;

	md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);
	immutable_tgt = dm_table_get_immutable_target(t);
	if (immutable_tgt && immutable_tgt->per_io_data_size) {
		/* any target-specific per-io data is immediately after the tio */
		md->tag_set->cmd_size += immutable_tgt->per_io_data_size;
		md->init_tio_pdu = true;
	}

	err = blk_mq_alloc_tag_set(md->tag_set);
	if (err)
		goto out_kfree_tag_set;

	q = blk_mq_init_allocated_queue(md->tag_set, md->queue);
	if (IS_ERR(q)) {
		err = PTR_ERR(q);
		goto out_tag_set;
	}
	dm_init_md_queue(md);

	/* backfill 'mq' sysfs registration normally done in blk_register_queue */
	err = blk_mq_register_dev(disk_to_dev(md->disk), q);
	if (err)
		goto out_cleanup_queue;

	return 0;

out_cleanup_queue:
	blk_cleanup_queue(q);
out_tag_set:
	blk_mq_free_tag_set(md->tag_set);
out_kfree_tag_set:
	kfree(md->tag_set);

	return err;
}
void dm_mq_cleanup_mapped_device(struct mapped_device *md)
{
	if (md->tag_set) {
		blk_mq_free_tag_set(md->tag_set);
		kfree(md->tag_set);
	}
}
module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");

module_param(use_blk_mq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices");

module_param(dm_mq_nr_hw_queues, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_mq_nr_hw_queues, "Number of hardware queues for request-based dm-mq devices");

module_param(dm_mq_queue_depth, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_mq_queue_depth, "Queue depth for request-based dm-mq devices");