/*
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"

#include <linux/elevator.h> /* for rq_end_sector() */
#include <linux/blk-mq.h>

#define DM_MSG_PREFIX "core-rq"

#define DM_MQ_NR_HW_QUEUES 1
#define DM_MQ_QUEUE_DEPTH 2048

static unsigned dm_mq_nr_hw_queues = DM_MQ_NR_HW_QUEUES;
static unsigned dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH;

/*
 * Request-based DM's mempools' reserved IOs set by the user.
 */
#define RESERVED_REQUEST_BASED_IOS	256
static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;

unsigned dm_get_reserved_rq_based_ios(void)
{
	return __dm_get_module_param(&reserved_rq_based_ios,
				     RESERVED_REQUEST_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);

static unsigned dm_get_blk_mq_nr_hw_queues(void)
{
	return __dm_get_module_param(&dm_mq_nr_hw_queues, 1, 32);
}

static unsigned dm_get_blk_mq_queue_depth(void)
{
	return __dm_get_module_param(&dm_mq_queue_depth,
				     DM_MQ_QUEUE_DEPTH, BLK_MQ_MAX_DEPTH);
}
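
/*
 * Note on the two helpers above: __dm_get_module_param() (in dm.c) clamps
 * the user-tunable module parameters at each use rather than at write time.
 * A rough sketch of the expected behaviour, for illustration only:
 *
 *	param = READ_ONCE(*module_param);
 *	if (!param)
 *		param = def;		// e.g. DM_MQ_QUEUE_DEPTH
 *	else if (param > max)
 *		param = max;		// e.g. BLK_MQ_MAX_DEPTH
 *	return param;
 *
 * So setting dm_mod.dm_mq_queue_depth=0 effectively selects the default.
 */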

int dm_request_based(struct mapped_device *md)
{
	return queue_is_rq_based(md->queue);
}

void dm_start_queue(struct request_queue *q)
{
	blk_mq_unquiesce_queue(q);
	blk_mq_kick_requeue_list(q);
}

void dm_stop_queue(struct request_queue *q)
{
	if (blk_mq_queue_stopped(q))
		return;

	blk_mq_quiesce_queue(q);
}
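
/*
 * Note that dm_stop_queue() only quiesces the queue: blk_mq_quiesce_queue()
 * waits for in-progress ->queue_rq() calls to finish and blocks new
 * dispatches, but it does not wait for already-dispatched clones to
 * complete.  Those are drained by the suspend code via md->pending and the
 * wake-up in rq_completed() below.
 */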

/*
 * Partial completion handling for request-based dm
 */
static void end_clone_bio(struct bio *clone)
{
	struct dm_rq_clone_bio_info *info =
		container_of(clone, struct dm_rq_clone_bio_info, clone);
	struct dm_rq_target_io *tio = info->tio;
	unsigned int nr_bytes = info->orig->bi_iter.bi_size;
	blk_status_t error = clone->bi_status;
	bool is_last = !clone->bi_next;

	bio_put(clone);

	if (tio->error)
		/*
		 * An error has already been detected on the request.
		 * Once error occurred, just let clone->end_io() handle
		 * the remainder.
		 */
		return;
	else if (error) {
		/*
		 * Don't notice the error to the upper layer yet.
		 * The error handling decision is made by the target driver,
		 * when the request is completed.
		 */
		tio->error = error;
		goto exit;
	}

	/*
	 * I/O for the bio successfully completed.
	 * Notice the data completion to the upper layer.
	 */
	tio->completed += nr_bytes;

	/*
	 * Update the original request.
	 * Do not use blk_end_request() here, because it may complete
	 * the original request before the clone, and break the ordering.
	 */
	if (is_last)
 exit:
		blk_update_request(tio->orig, BLK_STS_OK, tio->completed);
}
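
/*
 * Example of the partial-completion bookkeeping above (illustration only):
 * an original request cloned into two 128KiB bios completes as
 *
 *	end_clone_bio(bio0): tio->completed = 128KiB, is_last == false
 *	end_clone_bio(bio1): tio->completed = 256KiB, is_last == true
 *	                     blk_update_request(orig, BLK_STS_OK, 256KiB)
 *
 * i.e. the original request is only advanced once the clone has made the
 * corresponding progress, which preserves completion ordering.
 */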

static struct dm_rq_target_io *tio_from_request(struct request *rq)
{
	return blk_mq_rq_to_pdu(rq);
}

static void rq_end_stats(struct mapped_device *md, struct request *orig)
{
	if (unlikely(dm_stats_used(&md->stats))) {
		struct dm_rq_target_io *tio = tio_from_request(orig);
		tio->duration_jiffies = jiffies - tio->duration_jiffies;
		dm_stats_account_io(&md->stats, rq_data_dir(orig),
				    blk_rq_pos(orig), tio->n_sectors, true,
				    tio->duration_jiffies, &tio->stats_aux);
	}
}

/*
 * Don't touch any member of the md after calling this function because
 * the md may be freed in dm_put() at the end of this function.
 * Or do dm_get() before calling this function and dm_put() later.
 */
static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
{
	atomic_dec(&md->pending[rw]);

	/* nudge anyone waiting on suspend queue */
	if (!md_in_flight(md))
		wake_up(&md->wait);

	/*
	 * dm_put() must be at the end of this function. See the comment above
	 */
	dm_put(md);
}

/*
 * Complete the clone and the original request.
 * Must be called without clone's queue lock held,
 * see end_clone_request() for more details.
 */
static void dm_end_request(struct request *clone, blk_status_t error)
{
	int rw = rq_data_dir(clone);
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;

	blk_rq_unprep_clone(clone);
	tio->ti->type->release_clone_rq(clone);

	rq_end_stats(md, rq);
	blk_mq_end_request(rq, error);
	rq_completed(md, rw, true);
}

static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs)
{
	blk_mq_delay_kick_requeue_list(q, msecs);
}

void dm_mq_kick_requeue_list(struct mapped_device *md)
{
	__dm_mq_kick_requeue_list(dm_get_md_queue(md), 0);
}
EXPORT_SYMBOL(dm_mq_kick_requeue_list);

static void dm_mq_delay_requeue_request(struct request *rq, unsigned long msecs)
{
	blk_mq_requeue_request(rq, false);
	__dm_mq_kick_requeue_list(rq->q, msecs);
}

static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_requeue)
{
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	int rw = rq_data_dir(rq);
	unsigned long delay_ms = delay_requeue ? 100 : 0;

	rq_end_stats(md, rq);
	if (tio->clone) {
		blk_rq_unprep_clone(tio->clone);
		tio->ti->type->release_clone_rq(tio->clone);
	}

	dm_mq_delay_requeue_request(rq, delay_ms);
	rq_completed(md, rw, false);
}

static void dm_done(struct request *clone, blk_status_t error, bool mapped)
{
	int r = DM_ENDIO_DONE;
	struct dm_rq_target_io *tio = clone->end_io_data;
	dm_request_endio_fn rq_end_io = NULL;

	if (tio->ti) {
		rq_end_io = tio->ti->type->rq_end_io;

		if (mapped && rq_end_io)
			r = rq_end_io(tio->ti, clone, error, &tio->info);
	}

	if (unlikely(error == BLK_STS_TARGET)) {
		if (req_op(clone) == REQ_OP_WRITE_SAME &&
		    !clone->q->limits.max_write_same_sectors)
			disable_write_same(tio->md);
		if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
		    !clone->q->limits.max_write_zeroes_sectors)
			disable_write_zeroes(tio->md);
	}

	switch (r) {
	case DM_ENDIO_DONE:
		/* The target wants to complete the I/O */
		dm_end_request(clone, error);
		break;
	case DM_ENDIO_INCOMPLETE:
		/* The target will handle the I/O */
		return;
	case DM_ENDIO_REQUEUE:
		/* The target wants to requeue the I/O */
		dm_requeue_original_request(tio, false);
		break;
	case DM_ENDIO_DELAY_REQUEUE:
		/* The target wants to requeue the I/O after a delay */
		dm_requeue_original_request(tio, true);
		break;
	default:
		DMWARN("unimplemented target endio return value: %d", r);
		BUG();
	}
}
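
/*
 * The switch in dm_done() is driven by the target's ->rq_end_io() hook.  A
 * minimal, purely hypothetical endio hook that retries transport errors
 * could look roughly like this (see dm-mpath.c for the real thing):
 *
 *	static int example_rq_end_io(struct dm_target *ti, struct request *clone,
 *				     blk_status_t error, union map_info *map_context)
 *	{
 *		if (error && blk_path_error(error))
 *			return DM_ENDIO_REQUEUE;	// retry elsewhere
 *		return DM_ENDIO_DONE;
 *	}
 */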

/*
 * Request completion handler for request-based dm
 */
static void dm_softirq_done(struct request *rq)
{
	bool mapped = true;
	struct dm_rq_target_io *tio = tio_from_request(rq);
	struct request *clone = tio->clone;
	int rw;

	if (!clone) {
		struct mapped_device *md = tio->md;

		rq_end_stats(md, rq);
		rw = rq_data_dir(rq);
		blk_mq_end_request(rq, tio->error);
		rq_completed(md, rw, false);
		return;
	}

	if (rq->rq_flags & RQF_FAILED)
		mapped = false;

	dm_done(clone, tio->error, mapped);
}

/*
 * Complete the clone and the original request with the error status
 * through softirq context.
 */
static void dm_complete_request(struct request *rq, blk_status_t error)
{
	struct dm_rq_target_io *tio = tio_from_request(rq);

	tio->error = error;
	blk_mq_complete_request(rq);
}

/*
 * Complete the not-mapped clone and the original request with the error status
 * through softirq context.
 * Target's rq_end_io() function isn't called.
 * This may be used when the target's clone_and_map_rq() function fails.
 */
static void dm_kill_unmapped_request(struct request *rq, blk_status_t error)
{
	rq->rq_flags |= RQF_FAILED;
	dm_complete_request(rq, error);
}

static void end_clone_request(struct request *clone, blk_status_t error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;

	dm_complete_request(tio->orig, error);
}

static blk_status_t dm_dispatch_clone_request(struct request *clone, struct request *rq)
{
	blk_status_t r;

	if (blk_queue_io_stat(clone->q))
		clone->rq_flags |= RQF_IO_STAT;

	clone->start_time_ns = ktime_get_ns();
	r = blk_insert_cloned_request(clone->q, clone);
	if (r != BLK_STS_OK && r != BLK_STS_RESOURCE && r != BLK_STS_DEV_RESOURCE)
		/* must complete clone in terms of original request */
		dm_complete_request(rq, r);
	return r;
}

static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
				 void *data)
{
	struct dm_rq_target_io *tio = data;
	struct dm_rq_clone_bio_info *info =
		container_of(bio, struct dm_rq_clone_bio_info, clone);

	info->orig = bio_orig;
	info->tio = tio;
	bio->bi_end_io = end_clone_bio;

	return 0;
}

static int setup_clone(struct request *clone, struct request *rq,
		       struct dm_rq_target_io *tio, gfp_t gfp_mask)
{
	int r;

	r = blk_rq_prep_clone(clone, rq, &tio->md->bs, gfp_mask,
			      dm_rq_bio_constructor, tio);
	if (r)
		return r;

	clone->end_io = end_clone_request;
	clone->end_io_data = tio;

	tio->clone = clone;

	return 0;
}

static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
		     struct mapped_device *md)
{
	tio->md = md;
	tio->ti = NULL;
	tio->clone = NULL;
	tio->orig = rq;
	tio->error = 0;
	tio->completed = 0;
	/*
	 * Avoid initializing info for blk-mq; it passes
	 * target-specific data through info.ptr
	 * (see: dm_mq_init_request)
	 */
	if (!md->init_tio_pdu)
		memset(&tio->info, 0, sizeof(tio->info));
}

/*
 * Returns:
 * DM_MAPIO_*       : the request has been processed as indicated
 * DM_MAPIO_REQUEUE : the original request needs to be immediately requeued
 * < 0              : the request was completed due to failure
 */
static int map_request(struct dm_rq_target_io *tio)
{
	int r;
	struct dm_target *ti = tio->ti;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	struct request *clone = NULL;
	blk_status_t ret;

	r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
	switch (r) {
	case DM_MAPIO_SUBMITTED:
		/* The target has taken the I/O to submit by itself later */
		break;
	case DM_MAPIO_REMAPPED:
		if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
			/* -ENOMEM */
			ti->type->release_clone_rq(clone);
			return DM_MAPIO_REQUEUE;
		}

		/* The target has remapped the I/O so dispatch it */
		trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
				     blk_rq_pos(rq));
		ret = dm_dispatch_clone_request(clone, rq);
		if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
			blk_rq_unprep_clone(clone);
			tio->ti->type->release_clone_rq(clone);
			tio->clone = NULL;
			r = DM_MAPIO_REQUEUE;
		}
		break;
	case DM_MAPIO_REQUEUE:
		/* The target wants to requeue the I/O */
		break;
	case DM_MAPIO_DELAY_REQUEUE:
		/* The target wants to requeue the I/O after a delay */
		dm_requeue_original_request(tio, true);
		break;
	case DM_MAPIO_KILL:
		/* The target wants to complete the I/O */
		dm_kill_unmapped_request(rq, BLK_STS_IOERR);
		break;
	default:
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}

	return r;
}
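
/*
 * map_request() is driven by the target's ->clone_and_map_rq() hook.  A
 * purely hypothetical pass-through hook illustrating the DM_MAPIO_REMAPPED
 * contract (allocate a clone on an underlying queue and hand it back for
 * the core to dispatch) could look roughly like this; dm-mpath.c has the
 * real multipath version:
 *
 *	static int example_clone_and_map(struct dm_target *ti, struct request *rq,
 *					 union map_info *map_context,
 *					 struct request **__clone)
 *	{
 *		struct request_queue *lower_q = ...;	// hypothetical underlying queue
 *		struct request *clone;
 *
 *		clone = blk_get_request(lower_q, rq->cmd_flags | REQ_NOMERGE,
 *					BLK_MQ_REQ_NOWAIT);
 *		if (IS_ERR(clone))
 *			return DM_MAPIO_DELAY_REQUEUE;
 *		clone->bio = clone->biotail = NULL;
 *		*__clone = clone;
 *		return DM_MAPIO_REMAPPED;
 *	}
 */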

/* DEPRECATED: previously used for request-based merge heuristic in dm_request_fn() */
ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
{
	return sprintf(buf, "%u\n", 0);
}

ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
						     const char *buf, size_t count)
{
	return count;
}

static void dm_start_request(struct mapped_device *md, struct request *orig)
{
	blk_mq_start_request(orig);
	atomic_inc(&md->pending[rq_data_dir(orig)]);

	if (unlikely(dm_stats_used(&md->stats))) {
		struct dm_rq_target_io *tio = tio_from_request(orig);
		tio->duration_jiffies = jiffies;
		tio->n_sectors = blk_rq_sectors(orig);
		dm_stats_account_io(&md->stats, rq_data_dir(orig),
				    blk_rq_pos(orig), tio->n_sectors, false, 0,
				    &tio->stats_aux);
	}

	/*
	 * Hold the md reference here for the in-flight I/O.
	 * We can't rely on the reference count by device opener,
	 * because the device may be closed during the request completion
	 * when all bios are completed.
	 * See the comment in rq_completed() too.
	 */
	dm_get(md);
}

static int dm_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
			      unsigned int hctx_idx, unsigned int numa_node)
{
	struct mapped_device *md = set->driver_data;
	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);

	/*
	 * Must initialize md member of tio, otherwise it won't
	 * be available in dm_mq_queue_rq.
	 */
	tio->md = md;

	if (md->init_tio_pdu) {
		/* target-specific per-io data is immediately after the tio */
		tio->info.ptr = tio + 1;
	}

	return 0;
}
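
/*
 * Per-request PDU layout used by the functions above and by the tag_set
 * setup in dm_mq_init_request_queue() below:
 *
 *	struct request | struct dm_rq_target_io | target per-io data (optional)
 *	               ^ blk_mq_rq_to_pdu(rq)    ^ tio + 1 == tio->info.ptr
 *
 * The trailing per-io area only exists when the table has an immutable
 * target with a non-zero per_io_data_size; in that case init_tio_pdu is set
 * and init_tio() leaves tio->info alone.
 */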

static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
				   const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
	struct mapped_device *md = tio->md;
	struct dm_target *ti = md->immutable_target;

	if (unlikely(!ti)) {
		int srcu_idx;
		struct dm_table *map = dm_get_live_table(md, &srcu_idx);

		ti = dm_table_find_target(map, 0);
		dm_put_live_table(md, srcu_idx);
	}

	if (ti->type->busy && ti->type->busy(ti))
		return BLK_STS_RESOURCE;

	dm_start_request(md, rq);

	/* Init tio using md established in .init_request */
	init_tio(tio, rq, md);

	/*
	 * Establish tio->ti before calling map_request().
	 */
	tio->ti = ti;

	/* Direct call is fine since .queue_rq allows allocations */
	if (map_request(tio) == DM_MAPIO_REQUEUE) {
		/* Undo dm_start_request() before requeuing */
		rq_end_stats(md, rq);
		rq_completed(md, rq_data_dir(rq), false);
		return BLK_STS_RESOURCE;
	}

	return BLK_STS_OK;
}
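
/*
 * Returning BLK_STS_RESOURCE from ->queue_rq keeps the original request
 * inside blk-mq (it goes back on the dispatch list and blk-mq re-dispatches
 * it later), which is why the DM_MAPIO_REQUEUE path above only has to undo
 * the accounting done by dm_start_request().
 */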

static const struct blk_mq_ops dm_mq_ops = {
	.queue_rq = dm_mq_queue_rq,
	.complete = dm_softirq_done,
	.init_request = dm_mq_init_request,
};

int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
{
	struct request_queue *q;
	struct dm_target *immutable_tgt;
	int err;

	md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id);
	if (!md->tag_set)
		return -ENOMEM;

	md->tag_set->ops = &dm_mq_ops;
	md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
	md->tag_set->numa_node = md->numa_node_id;
	md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
	md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();
	md->tag_set->driver_data = md;

	md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);
	immutable_tgt = dm_table_get_immutable_target(t);
	if (immutable_tgt && immutable_tgt->per_io_data_size) {
		/* any target-specific per-io data is immediately after the tio */
		md->tag_set->cmd_size += immutable_tgt->per_io_data_size;
		md->init_tio_pdu = true;
	}

	err = blk_mq_alloc_tag_set(md->tag_set);
	if (err)
		goto out_kfree_tag_set;

	q = blk_mq_init_allocated_queue(md->tag_set, md->queue);
	if (IS_ERR(q)) {
		err = PTR_ERR(q);
		goto out_tag_set;
	}

	return 0;

out_tag_set:
	blk_mq_free_tag_set(md->tag_set);
out_kfree_tag_set:
	kfree(md->tag_set);

	return err;
}

void dm_mq_cleanup_mapped_device(struct mapped_device *md)
{
	if (md->tag_set) {
		blk_mq_free_tag_set(md->tag_set);
		kfree(md->tag_set);
	}
}

module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");

/* Unused, but preserved for userspace compatibility */
static bool use_blk_mq = true;
module_param(use_blk_mq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices");

module_param(dm_mq_nr_hw_queues, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_mq_nr_hw_queues, "Number of hardware queues for request-based dm-mq devices");

module_param(dm_mq_queue_depth, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_mq_queue_depth, "Queue depth for request-based dm-mq devices");