/*
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"

#include <linux/elevator.h> /* for rq_end_sector() */
#include <linux/blk-mq.h>

#define DM_MSG_PREFIX "core-rq"

#define DM_MQ_NR_HW_QUEUES 1
#define DM_MQ_QUEUE_DEPTH 2048
static unsigned dm_mq_nr_hw_queues = DM_MQ_NR_HW_QUEUES;
static unsigned dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH;

/*
 * Number of IOs reserved in request-based DM's mempools; the value can
 * be overridden by the user via the reserved_rq_based_ios module parameter.
 */
#define RESERVED_REQUEST_BASED_IOS 256
static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;

#ifdef CONFIG_DM_MQ_DEFAULT
static bool use_blk_mq = true;
#else
static bool use_blk_mq = false;
#endif
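
/*
 * Note: use_blk_mq only provides the *default* transport.  The value is
 * expected to be sampled once per mapped_device at creation time via
 * dm_use_blk_mq_default() below and cached in md->use_blk_mq, so changing
 * the module parameter later should only affect devices created afterwards.
 */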

bool dm_use_blk_mq_default(void)
{
        return use_blk_mq;
}

bool dm_use_blk_mq(struct mapped_device *md)
{
        return md->use_blk_mq;
}
EXPORT_SYMBOL_GPL(dm_use_blk_mq);

unsigned dm_get_reserved_rq_based_ios(void)
{
        return __dm_get_module_param(&reserved_rq_based_ios,
                                     RESERVED_REQUEST_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);

static unsigned dm_get_blk_mq_nr_hw_queues(void)
{
        return __dm_get_module_param(&dm_mq_nr_hw_queues, 1, 32);
}

static unsigned dm_get_blk_mq_queue_depth(void)
{
        return __dm_get_module_param(&dm_mq_queue_depth,
                                     DM_MQ_QUEUE_DEPTH, BLK_MQ_MAX_DEPTH);
}
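
/*
 * All of the getters above funnel through __dm_get_module_param() from
 * dm core.  A sketch of the assumed contract (see dm.c for the actual
 * helper): a value of 0 falls back to the built-in default, and a value
 * above the given maximum is clamped to it, e.g.:
 *
 *      dm_mq_queue_depth = 0      -> DM_MQ_QUEUE_DEPTH (2048)
 *      dm_mq_queue_depth = 999999 -> BLK_MQ_MAX_DEPTH
 */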

int dm_request_based(struct mapped_device *md)
{
        return blk_queue_stackable(md->queue);
}

static void dm_old_start_queue(struct request_queue *q)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        if (blk_queue_stopped(q))
                blk_start_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

static void dm_mq_start_queue(struct request_queue *q)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        queue_flag_clear(QUEUE_FLAG_STOPPED, q);
        spin_unlock_irqrestore(q->queue_lock, flags);

        blk_mq_start_stopped_hw_queues(q, true);
        blk_mq_kick_requeue_list(q);
}

void dm_start_queue(struct request_queue *q)
{
        if (!q->mq_ops)
                dm_old_start_queue(q);
        else
                dm_mq_start_queue(q);
}

static void dm_old_stop_queue(struct request_queue *q)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        if (!blk_queue_stopped(q))
                blk_stop_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

static void dm_mq_stop_queue(struct request_queue *q)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        if (blk_queue_stopped(q)) {
                spin_unlock_irqrestore(q->queue_lock, flags);
                return;
        }

        queue_flag_set(QUEUE_FLAG_STOPPED, q);
        spin_unlock_irqrestore(q->queue_lock, flags);

        /* Prevent requeued requests from restarting the queue. */
        blk_mq_cancel_requeue_work(q);
        blk_mq_stop_hw_queues(q);
}

void dm_stop_queue(struct request_queue *q)
{
        if (!q->mq_ops)
                dm_old_stop_queue(q);
        else
                dm_mq_stop_queue(q);
}

static struct dm_rq_target_io *alloc_old_rq_tio(struct mapped_device *md,
                                                gfp_t gfp_mask)
{
        return mempool_alloc(md->io_pool, gfp_mask);
}

static void free_old_rq_tio(struct dm_rq_target_io *tio)
{
        mempool_free(tio, tio->md->io_pool);
}

static struct request *alloc_old_clone_request(struct mapped_device *md,
                                               gfp_t gfp_mask)
{
        return mempool_alloc(md->rq_pool, gfp_mask);
}

static void free_old_clone_request(struct mapped_device *md, struct request *rq)
{
        mempool_free(rq, md->rq_pool);
}

/*
 * Partial completion handling for request-based dm
 */
static void end_clone_bio(struct bio *clone)
{
        struct dm_rq_clone_bio_info *info =
                container_of(clone, struct dm_rq_clone_bio_info, clone);
        struct dm_rq_target_io *tio = info->tio;
        struct bio *bio = info->orig;
        unsigned int nr_bytes = info->orig->bi_iter.bi_size;
        int error = clone->bi_error;

        bio_put(clone);

        if (tio->error)
                /*
                 * An error has already been detected on the request.
                 * Once an error has occurred, just let clone->end_io()
                 * handle the remainder.
                 */
                return;
        else if (error) {
                /*
                 * Don't report the error to the upper layer yet.
                 * The error handling decision is made by the target driver
                 * when the request is completed.
                 */
                tio->error = error;
                return;
        }

        /*
         * I/O for the bio completed successfully.
         * Report the data completion to the upper layer.
         */

        /*
         * bios are processed from the head of the list,
         * so the completing bio should always be rq->bio.
         * If it's not, something is wrong.
         */
        if (tio->orig->bio != bio)
                DMERR("bio completion is going in the middle of the request");

        /*
         * Update the original request.
         * Do not use blk_end_request() here, because it may complete
         * the original request before the clone, and break the ordering.
         */
        blk_update_request(tio->orig, 0, nr_bytes);
}

static struct dm_rq_target_io *tio_from_request(struct request *rq)
{
        return (rq->q->mq_ops ? blk_mq_rq_to_pdu(rq) : rq->special);
}
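
/*
 * Where the tio lives depends on the transport: for blk-mq it is the
 * per-request PDU carved out right after struct request (sized via
 * tag_set->cmd_size in dm_mq_init_request_queue() below), while the old
 * .request_fn path stashes a mempool-allocated tio in rq->special from
 * dm_old_prep_fn().  tio_from_request() above hides that difference.
 */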

static void rq_end_stats(struct mapped_device *md, struct request *orig)
{
        if (unlikely(dm_stats_used(&md->stats))) {
                struct dm_rq_target_io *tio = tio_from_request(orig);
                tio->duration_jiffies = jiffies - tio->duration_jiffies;
                dm_stats_account_io(&md->stats, rq_data_dir(orig),
                                    blk_rq_pos(orig), tio->n_sectors, true,
                                    tio->duration_jiffies, &tio->stats_aux);
        }
}

/*
 * Don't touch any member of the md after calling this function because
 * the md may be freed in dm_put() at the end of this function.
 * Alternatively, do dm_get() before calling this function and dm_put() later.
 */
static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
{
        atomic_dec(&md->pending[rw]);

        /* nudge anyone waiting on suspend queue */
        if (!md_in_flight(md))
                wake_up(&md->wait);

        /*
         * Run this off this callpath, as drivers could invoke end_io while
         * inside their request_fn (and holding the queue lock). Calling
         * back into ->request_fn() could deadlock attempting to grab the
         * queue lock again.
         */
        if (!md->queue->mq_ops && run_queue)
                blk_run_queue_async(md->queue);

        /*
         * dm_put() must be at the end of this function. See the comment above.
         */
        dm_put(md);
}

static void free_rq_clone(struct request *clone)
{
        struct dm_rq_target_io *tio = clone->end_io_data;
        struct mapped_device *md = tio->md;

        blk_rq_unprep_clone(clone);

        /*
         * It is possible for a clone_old_rq() allocated clone to
         * get passed in -- it may not yet have a request_queue.
         * This is known to occur if the error target replaces
         * a multipath target that has a request_fn queue stacked
         * on blk-mq queue(s).
         */
        if (clone->q && clone->q->mq_ops)
                /* stacked on blk-mq queue(s) */
                tio->ti->type->release_clone_rq(clone);
        else if (!md->queue->mq_ops)
                /* request_fn queue stacked on request_fn queue(s) */
                free_old_clone_request(md, clone);

        if (!md->queue->mq_ops)
                free_old_rq_tio(tio);
}

/*
 * Complete the clone and the original request.
 * Must be called without clone's queue lock held,
 * see end_clone_request() for more details.
 */
static void dm_end_request(struct request *clone, int error)
{
        int rw = rq_data_dir(clone);
        struct dm_rq_target_io *tio = clone->end_io_data;
        struct mapped_device *md = tio->md;
        struct request *rq = tio->orig;

        if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
                rq->errors = clone->errors;
                rq->resid_len = clone->resid_len;

                if (rq->sense)
                        /*
                         * We are using the sense buffer of the original
                         * request, so setting the length of the sense data
                         * is enough.
                         */
                        rq->sense_len = clone->sense_len;
        }

        free_rq_clone(clone);
        rq_end_stats(md, rq);
        if (!rq->q->mq_ops)
                blk_end_request_all(rq, error);
        else
                blk_mq_end_request(rq, error);
        rq_completed(md, rw, true);
}

static void dm_unprep_request(struct request *rq)
{
        struct dm_rq_target_io *tio = tio_from_request(rq);
        struct request *clone = tio->clone;

        if (!rq->q->mq_ops) {
                rq->special = NULL;
                rq->cmd_flags &= ~REQ_DONTPREP;
        }

        if (clone)
                free_rq_clone(clone);
        else if (!tio->md->queue->mq_ops)
                free_old_rq_tio(tio);
}

/*
 * Requeue the original request of a clone.
 */
static void dm_old_requeue_request(struct request *rq)
{
        struct request_queue *q = rq->q;
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        blk_requeue_request(q, rq);
        blk_run_queue_async(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

static void dm_mq_delay_requeue_request(struct request *rq, unsigned long msecs)
{
        struct request_queue *q = rq->q;
        unsigned long flags;

        blk_mq_requeue_request(rq);

        spin_lock_irqsave(q->queue_lock, flags);
        if (!blk_queue_stopped(q))
                blk_mq_delay_kick_requeue_list(q, msecs);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_requeue)
{
        struct mapped_device *md = tio->md;
        struct request *rq = tio->orig;
        int rw = rq_data_dir(rq);

        rq_end_stats(md, rq);
        dm_unprep_request(rq);

        if (!rq->q->mq_ops)
                dm_old_requeue_request(rq);
        else
                dm_mq_delay_requeue_request(rq, delay_requeue ? 5000 : 0);

        rq_completed(md, rw, false);
}

static void dm_done(struct request *clone, int error, bool mapped)
{
        int r = error;
        struct dm_rq_target_io *tio = clone->end_io_data;
        dm_request_endio_fn rq_end_io = NULL;

        if (tio->ti) {
                rq_end_io = tio->ti->type->rq_end_io;

                if (mapped && rq_end_io)
                        r = rq_end_io(tio->ti, clone, error, &tio->info);
        }

        if (unlikely(r == -EREMOTEIO && (req_op(clone) == REQ_OP_WRITE_SAME) &&
                     !clone->q->limits.max_write_same_sectors))
                disable_write_same(tio->md);

        if (r <= 0)
                /* The target wants to complete the I/O */
                dm_end_request(clone, r);
        else if (r == DM_ENDIO_INCOMPLETE)
                /* The target will handle the I/O */
                return;
        else if (r == DM_ENDIO_REQUEUE)
                /* The target wants to requeue the I/O */
                dm_requeue_original_request(tio, false);
        else {
                DMWARN("unimplemented target endio return value: %d", r);
                BUG();
        }
}

/*
 * Request completion handler for request-based dm
 */
static void dm_softirq_done(struct request *rq)
{
        bool mapped = true;
        struct dm_rq_target_io *tio = tio_from_request(rq);
        struct request *clone = tio->clone;
        int rw;

        if (!clone) {
                rq_end_stats(tio->md, rq);
                rw = rq_data_dir(rq);
                if (!rq->q->mq_ops) {
                        blk_end_request_all(rq, tio->error);
                        rq_completed(tio->md, rw, false);
                        free_old_rq_tio(tio);
                } else {
                        blk_mq_end_request(rq, tio->error);
                        rq_completed(tio->md, rw, false);
                }
                return;
        }

        if (rq->cmd_flags & REQ_FAILED)
                mapped = false;

        dm_done(clone, tio->error, mapped);
}

/*
 * Complete the clone and the original request with the error status
 * through softirq context.
 */
static void dm_complete_request(struct request *rq, int error)
{
        struct dm_rq_target_io *tio = tio_from_request(rq);

        tio->error = error;
        if (!rq->q->mq_ops)
                blk_complete_request(rq);
        else
                blk_mq_complete_request(rq, error);
}
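
/*
 * Both completion paths above converge on dm_softirq_done(): the old
 * path via blk_complete_request() and the softirq-done hook installed
 * in dm_old_init_request_queue(), the blk-mq path via the .complete
 * callback in dm_mq_ops.  The actual error disposition therefore always
 * happens in softirq context, outside any queue lock.
 */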

/*
 * Complete the unmapped clone and the original request with the error status
 * through softirq context.
 * The target's rq_end_io() function isn't called.
 * This may be used when the target's map_rq() or clone_and_map_rq() functions fail.
 */
static void dm_kill_unmapped_request(struct request *rq, int error)
{
        rq->cmd_flags |= REQ_FAILED;
        dm_complete_request(rq, error);
}

/*
 * Called with the clone's queue lock held (in the case of .request_fn)
 */
static void end_clone_request(struct request *clone, int error)
{
        struct dm_rq_target_io *tio = clone->end_io_data;

        if (!clone->q->mq_ops) {
                /*
                 * This is just for cleaning up the information of the
                 * queue in which the clone was dispatched.
                 * The clone is *not* actually freed here, because it was
                 * allocated from dm's own mempool (REQ_ALLOCED isn't set).
                 */
                __blk_put_request(clone->q, clone);
        }

        /*
         * Actual request completion is done in a softirq context which doesn't
         * hold the clone's queue lock. Otherwise, deadlock could occur because:
         * - another request may be submitted by the upper level driver
         *   of the stacking during the completion
         * - the submission which requires queue lock may be done
         *   against this clone's queue
         */
        dm_complete_request(tio->orig, error);
}

static void dm_dispatch_clone_request(struct request *clone, struct request *rq)
{
        int r;

        if (blk_queue_io_stat(clone->q))
                clone->cmd_flags |= REQ_IO_STAT;

        clone->start_time = jiffies;
        r = blk_insert_cloned_request(clone->q, clone);
        if (r)
                /* must complete clone in terms of original request */
                dm_complete_request(rq, r);
}
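
/*
 * blk_insert_cloned_request() submits an already-assembled clone to the
 * underlying device's queue; the clone does not go through bio-level
 * merging again.  If the insert fails (for instance because the lower
 * queue is dying), the failure is propagated by completing the original
 * request with the error, per the comment above.
 */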

static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
                                 void *data)
{
        struct dm_rq_target_io *tio = data;
        struct dm_rq_clone_bio_info *info =
                container_of(bio, struct dm_rq_clone_bio_info, clone);

        info->orig = bio_orig;
        info->tio = tio;
        bio->bi_end_io = end_clone_bio;

        return 0;
}

static int setup_clone(struct request *clone, struct request *rq,
                       struct dm_rq_target_io *tio, gfp_t gfp_mask)
{
        int r;

        r = blk_rq_prep_clone(clone, rq, tio->md->bs, gfp_mask,
                              dm_rq_bio_constructor, tio);
        if (r)
                return r;

        clone->cmd = rq->cmd;
        clone->cmd_len = rq->cmd_len;
        clone->sense = rq->sense;
        clone->end_io = end_clone_request;
        clone->end_io_data = tio;

        tio->clone = clone;

        return 0;
}

static struct request *clone_old_rq(struct request *rq, struct mapped_device *md,
                                    struct dm_rq_target_io *tio, gfp_t gfp_mask)
{
        /*
         * Create clone for use with .request_fn request_queue
         */
        struct request *clone;

        clone = alloc_old_clone_request(md, gfp_mask);
        if (!clone)
                return NULL;

        blk_rq_init(NULL, clone);
        if (setup_clone(clone, rq, tio, gfp_mask)) {
                /* -ENOMEM */
                free_old_clone_request(md, clone);
                return NULL;
        }

        return clone;
}

static void map_tio_request(struct kthread_work *work);

static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
                     struct mapped_device *md)
{
        tio->md = md;
        tio->ti = NULL;
        tio->clone = NULL;
        tio->orig = rq;
        tio->error = 0;
        /*
         * Avoid initializing info for blk-mq; it passes
         * target-specific data through info.ptr
         * (see: dm_mq_init_request)
         */
        if (!md->init_tio_pdu)
                memset(&tio->info, 0, sizeof(tio->info));
        if (md->kworker_task)
                init_kthread_work(&tio->work, map_tio_request);
}

static struct dm_rq_target_io *dm_old_prep_tio(struct request *rq,
                                               struct mapped_device *md,
                                               gfp_t gfp_mask)
{
        struct dm_rq_target_io *tio;
        int srcu_idx;
        struct dm_table *table;

        tio = alloc_old_rq_tio(md, gfp_mask);
        if (!tio)
                return NULL;

        init_tio(tio, rq, md);

        table = dm_get_live_table(md, &srcu_idx);
        /*
         * Must clone a request if this .request_fn DM device
         * is stacked on .request_fn device(s).
         */
        if (!dm_table_all_blk_mq_devices(table)) {
                if (!clone_old_rq(rq, md, tio, gfp_mask)) {
                        dm_put_live_table(md, srcu_idx);
                        free_old_rq_tio(tio);
                        return NULL;
                }
        }
        dm_put_live_table(md, srcu_idx);

        return tio;
}

/*
 * Called with the queue lock held.
 */
static int dm_old_prep_fn(struct request_queue *q, struct request *rq)
{
        struct mapped_device *md = q->queuedata;
        struct dm_rq_target_io *tio;

        if (unlikely(rq->special)) {
                DMWARN("Already has something in rq->special.");
                return BLKPREP_KILL;
        }

        tio = dm_old_prep_tio(rq, md, GFP_ATOMIC);
        if (!tio)
                return BLKPREP_DEFER;

        rq->special = tio;
        rq->cmd_flags |= REQ_DONTPREP;

        return BLKPREP_OK;
}
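
/*
 * The BLKPREP_* return values follow the block layer's .prep_rq_fn
 * contract: BLKPREP_OK lets the request be dispatched, BLKPREP_DEFER
 * leaves it on the queue to be retried later (used here when the
 * GFP_ATOMIC tio allocation fails), and BLKPREP_KILL fails the request
 * outright.  REQ_DONTPREP keeps the request from being prepped twice if
 * it is requeued (it is cleared again in dm_unprep_request()).
 */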

/*
 * Returns:
 * DM_MAPIO_*       : the request has been processed as indicated
 * DM_MAPIO_REQUEUE : the original request needs to be immediately requeued
 * < 0              : the request was completed due to failure
 */
static int map_request(struct dm_rq_target_io *tio)
{
        int r;
        struct dm_target *ti = tio->ti;
        struct mapped_device *md = tio->md;
        struct request *rq = tio->orig;
        struct request *clone = NULL;

        if (tio->clone) {
                clone = tio->clone;
                r = ti->type->map_rq(ti, clone, &tio->info);
                if (r == DM_MAPIO_DELAY_REQUEUE)
                        return DM_MAPIO_REQUEUE; /* .request_fn requeue is always immediate */
        } else {
                r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
                if (r < 0) {
                        /* The target wants to complete the I/O */
                        dm_kill_unmapped_request(rq, r);
                        return r;
                }
                if (r == DM_MAPIO_REMAPPED &&
                    setup_clone(clone, rq, tio, GFP_ATOMIC)) {
                        /* -ENOMEM */
                        ti->type->release_clone_rq(clone);
                        return DM_MAPIO_REQUEUE;
                }
        }

        switch (r) {
        case DM_MAPIO_SUBMITTED:
                /* The target has taken the I/O to submit by itself later */
                break;
        case DM_MAPIO_REMAPPED:
                /* The target has remapped the I/O so dispatch it */
                trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
                                     blk_rq_pos(rq));
                dm_dispatch_clone_request(clone, rq);
                break;
        case DM_MAPIO_REQUEUE:
                /* The target wants to requeue the I/O */
                break;
        case DM_MAPIO_DELAY_REQUEUE:
                /* The target wants to requeue the I/O after a delay */
                dm_requeue_original_request(tio, true);
                break;
        default:
                if (r > 0) {
                        DMWARN("unimplemented target map return value: %d", r);
                        BUG();
                }

                /* The target wants to complete the I/O */
                dm_kill_unmapped_request(rq, r);
        }

        return r;
}

static void dm_start_request(struct mapped_device *md, struct request *orig)
{
        if (!orig->q->mq_ops)
                blk_start_request(orig);
        else
                blk_mq_start_request(orig);
        atomic_inc(&md->pending[rq_data_dir(orig)]);

        if (md->seq_rq_merge_deadline_usecs) {
                md->last_rq_pos = rq_end_sector(orig);
                md->last_rq_rw = rq_data_dir(orig);
                md->last_rq_start_time = ktime_get();
        }

        if (unlikely(dm_stats_used(&md->stats))) {
                struct dm_rq_target_io *tio = tio_from_request(orig);
                tio->duration_jiffies = jiffies;
                tio->n_sectors = blk_rq_sectors(orig);
                dm_stats_account_io(&md->stats, rq_data_dir(orig),
                                    blk_rq_pos(orig), tio->n_sectors, false, 0,
                                    &tio->stats_aux);
        }

        /*
         * Hold the md reference here for the in-flight I/O.
         * We can't rely on the reference count held by the device opener,
         * because the device may be closed during the request completion
         * when all bios are completed.
         * See the comment in rq_completed() too.
         */
        dm_get(md);
}

static void map_tio_request(struct kthread_work *work)
{
        struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work);

        if (map_request(tio) == DM_MAPIO_REQUEUE)
                dm_requeue_original_request(tio, false);
}

ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
{
        return sprintf(buf, "%u\n", md->seq_rq_merge_deadline_usecs);
}

#define MAX_SEQ_RQ_MERGE_DEADLINE_USECS 100000

ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
                                                     const char *buf, size_t count)
{
        unsigned deadline;

        if (dm_get_md_type(md) != DM_TYPE_REQUEST_BASED)
                return count;

        if (kstrtouint(buf, 10, &deadline))
                return -EINVAL;

        if (deadline > MAX_SEQ_RQ_MERGE_DEADLINE_USECS)
                deadline = MAX_SEQ_RQ_MERGE_DEADLINE_USECS;

        md->seq_rq_merge_deadline_usecs = deadline;

        return count;
}

static bool dm_old_request_peeked_before_merge_deadline(struct mapped_device *md)
{
        ktime_t kt_deadline;

        if (!md->seq_rq_merge_deadline_usecs)
                return false;

        kt_deadline = ns_to_ktime((u64)md->seq_rq_merge_deadline_usecs * NSEC_PER_USEC);
        kt_deadline = ktime_add_safe(md->last_rq_start_time, kt_deadline);

        return !ktime_after(ktime_get(), kt_deadline);
}
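
/*
 * The deadline is exposed through dm's sysfs attribute group; assuming
 * the standard dm sysfs layout, for a device named dm-0 that would be:
 *
 *      echo 100 > /sys/block/dm-0/dm/rq_based_seq_io_merge_deadline
 *
 * which gives dm_old_request_fn() a 100us window (capped at
 * MAX_SEQ_RQ_MERGE_DEADLINE_USECS) in which a single-bio sequential
 * request may be held back in the hope of being merged with its
 * successor.  The default of 0 disables the heuristic.
 */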

/*
 * q->request_fn for old request-based dm.
 * Called with the queue lock held.
 */
static void dm_old_request_fn(struct request_queue *q)
{
        struct mapped_device *md = q->queuedata;
        struct dm_target *ti = md->immutable_target;
        struct request *rq;
        struct dm_rq_target_io *tio;
        sector_t pos = 0;

        if (unlikely(!ti)) {
                int srcu_idx;
                struct dm_table *map = dm_get_live_table(md, &srcu_idx);

                ti = dm_table_find_target(map, pos);
                dm_put_live_table(md, srcu_idx);
        }

        /*
         * For suspend, check blk_queue_stopped() and increment
         * ->pending within a single queue_lock, so that we do not
         * increment the number of in-flight I/Os after the queue is
         * stopped in dm_suspend().
         */
        while (!blk_queue_stopped(q)) {
                rq = blk_peek_request(q);
                if (!rq)
                        return;

                /* always use block 0 to find the target for flushes for now */
                pos = 0;
                if (req_op(rq) != REQ_OP_FLUSH)
                        pos = blk_rq_pos(rq);

                if ((dm_old_request_peeked_before_merge_deadline(md) &&
                     md_in_flight(md) && rq->bio && rq->bio->bi_vcnt == 1 &&
                     md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq)) ||
                    (ti->type->busy && ti->type->busy(ti))) {
                        blk_delay_queue(q, 10);
                        return;
                }

                dm_start_request(md, rq);

                tio = tio_from_request(rq);
                /* Establish tio->ti before queuing work (map_tio_request) */
                tio->ti = ti;
                queue_kthread_work(&md->kworker, &tio->work);
                BUG_ON(!irqs_disabled());
        }
}
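
/*
 * A sketch of the hold-back condition above: dispatch is delayed by
 * 10ms when either (a) the merge deadline has not yet expired, I/O is
 * already in flight, and the peeked request is a single-bio request
 * starting exactly where the previous one ended with the same data
 * direction -- i.e. it looks like part of a sequential stream that the
 * elevator may still merge -- or (b) the target's ->busy() hook, if
 * any, reports congestion.
 */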

/*
 * Fully initialize a .request_fn request-based queue.
 */
int dm_old_init_request_queue(struct mapped_device *md)
{
        /* Fully initialize the queue */
        if (!blk_init_allocated_queue(md->queue, dm_old_request_fn, NULL))
                return -EINVAL;

        /* disable dm_old_request_fn's merge heuristic by default */
        md->seq_rq_merge_deadline_usecs = 0;

        dm_init_normal_md_queue(md);
        blk_queue_softirq_done(md->queue, dm_softirq_done);
        blk_queue_prep_rq(md->queue, dm_old_prep_fn);

        /* Initialize the request-based DM worker thread */
        init_kthread_worker(&md->kworker);
        md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
                                       "kdmwork-%s", dm_device_name(md));
        if (IS_ERR(md->kworker_task))
                return PTR_ERR(md->kworker_task);

        elv_register_queue(md->queue);

        return 0;
}

static int dm_mq_init_request(void *data, struct request *rq,
                              unsigned int hctx_idx, unsigned int request_idx,
                              unsigned int numa_node)
{
        struct mapped_device *md = data;
        struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);

        /*
         * Must initialize md member of tio, otherwise it won't
         * be available in dm_mq_queue_rq.
         */
        tio->md = md;

        if (md->init_tio_pdu) {
                /* target-specific per-io data is immediately after the tio */
                tio->info.ptr = tio + 1;
        }

        return 0;
}

static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
                          const struct blk_mq_queue_data *bd)
{
        struct request *rq = bd->rq;
        struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
        struct mapped_device *md = tio->md;
        struct dm_target *ti = md->immutable_target;

        if (unlikely(!ti)) {
                int srcu_idx;
                struct dm_table *map = dm_get_live_table(md, &srcu_idx);

                ti = dm_table_find_target(map, 0);
                dm_put_live_table(md, srcu_idx);
        }

        /*
         * On suspend dm_stop_queue() handles stopping the blk-mq
         * request_queue BUT: even though the hw_queues are marked
         * BLK_MQ_S_STOPPED at that point there is still a race that
         * allows block/blk-mq.c to call ->queue_rq against a hctx
         * that it really shouldn't. The following check guards
         * against this rarity (albeit _not_ race-free).
         */
        if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
                return BLK_MQ_RQ_QUEUE_BUSY;

        if (ti->type->busy && ti->type->busy(ti))
                return BLK_MQ_RQ_QUEUE_BUSY;

        dm_start_request(md, rq);

        /* Init tio using md established in .init_request */
        init_tio(tio, rq, md);

        /* Establish tio->ti before calling map_request() */
        tio->ti = ti;

        /* Direct call is fine since .queue_rq allows allocations */
        if (map_request(tio) == DM_MAPIO_REQUEUE) {
                /* Undo dm_start_request() before requeuing */
                rq_end_stats(md, rq);
                rq_completed(md, rq_data_dir(rq), false);
                return BLK_MQ_RQ_QUEUE_BUSY;
        }

        return BLK_MQ_RQ_QUEUE_OK;
}
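
/*
 * Returning BLK_MQ_RQ_QUEUE_BUSY tells the blk-mq core to hold on to
 * the request and re-dispatch it later, so DM_MAPIO_REQUEUE is
 * translated into the block layer's own retry mechanism here rather
 * than going through dm_requeue_original_request() as the kthread
 * path in map_tio_request() does.
 */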

static struct blk_mq_ops dm_mq_ops = {
        .queue_rq = dm_mq_queue_rq,
        .map_queue = blk_mq_map_queue,
        .complete = dm_softirq_done,
        .init_request = dm_mq_init_request,
};

int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
{
        struct request_queue *q;
        struct dm_target *immutable_tgt;
        int err;

        if (!dm_table_all_blk_mq_devices(t)) {
                DMERR("request-based dm-mq may only be stacked on blk-mq device(s)");
                return -EINVAL;
        }

        md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id);
        if (!md->tag_set)
                return -ENOMEM;

        md->tag_set->ops = &dm_mq_ops;
        md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
        md->tag_set->numa_node = md->numa_node_id;
        md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
        md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();
        md->tag_set->driver_data = md;

        md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);
        immutable_tgt = dm_table_get_immutable_target(t);
        if (immutable_tgt && immutable_tgt->per_io_data_size) {
                /* any target-specific per-io data is immediately after the tio */
                md->tag_set->cmd_size += immutable_tgt->per_io_data_size;
                md->init_tio_pdu = true;
        }

        err = blk_mq_alloc_tag_set(md->tag_set);
        if (err)
                goto out_kfree_tag_set;

        q = blk_mq_init_allocated_queue(md->tag_set, md->queue);
        if (IS_ERR(q)) {
                err = PTR_ERR(q);
                goto out_tag_set;
        }
        dm_init_md_queue(md);

        /* backfill 'mq' sysfs registration normally done in blk_register_queue */
        blk_mq_register_disk(md->disk);

        return 0;

out_tag_set:
        blk_mq_free_tag_set(md->tag_set);
out_kfree_tag_set:
        kfree(md->tag_set);

        return err;
}

void dm_mq_cleanup_mapped_device(struct mapped_device *md)
{
        if (md->tag_set) {
                blk_mq_free_tag_set(md->tag_set);
                kfree(md->tag_set);
        }
}

module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");

module_param(use_blk_mq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices");

module_param(dm_mq_nr_hw_queues, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_mq_nr_hw_queues, "Number of hardware queues for request-based dm-mq devices");

module_param(dm_mq_queue_depth, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_mq_queue_depth, "Queue depth for request-based dm-mq devices");
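
/*
 * Since these parameters are declared with S_IWUSR they can be changed
 * at runtime as well as at load time.  A usage sketch, assuming this
 * file is built into the dm_mod module:
 *
 *      modprobe dm_mod dm_mq_nr_hw_queues=4 dm_mq_queue_depth=1024
 *      echo 4096 > /sys/module/dm_mod/parameters/dm_mq_queue_depth
 *
 * New values take effect the next time the corresponding getter runs,
 * e.g. when the next dm-mq request_queue is created.
 */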