/*
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"

#include <linux/elevator.h> /* for rq_end_sector() */
#include <linux/blk-mq.h>

#define DM_MSG_PREFIX "core-rq"

#define DM_MQ_NR_HW_QUEUES 1
#define DM_MQ_QUEUE_DEPTH 2048
static unsigned dm_mq_nr_hw_queues = DM_MQ_NR_HW_QUEUES;
static unsigned dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH;

/*
 * Request-based DM's mempools' reserved IOs set by the user.
 */
#define RESERVED_REQUEST_BASED_IOS	256
static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;

static bool use_blk_mq = IS_ENABLED(CONFIG_DM_MQ_DEFAULT);

bool dm_use_blk_mq_default(void)
{
	return use_blk_mq;
}

bool dm_use_blk_mq(struct mapped_device *md)
{
	return md->use_blk_mq;
}
EXPORT_SYMBOL_GPL(dm_use_blk_mq);

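/*
 * Return the number of IOs reserved for request-based DM's mempools,
 * clamped to DM_RESERVED_MAX_IOS.
 */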
unsigned dm_get_reserved_rq_based_ios(void)
{
	return __dm_get_module_param(&reserved_rq_based_ios,
				     RESERVED_REQUEST_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);

static unsigned dm_get_blk_mq_nr_hw_queues(void)
{
	return __dm_get_module_param(&dm_mq_nr_hw_queues, 1, 32);
}

static unsigned dm_get_blk_mq_queue_depth(void)
{
	return __dm_get_module_param(&dm_mq_queue_depth,
				     DM_MQ_QUEUE_DEPTH, BLK_MQ_MAX_DEPTH);
}

int dm_request_based(struct mapped_device *md)
{
	return blk_queue_stackable(md->queue);
}

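/*
 * Queue start/stop helpers: the dm_old_* variants drive a .request_fn
 * queue under its queue_lock, the dm_mq_* variants (un)quiesce a
 * blk-mq queue.
 */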
static void dm_old_start_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	if (blk_queue_stopped(q))
		blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

static void dm_mq_start_queue(struct request_queue *q)
{
	blk_mq_unquiesce_queue(q);
	blk_mq_kick_requeue_list(q);
}

void dm_start_queue(struct request_queue *q)
{
	if (!q->mq_ops)
		dm_old_start_queue(q);
	else
		dm_mq_start_queue(q);
}

static void dm_old_stop_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	if (!blk_queue_stopped(q))
		blk_stop_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

static void dm_mq_stop_queue(struct request_queue *q)
{
	if (blk_mq_queue_stopped(q))
		return;

	blk_mq_quiesce_queue(q);
}

void dm_stop_queue(struct request_queue *q)
{
	if (!q->mq_ops)
		dm_old_stop_queue(q);
	else
		dm_mq_stop_queue(q);
}

/*
 * Partial completion handling for request-based dm
 */
static void end_clone_bio(struct bio *clone)
{
	struct dm_rq_clone_bio_info *info =
		container_of(clone, struct dm_rq_clone_bio_info, clone);
	struct dm_rq_target_io *tio = info->tio;
	struct bio *bio = info->orig;
	unsigned int nr_bytes = info->orig->bi_iter.bi_size;
	blk_status_t error = clone->bi_status;

	bio_put(clone);

	if (tio->error)
		/*
		 * An error has already been detected on the request.
		 * Once an error has occurred, just let clone->end_io()
		 * handle the remainder.
		 */
		return;
	else if (error) {
		/*
		 * Don't report the error to the upper layer yet.
		 * The error handling decision is made by the target driver
		 * when the request is completed.
		 */
		tio->error = error;
		return;
	}

	/*
	 * I/O for the bio successfully completed.
	 * Report the data completion to the upper layer.
	 */

	/*
	 * bios are processed from the head of the list.
	 * So the completing bio should always be rq->bio.
	 * If it isn't, something is wrong.
	 */
	if (tio->orig->bio != bio)
		DMERR("bio completion is going in the middle of the request");

	/*
	 * Update the original request.
	 * Do not use blk_end_request() here, because it may complete
	 * the original request before the clone, and break the ordering.
	 */
	blk_update_request(tio->orig, BLK_STS_OK, nr_bytes);
}

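/* The tio is allocated as the request's driver-private data (pdu). */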
static struct dm_rq_target_io *tio_from_request(struct request *rq)
{
	return blk_mq_rq_to_pdu(rq);
}

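/* Account a completed request in dm-stats, if statistics are enabled. */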
static void rq_end_stats(struct mapped_device *md, struct request *orig)
{
	if (unlikely(dm_stats_used(&md->stats))) {
		struct dm_rq_target_io *tio = tio_from_request(orig);
		tio->duration_jiffies = jiffies - tio->duration_jiffies;
		dm_stats_account_io(&md->stats, rq_data_dir(orig),
				    blk_rq_pos(orig), tio->n_sectors, true,
				    tio->duration_jiffies, &tio->stats_aux);
	}
}

/*
 * Don't touch any member of the md after calling this function because
 * the md may be freed in dm_put() at the end of this function.
 * Alternatively, take a reference with dm_get() before calling this
 * function and drop it with dm_put() afterwards.
 */
static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
{
	struct request_queue *q = md->queue;
	unsigned long flags;

	atomic_dec(&md->pending[rw]);

	/* nudge anyone waiting on suspend queue */
	if (!md_in_flight(md))
		wake_up(&md->wait);

	/*
	 * Run this off this callpath, as drivers could invoke end_io while
	 * inside their request_fn (and holding the queue lock). Calling
	 * back into ->request_fn() could deadlock attempting to grab the
	 * queue lock again.
	 */
	if (!q->mq_ops && run_queue) {
		spin_lock_irqsave(q->queue_lock, flags);
		blk_run_queue_async(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}

	/*
	 * dm_put() must be at the end of this function. See the comment above.
	 */
	dm_put(md);
}

/*
 * Complete the clone and the original request.
 * Must be called without clone's queue lock held,
 * see end_clone_request() for more details.
 */
static void dm_end_request(struct request *clone, blk_status_t error)
{
	int rw = rq_data_dir(clone);
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;

	blk_rq_unprep_clone(clone);
	tio->ti->type->release_clone_rq(clone);

	rq_end_stats(md, rq);
	if (!rq->q->mq_ops)
		blk_end_request_all(rq, error);
	else
		blk_mq_end_request(rq, error);
	rq_completed(md, rw, true);
}

/*
 * Requeue the original request of a clone.
 */
static void dm_old_requeue_request(struct request *rq, unsigned long delay_ms)
{
	struct request_queue *q = rq->q;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, rq);
	blk_delay_queue(q, delay_ms);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs)
{
	blk_mq_delay_kick_requeue_list(q, msecs);
}

void dm_mq_kick_requeue_list(struct mapped_device *md)
{
	__dm_mq_kick_requeue_list(dm_get_md_queue(md), 0);
}
EXPORT_SYMBOL(dm_mq_kick_requeue_list);

static void dm_mq_delay_requeue_request(struct request *rq, unsigned long msecs)
{
	blk_mq_requeue_request(rq, false);
	__dm_mq_kick_requeue_list(rq->q, msecs);
}

static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_requeue)
{
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	int rw = rq_data_dir(rq);
	unsigned long delay_ms = delay_requeue ? 100 : 0;

	rq_end_stats(md, rq);
	if (tio->clone) {
		blk_rq_unprep_clone(tio->clone);
		tio->ti->type->release_clone_rq(tio->clone);
	}

	if (!rq->q->mq_ops)
		dm_old_requeue_request(rq, delay_ms);
	else
		dm_mq_delay_requeue_request(rq, delay_ms);

	rq_completed(md, rw, false);
}

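/*
 * Complete or requeue the clone and the original request based on the
 * target's rq_end_io() decision (if any).
 */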
static void dm_done(struct request *clone, blk_status_t error, bool mapped)
{
	int r = DM_ENDIO_DONE;
	struct dm_rq_target_io *tio = clone->end_io_data;
	dm_request_endio_fn rq_end_io = NULL;

	if (tio->ti) {
		rq_end_io = tio->ti->type->rq_end_io;

		if (mapped && rq_end_io)
			r = rq_end_io(tio->ti, clone, error, &tio->info);
	}

	if (unlikely(error == BLK_STS_TARGET)) {
		if (req_op(clone) == REQ_OP_WRITE_SAME &&
		    !clone->q->limits.max_write_same_sectors)
			disable_write_same(tio->md);
		if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
		    !clone->q->limits.max_write_zeroes_sectors)
			disable_write_zeroes(tio->md);
	}

	switch (r) {
	case DM_ENDIO_DONE:
		/* The target wants to complete the I/O */
		dm_end_request(clone, error);
		break;
	case DM_ENDIO_INCOMPLETE:
		/* The target will handle the I/O */
		return;
	case DM_ENDIO_REQUEUE:
		/* The target wants to requeue the I/O */
		dm_requeue_original_request(tio, false);
		break;
	default:
		DMWARN("unimplemented target endio return value: %d", r);
		BUG();
	}
}

/*
 * Request completion handler for request-based dm
 */
static void dm_softirq_done(struct request *rq)
{
	bool mapped = true;
	struct dm_rq_target_io *tio = tio_from_request(rq);
	struct request *clone = tio->clone;
	int rw;

	if (!clone) {
		struct mapped_device *md = tio->md;

		rq_end_stats(md, rq);
		rw = rq_data_dir(rq);
		if (!rq->q->mq_ops)
			blk_end_request_all(rq, tio->error);
		else
			blk_mq_end_request(rq, tio->error);
		rq_completed(md, rw, false);
		return;
	}

	if (rq->rq_flags & RQF_FAILED)
		mapped = false;

	dm_done(clone, tio->error, mapped);
}

/*
 * Complete the clone and the original request with the error status
 * through softirq context.
 */
static void dm_complete_request(struct request *rq, blk_status_t error)
{
	struct dm_rq_target_io *tio = tio_from_request(rq);

	tio->error = error;
	if (!rq->q->mq_ops)
		blk_complete_request(rq);
	else
		blk_mq_complete_request(rq);
}

/*
 * Complete the unmapped clone and the original request with the error
 * status through softirq context.
 * The target's rq_end_io() function isn't called.
 * This may be used when the target's clone_and_map_rq() function fails.
 */
static void dm_kill_unmapped_request(struct request *rq, blk_status_t error)
{
	rq->rq_flags |= RQF_FAILED;
	dm_complete_request(rq, error);
}

/*
 * Called with the clone's queue lock held (in the case of .request_fn)
 */
static void end_clone_request(struct request *clone, blk_status_t error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;

	/*
	 * Actual request completion is done in a softirq context which doesn't
	 * hold the clone's queue lock. Otherwise, deadlock could occur because:
	 *     - another request may be submitted by the upper level driver
	 *       of the stacking during the completion
	 *     - the submission which requires queue lock may be done
	 *       against this clone's queue
	 */
	dm_complete_request(tio->orig, error);
}

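/*
 * Insert the clone into the underlying device's queue; if insertion
 * fails, complete the original request with the returned status.
 */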
static void dm_dispatch_clone_request(struct request *clone, struct request *rq)
{
	blk_status_t r;

	if (blk_queue_io_stat(clone->q))
		clone->rq_flags |= RQF_IO_STAT;

	clone->start_time = jiffies;
	r = blk_insert_cloned_request(clone->q, clone);
	if (r)
		/* must complete clone in terms of original request */
		dm_complete_request(rq, r);
}

static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
				 void *data)
{
	struct dm_rq_target_io *tio = data;
	struct dm_rq_clone_bio_info *info =
		container_of(bio, struct dm_rq_clone_bio_info, clone);

	info->orig = bio_orig;
	info->tio = tio;
	bio->bi_end_io = end_clone_bio;

	return 0;
}

static int setup_clone(struct request *clone, struct request *rq,
		       struct dm_rq_target_io *tio, gfp_t gfp_mask)
{
	int r;

	r = blk_rq_prep_clone(clone, rq, tio->md->bs, gfp_mask,
			      dm_rq_bio_constructor, tio);
	if (r)
		return r;

	clone->end_io = end_clone_request;
	clone->end_io_data = tio;

	tio->clone = clone;

	return 0;
}

static void map_tio_request(struct kthread_work *work);

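/* (Re)initialize the tio before the request is (re)mapped. */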
static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
		     struct mapped_device *md)
{
	tio->md = md;
	tio->ti = NULL;
	tio->clone = NULL;
	tio->orig = rq;
	tio->error = 0;
	/*
	 * Avoid initializing info for blk-mq; it passes
	 * target-specific data through info.ptr
	 * (see: dm_mq_init_request)
	 */
	if (!md->init_tio_pdu)
		memset(&tio->info, 0, sizeof(tio->info));
	if (md->kworker_task)
		kthread_init_work(&tio->work, map_tio_request);
}

/*
 * Returns:
 * DM_MAPIO_*       : the request has been processed as indicated
 * DM_MAPIO_REQUEUE : the original request needs to be immediately requeued
 * < 0              : the request was completed due to failure
 */
static int map_request(struct dm_rq_target_io *tio)
{
	int r;
	struct dm_target *ti = tio->ti;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	struct request *clone = NULL;

	r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
	switch (r) {
	case DM_MAPIO_SUBMITTED:
		/* The target has taken the I/O to submit by itself later */
		break;
	case DM_MAPIO_REMAPPED:
		if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
			/* -ENOMEM */
			ti->type->release_clone_rq(clone);
			return DM_MAPIO_REQUEUE;
		}

		/* The target has remapped the I/O so dispatch it */
		trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
				     blk_rq_pos(rq));
		dm_dispatch_clone_request(clone, rq);
		break;
	case DM_MAPIO_REQUEUE:
		/* The target wants to requeue the I/O */
		break;
	case DM_MAPIO_DELAY_REQUEUE:
		/* The target wants to requeue the I/O after a delay */
		dm_requeue_original_request(tio, true);
		break;
	case DM_MAPIO_KILL:
		/* The target wants to complete the I/O */
		dm_kill_unmapped_request(rq, BLK_STS_IOERR);
		break;
	default:
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}

	return r;
}

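/*
 * Start the original request: account it as in-flight, record state for
 * the merge heuristic and dm-stats, and take an md reference that is
 * dropped again in rq_completed().
 */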
static void dm_start_request(struct mapped_device *md, struct request *orig)
{
	if (!orig->q->mq_ops)
		blk_start_request(orig);
	else
		blk_mq_start_request(orig);
	atomic_inc(&md->pending[rq_data_dir(orig)]);

	if (md->seq_rq_merge_deadline_usecs) {
		md->last_rq_pos = rq_end_sector(orig);
		md->last_rq_rw = rq_data_dir(orig);
		md->last_rq_start_time = ktime_get();
	}

	if (unlikely(dm_stats_used(&md->stats))) {
		struct dm_rq_target_io *tio = tio_from_request(orig);
		tio->duration_jiffies = jiffies;
		tio->n_sectors = blk_rq_sectors(orig);
		dm_stats_account_io(&md->stats, rq_data_dir(orig),
				    blk_rq_pos(orig), tio->n_sectors, false, 0,
				    &tio->stats_aux);
	}

	/*
	 * Hold the md reference here for the in-flight I/O.
	 * We can't rely on the reference count taken by the device opener,
	 * because the device may be closed during the request completion
	 * when all bios are completed.
	 * See the comment in rq_completed() too.
	 */
	dm_get(md);
}

static int __dm_rq_init_rq(struct mapped_device *md, struct request *rq)
{
	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);

	/*
	 * Must initialize md member of tio, otherwise it won't
	 * be available in dm_mq_queue_rq.
	 */
	tio->md = md;

	if (md->init_tio_pdu) {
		/* target-specific per-io data is immediately after the tio */
		tio->info.ptr = tio + 1;
	}

	return 0;
}

static int dm_rq_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp)
{
	return __dm_rq_init_rq(q->rq_alloc_data, rq);
}

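/*
 * kthread worker callback for the .request_fn path; DM_MAPIO_REQUEUE
 * from map_request() means the original request must be requeued here.
 */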
static void map_tio_request(struct kthread_work *work)
{
	struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work);

	if (map_request(tio) == DM_MAPIO_REQUEUE)
		dm_requeue_original_request(tio, false);
}

ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
{
	return sprintf(buf, "%u\n", md->seq_rq_merge_deadline_usecs);
}

#define MAX_SEQ_RQ_MERGE_DEADLINE_USECS 100000

ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
						     const char *buf, size_t count)
{
	unsigned deadline;

	if (dm_get_md_type(md) != DM_TYPE_REQUEST_BASED)
		return count;

	if (kstrtouint(buf, 10, &deadline))
		return -EINVAL;

	if (deadline > MAX_SEQ_RQ_MERGE_DEADLINE_USECS)
		deadline = MAX_SEQ_RQ_MERGE_DEADLINE_USECS;

	md->seq_rq_merge_deadline_usecs = deadline;

	return count;
}

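/*
 * Return true if the seq_rq_merge_deadline has not yet expired since
 * the last request was started, i.e. dispatch should be delayed to
 * give sequential I/O a chance to merge.
 */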
static bool dm_old_request_peeked_before_merge_deadline(struct mapped_device *md)
{
	ktime_t kt_deadline;

	if (!md->seq_rq_merge_deadline_usecs)
		return false;

	kt_deadline = ns_to_ktime((u64)md->seq_rq_merge_deadline_usecs * NSEC_PER_USEC);
	kt_deadline = ktime_add_safe(md->last_rq_start_time, kt_deadline);

	return !ktime_after(ktime_get(), kt_deadline);
}

/*
 * q->request_fn for old request-based dm.
 * Called with the queue lock held.
 */
static void dm_old_request_fn(struct request_queue *q)
{
	struct mapped_device *md = q->queuedata;
	struct dm_target *ti = md->immutable_target;
	struct request *rq;
	struct dm_rq_target_io *tio;
	sector_t pos = 0;

	if (unlikely(!ti)) {
		int srcu_idx;
		struct dm_table *map = dm_get_live_table(md, &srcu_idx);

		if (unlikely(!map)) {
			dm_put_live_table(md, srcu_idx);
			return;
		}
		ti = dm_table_find_target(map, pos);
		dm_put_live_table(md, srcu_idx);
	}

	/*
	 * For suspend, check blk_queue_stopped() and increment
	 * ->pending within a single queue_lock not to increment the
	 * number of in-flight I/Os after the queue is stopped in
	 * dm_suspend().
	 */
	while (!blk_queue_stopped(q)) {
		rq = blk_peek_request(q);
		if (!rq)
			return;

		/* always use block 0 to find the target for flushes for now */
		pos = 0;
		if (req_op(rq) != REQ_OP_FLUSH)
			pos = blk_rq_pos(rq);

		if ((dm_old_request_peeked_before_merge_deadline(md) &&
		     md_in_flight(md) && rq->bio && !bio_multiple_segments(rq->bio) &&
		     md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq)) ||
		    (ti->type->busy && ti->type->busy(ti))) {
			blk_delay_queue(q, 10);
			return;
		}

		dm_start_request(md, rq);

		tio = tio_from_request(rq);
		init_tio(tio, rq, md);
		/* Establish tio->ti before queuing work (map_tio_request) */
		tio->ti = ti;
		kthread_queue_work(&md->kworker, &tio->work);
		BUG_ON(!irqs_disabled());
	}
}

/*
 * Fully initialize a .request_fn request-based queue.
 */
int dm_old_init_request_queue(struct mapped_device *md, struct dm_table *t)
{
	struct dm_target *immutable_tgt;

	/* Fully initialize the queue */
	md->queue->cmd_size = sizeof(struct dm_rq_target_io);
	md->queue->rq_alloc_data = md;
	md->queue->request_fn = dm_old_request_fn;
	md->queue->init_rq_fn = dm_rq_init_rq;

	immutable_tgt = dm_table_get_immutable_target(t);
	if (immutable_tgt && immutable_tgt->per_io_data_size) {
		/* any target-specific per-io data is immediately after the tio */
		md->queue->cmd_size += immutable_tgt->per_io_data_size;
		md->init_tio_pdu = true;
	}
	if (blk_init_allocated_queue(md->queue) < 0)
		return -EINVAL;

	/* disable dm_old_request_fn's merge heuristic by default */
	md->seq_rq_merge_deadline_usecs = 0;

	dm_init_normal_md_queue(md);
	blk_queue_softirq_done(md->queue, dm_softirq_done);

	/* Initialize the request-based DM worker thread */
	kthread_init_worker(&md->kworker);
	md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
				       "kdmwork-%s", dm_device_name(md));
	if (IS_ERR(md->kworker_task)) {
		int error = PTR_ERR(md->kworker_task);
		md->kworker_task = NULL;
		return error;
	}

	elv_register_queue(md->queue);

	return 0;
}

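/* .init_request callback for the blk-mq path */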
static int dm_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
			      unsigned int hctx_idx, unsigned int numa_node)
{
	return __dm_rq_init_rq(set->driver_data, rq);
}

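/* .queue_rq callback: map and dispatch a request from blk-mq context */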
static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
				   const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
	struct mapped_device *md = tio->md;
	struct dm_target *ti = md->immutable_target;

	if (unlikely(!ti)) {
		int srcu_idx;
		struct dm_table *map = dm_get_live_table(md, &srcu_idx);

		ti = dm_table_find_target(map, 0);
		dm_put_live_table(md, srcu_idx);
	}

	if (ti->type->busy && ti->type->busy(ti))
		return BLK_STS_RESOURCE;

	dm_start_request(md, rq);

	/* Init tio using md established in .init_request */
	init_tio(tio, rq, md);

	/*
	 * Establish tio->ti before calling map_request().
	 */
	tio->ti = ti;

	/* Direct call is fine since .queue_rq allows allocations */
	if (map_request(tio) == DM_MAPIO_REQUEUE) {
		/* Undo dm_start_request() before requeuing */
		rq_end_stats(md, rq);
		rq_completed(md, rq_data_dir(rq), false);
		blk_mq_delay_run_hw_queue(hctx, 100/*ms*/);
		return BLK_STS_RESOURCE;
	}

	return BLK_STS_OK;
}

static const struct blk_mq_ops dm_mq_ops = {
	.queue_rq = dm_mq_queue_rq,
	.complete = dm_softirq_done,
	.init_request = dm_mq_init_request,
};

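/*
 * Fully initialize a blk-mq request-based queue; the blk-mq counterpart
 * of dm_old_init_request_queue().
 */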
int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
{
	struct request_queue *q;
	struct dm_target *immutable_tgt;
	int err;

	if (!dm_table_all_blk_mq_devices(t)) {
		DMERR("request-based dm-mq may only be stacked on blk-mq device(s)");
		return -EINVAL;
	}

	md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id);
	if (!md->tag_set)
		return -ENOMEM;

	md->tag_set->ops = &dm_mq_ops;
	md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
	md->tag_set->numa_node = md->numa_node_id;
	md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
	md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();
	md->tag_set->driver_data = md;

	md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);
	immutable_tgt = dm_table_get_immutable_target(t);
	if (immutable_tgt && immutable_tgt->per_io_data_size) {
		/* any target-specific per-io data is immediately after the tio */
		md->tag_set->cmd_size += immutable_tgt->per_io_data_size;
		md->init_tio_pdu = true;
	}

	err = blk_mq_alloc_tag_set(md->tag_set);
	if (err)
		goto out_kfree_tag_set;

	q = blk_mq_init_allocated_queue(md->tag_set, md->queue);
	if (IS_ERR(q)) {
		err = PTR_ERR(q);
		goto out_tag_set;
	}
	dm_init_md_queue(md);

	/* backfill 'mq' sysfs registration normally done in blk_register_queue */
	err = blk_mq_register_dev(disk_to_dev(md->disk), q);
	if (err)
		goto out_cleanup_queue;

	return 0;

out_cleanup_queue:
	blk_cleanup_queue(q);
out_tag_set:
	blk_mq_free_tag_set(md->tag_set);
out_kfree_tag_set:
	kfree(md->tag_set);

	return err;
}

void dm_mq_cleanup_mapped_device(struct mapped_device *md)
{
	if (md->tag_set) {
		blk_mq_free_tag_set(md->tag_set);
		kfree(md->tag_set);
	}
}

module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");

module_param(use_blk_mq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices");

module_param(dm_mq_nr_hw_queues, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_mq_nr_hw_queues, "Number of hardware queues for request-based dm-mq devices");

module_param(dm_mq_queue_depth, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_mq_queue_depth, "Queue depth for request-based dm-mq devices");