/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-uevent.h"
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/kthread.h>
#include <linux/ktime.h>
#include <linux/elevator.h> /* for rq_end_sector() */
#include <linux/blk-mq.h>

#include <trace/events/block.h>

#define DM_MSG_PREFIX "core"
/*
 * ratelimit state to be used in DMXXX_LIMIT().
 */
DEFINE_RATELIMIT_STATE(dm_ratelimit_state,
		       DEFAULT_RATELIMIT_INTERVAL,
		       DEFAULT_RATELIMIT_BURST);
EXPORT_SYMBOL(dm_ratelimit_state);
/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24
static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_IDR(_minor_idr);

static DEFINE_SPINLOCK(_minor_lock);

static void do_deferred_remove(struct work_struct *w);

static DECLARE_WORK(deferred_remove_work, do_deferred_remove);

static struct workqueue_struct *deferred_remove_workqueue;
/*
 * One of these is allocated per bio.
 */
struct dm_io {
	struct mapped_device *md;
	int error;
	atomic_t io_count;
	struct bio *bio;
	unsigned long start_time;
	spinlock_t endio_lock;
	struct dm_stats_aux stats_aux;
};
/*
 * For request-based dm.
 * One of these is allocated per request.
 */
struct dm_rq_target_io {
	struct mapped_device *md;
	struct dm_target *ti;
	struct request *orig, *clone;
	struct kthread_work work;
	int error;
	union map_info info;
	struct dm_stats_aux stats_aux;
	unsigned long duration_jiffies;
	unsigned n_sectors;
};
/*
 * For request-based dm - the bio clones we allocate are embedded in these
 * structs.
 *
 * We allocate these with bio_alloc_bioset, using the front_pad parameter when
 * the bioset is created - this means the bio has to come at the end of the
 * struct.
 */
struct dm_rq_clone_bio_info {
	struct bio *orig;
	struct dm_rq_target_io *tio;
	struct bio clone;
};
union map_info *dm_get_rq_mapinfo(struct request *rq)
{
	if (rq && rq->end_io_data)
		return &((struct dm_rq_target_io *)rq->end_io_data)->info;
	return NULL;
}
EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);
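/*
 * Illustrative sketch (not from the original source): for a request whose
 * end_io_data points at dm's per-request bookkeeping - the clone requests
 * set up later in this file do this - the per-request map_info can be
 * recovered and used as scratch space.  The "my_state" payload below is
 * hypothetical; only dm_get_rq_mapinfo() and union map_info are real.
 *
 *	union map_info *info = dm_get_rq_mapinfo(clone);
 *
 *	if (info)
 *		info->ptr = my_state;
 */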
#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_MERGE_IS_OPTIONAL 6
#define DMF_DEFERRED_REMOVE 7
#define DMF_SUSPENDED_INTERNALLY 8
/*
 * A dummy definition to make RCU happy.
 * struct dm_table should never be dereferenced in this file.
 */
struct dm_table {
	int undefined__;
};

/*
 * Work processed by per-device workqueue.
 */
struct mapped_device {
	struct srcu_struct io_barrier;
	struct mutex suspend_lock;
	atomic_t holders;
	atomic_t open_count;

	/*
	 * The current mapping.
	 * Use dm_get_live_table{_fast} or take suspend_lock for
	 * dereference.
	 */
	struct dm_table __rcu *map;

	struct list_head table_devices;
	struct mutex table_devices_lock;

	unsigned long flags;

	struct request_queue *queue;
	unsigned type;
	/* Protect queue and type against concurrent access. */
	struct mutex type_lock;

	struct target_type *immutable_target_type;

	struct gendisk *disk;
	char name[16];

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	atomic_t pending[2];
	wait_queue_head_t wait;
	struct work_struct work;
	struct bio_list deferred;
	spinlock_t deferred_lock;

	/*
	 * Processing queue (flush)
	 */
	struct workqueue_struct *wq;

	/*
	 * io objects are allocated from here.
	 */
	mempool_t *io_pool;
	mempool_t *rq_pool;

	struct bio_set *bs;

	atomic_t event_nr;
	wait_queue_head_t eventq;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/*
	 * freeze/thaw support require holding onto a super block
	 */
	struct super_block *frozen_sb;
	struct block_device *bdev;

	/* forced geometry settings */
	struct hd_geometry geometry;

	/* kobject and completion */
	struct dm_kobject_holder kobj_holder;

	/* zero-length flush that will be cloned and submitted to targets */
	struct bio flush_bio;

	/* the number of internal suspends */
	unsigned internal_suspend_count;

	struct dm_stats stats;

	struct kthread_worker kworker;
	struct task_struct *kworker_task;

	/* for request-based merge heuristic in dm_request_fn() */
	unsigned seq_rq_merge_deadline_usecs;
	int last_rq_rw;
	sector_t last_rq_pos;
	ktime_t last_rq_start_time;

	/* for blk-mq request-based DM support */
	struct blk_mq_tag_set tag_set;
	bool use_blk_mq;
};
#ifdef CONFIG_DM_MQ_DEFAULT
static bool use_blk_mq = true;
#else
static bool use_blk_mq = false;
#endif

bool dm_use_blk_mq(struct mapped_device *md)
{
	return md->use_blk_mq;
}
/*
 * For mempools pre-allocation at the table loading time.
 */
struct dm_md_mempools {
	mempool_t *io_pool;
	mempool_t *rq_pool;
	struct bio_set *bs;
};

struct table_device {
	struct list_head list;
	atomic_t count;
	struct dm_dev dm_dev;
};

#define RESERVED_BIO_BASED_IOS		16
#define RESERVED_REQUEST_BASED_IOS	256
#define RESERVED_MAX_IOS		1024
static struct kmem_cache *_io_cache;
static struct kmem_cache *_rq_tio_cache;
static struct kmem_cache *_rq_cache;

/*
 * Bio-based DM's mempools' reserved IOs set by the user.
 */
static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;

/*
 * Request-based DM's mempools' reserved IOs set by the user.
 */
static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;
static unsigned __dm_get_module_param(unsigned *module_param,
				      unsigned def, unsigned max)
{
	unsigned param = ACCESS_ONCE(*module_param);
	unsigned modified_param = 0;

	if (!param)
		modified_param = def;
	else if (param > max)
		modified_param = max;

	if (modified_param) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}
unsigned dm_get_reserved_bio_based_ios(void)
{
	return __dm_get_module_param(&reserved_bio_based_ios,
				     RESERVED_BIO_BASED_IOS, RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);

unsigned dm_get_reserved_rq_based_ios(void)
{
	return __dm_get_module_param(&reserved_rq_based_ios,
				     RESERVED_REQUEST_BASED_IOS, RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);
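/*
 * Illustrative only (not from the original source): with the defaults above,
 * the clamping in __dm_get_module_param() means a caller of
 * dm_get_reserved_bio_based_ios() sees, for example:
 *
 *	reserved_bio_based_ios == 0    ->   16  (fall back to the default)
 *	reserved_bio_based_ios == 100  ->  100  (accepted unchanged)
 *	reserved_bio_based_ios == 5000 -> 1024  (capped at RESERVED_MAX_IOS)
 *
 * An out-of-range value is also written back with cmpxchg(), so a later read
 * of the module parameter (typically via
 * /sys/module/dm_mod/parameters/reserved_bio_based_ios) reports the value
 * actually in effect.
 */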
static int __init local_init(void)
{
	int r = -ENOMEM;

	/* allocate a slab for the dm_ios */
	_io_cache = KMEM_CACHE(dm_io, 0);
	if (!_io_cache)
		return r;

	_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
	if (!_rq_tio_cache)
		goto out_free_io_cache;

	_rq_cache = kmem_cache_create("dm_clone_request", sizeof(struct request),
				      __alignof__(struct request), 0, NULL);
	if (!_rq_cache)
		goto out_free_rq_tio_cache;

	r = dm_uevent_init();
	if (r)
		goto out_free_rq_cache;

	deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
	if (!deferred_remove_workqueue) {
		r = -ENOMEM;
		goto out_uevent_exit;
	}

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_free_workqueue;

	if (!_major)
		_major = r;

	return 0;

out_free_workqueue:
	destroy_workqueue(deferred_remove_workqueue);
out_uevent_exit:
	dm_uevent_exit();
out_free_rq_cache:
	kmem_cache_destroy(_rq_cache);
out_free_rq_tio_cache:
	kmem_cache_destroy(_rq_tio_cache);
out_free_io_cache:
	kmem_cache_destroy(_io_cache);

	return r;
}
static void local_exit(void)
{
	flush_scheduled_work();
	destroy_workqueue(deferred_remove_workqueue);

	kmem_cache_destroy(_rq_cache);
	kmem_cache_destroy(_rq_tio_cache);
	kmem_cache_destroy(_io_cache);
	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}
static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_io_init,
	dm_kcopyd_init,
	dm_interface_init,
	dm_statistics_init,
};

static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_io_exit,
	dm_kcopyd_exit,
	dm_interface_exit,
	dm_statistics_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);
	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();

	/*
	 * Should be empty by this point.
	 */
	idr_destroy(&_minor_idr);
}
/*
 * Block device functions
 */
int dm_deleting_md(struct mapped_device *md)
{
	return test_bit(DMF_DELETING, &md->flags);
}
static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);
out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}
static void dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = disk->private_data;
	if (WARN_ON(!md))
		goto out;

	if (atomic_dec_and_test(&md->open_count) &&
	    (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
		queue_work(deferred_remove_workqueue, &deferred_remove_work);

	dm_put(md);
out:
	spin_unlock(&_minor_lock);
}
int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}
/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md)) {
		r = -EBUSY;
		if (mark_deferred)
			set_bit(DMF_DEFERRED_REMOVE, &md->flags);
	} else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
		r = -EEXIST;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}
int dm_cancel_deferred_remove(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (test_bit(DMF_DELETING, &md->flags))
		r = -EBUSY;
	else
		clear_bit(DMF_DEFERRED_REMOVE, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}
static void do_deferred_remove(struct work_struct *w)
{
	dm_deferred_remove();
}
sector_t dm_get_size(struct mapped_device *md)
{
	return get_capacity(md->disk);
}

struct request_queue *dm_get_md_queue(struct mapped_device *md)
{
	return md->queue;
}

struct dm_stats *dm_get_stats(struct mapped_device *md)
{
	return &md->stats;
}
static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}
static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	int srcu_idx;
	struct dm_table *map;
	struct dm_target *tgt;
	int r = -ENOTTY;

retry:
	map = dm_get_live_table(md, &srcu_idx);

	if (!map || !dm_table_get_size(map))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		goto out;

	tgt = dm_table_get_target(map, 0);
	if (!tgt->type->ioctl)
		goto out;

	if (dm_suspended_md(md)) {
		r = -EAGAIN;
		goto out;
	}

	r = tgt->type->ioctl(tgt, cmd, arg);

out:
	dm_put_live_table(md, srcu_idx);

	if (r == -ENOTCONN) {
		msleep(10);
		goto retry;
	}

	return r;
}
static struct dm_io *alloc_io(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_NOIO);
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
	mempool_free(io, md->io_pool);
}

static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
{
	bio_put(&tio->clone);
}
static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md,
					    gfp_t gfp_mask)
{
	return mempool_alloc(md->io_pool, gfp_mask);
}

static void free_rq_tio(struct dm_rq_target_io *tio)
{
	mempool_free(tio, tio->md->io_pool);
}

static struct request *alloc_clone_request(struct mapped_device *md,
					   gfp_t gfp_mask)
{
	return mempool_alloc(md->rq_pool, gfp_mask);
}

static void free_clone_request(struct mapped_device *md, struct request *rq)
{
	mempool_free(rq, md->rq_pool);
}
static int md_in_flight(struct mapped_device *md)
{
	return atomic_read(&md->pending[READ]) +
	       atomic_read(&md->pending[WRITE]);
}
643 static void start_io_acct(struct dm_io
*io
)
645 struct mapped_device
*md
= io
->md
;
646 struct bio
*bio
= io
->bio
;
648 int rw
= bio_data_dir(bio
);
650 io
->start_time
= jiffies
;
652 cpu
= part_stat_lock();
653 part_round_stats(cpu
, &dm_disk(md
)->part0
);
655 atomic_set(&dm_disk(md
)->part0
.in_flight
[rw
],
656 atomic_inc_return(&md
->pending
[rw
]));
658 if (unlikely(dm_stats_used(&md
->stats
)))
659 dm_stats_account_io(&md
->stats
, bio
->bi_rw
, bio
->bi_iter
.bi_sector
,
660 bio_sectors(bio
), false, 0, &io
->stats_aux
);
663 static void end_io_acct(struct dm_io
*io
)
665 struct mapped_device
*md
= io
->md
;
666 struct bio
*bio
= io
->bio
;
667 unsigned long duration
= jiffies
- io
->start_time
;
669 int rw
= bio_data_dir(bio
);
671 generic_end_io_acct(rw
, &dm_disk(md
)->part0
, io
->start_time
);
673 if (unlikely(dm_stats_used(&md
->stats
)))
674 dm_stats_account_io(&md
->stats
, bio
->bi_rw
, bio
->bi_iter
.bi_sector
,
675 bio_sectors(bio
), true, duration
, &io
->stats_aux
);
678 * After this is decremented the bio must not be touched if it is
681 pending
= atomic_dec_return(&md
->pending
[rw
]);
682 atomic_set(&dm_disk(md
)->part0
.in_flight
[rw
], pending
);
683 pending
+= atomic_read(&md
->pending
[rw
^0x1]);
685 /* nudge anyone waiting on suspend queue */
691 * Add the bio to the list of deferred io.
693 static void queue_io(struct mapped_device
*md
, struct bio
*bio
)
697 spin_lock_irqsave(&md
->deferred_lock
, flags
);
698 bio_list_add(&md
->deferred
, bio
);
699 spin_unlock_irqrestore(&md
->deferred_lock
, flags
);
700 queue_work(md
->wq
, &md
->work
);
/*
 * Everyone (including functions in this file), should use this
 * function to access the md->map field, and make sure they call
 * dm_put_live_table() when finished.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier)
{
	*srcu_idx = srcu_read_lock(&md->io_barrier);

	return srcu_dereference(md->map, &md->io_barrier);
}

void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier)
{
	srcu_read_unlock(&md->io_barrier, srcu_idx);
}

void dm_sync_table(struct mapped_device *md)
{
	synchronize_srcu(&md->io_barrier);
	synchronize_rcu_expedited();
}

/*
 * A fast alternative to dm_get_live_table/dm_put_live_table.
 * The caller must not block between these two functions.
 */
static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
{
	rcu_read_lock();
	return rcu_dereference(md->map);
}

static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
{
	rcu_read_unlock();
}
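/*
 * Illustrative example (not from the original source): the access pattern
 * described above, wrapped in a hypothetical helper.  Only
 * dm_get_live_table(), dm_put_live_table() and dm_table_get_num_targets()
 * are real interfaces; the helper itself exists purely to show the pairing.
 */
static inline unsigned dm_example_count_targets(struct mapped_device *md)
{
	int srcu_idx;
	unsigned num = 0;
	struct dm_table *map = dm_get_live_table(md, &srcu_idx);

	/* The table may be NULL if no mapping has been loaded yet. */
	if (map)
		num = dm_table_get_num_targets(map);

	dm_put_live_table(md, srcu_idx);

	return num;
}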
742 * Open a table device so we can use it as a map destination.
744 static int open_table_device(struct table_device
*td
, dev_t dev
,
745 struct mapped_device
*md
)
747 static char *_claim_ptr
= "I belong to device-mapper";
748 struct block_device
*bdev
;
752 BUG_ON(td
->dm_dev
.bdev
);
754 bdev
= blkdev_get_by_dev(dev
, td
->dm_dev
.mode
| FMODE_EXCL
, _claim_ptr
);
756 return PTR_ERR(bdev
);
758 r
= bd_link_disk_holder(bdev
, dm_disk(md
));
760 blkdev_put(bdev
, td
->dm_dev
.mode
| FMODE_EXCL
);
764 td
->dm_dev
.bdev
= bdev
;
769 * Close a table device that we've been using.
771 static void close_table_device(struct table_device
*td
, struct mapped_device
*md
)
773 if (!td
->dm_dev
.bdev
)
776 bd_unlink_disk_holder(td
->dm_dev
.bdev
, dm_disk(md
));
777 blkdev_put(td
->dm_dev
.bdev
, td
->dm_dev
.mode
| FMODE_EXCL
);
778 td
->dm_dev
.bdev
= NULL
;
781 static struct table_device
*find_table_device(struct list_head
*l
, dev_t dev
,
783 struct table_device
*td
;
785 list_for_each_entry(td
, l
, list
)
786 if (td
->dm_dev
.bdev
->bd_dev
== dev
&& td
->dm_dev
.mode
== mode
)
792 int dm_get_table_device(struct mapped_device
*md
, dev_t dev
, fmode_t mode
,
793 struct dm_dev
**result
) {
795 struct table_device
*td
;
797 mutex_lock(&md
->table_devices_lock
);
798 td
= find_table_device(&md
->table_devices
, dev
, mode
);
800 td
= kmalloc(sizeof(*td
), GFP_KERNEL
);
802 mutex_unlock(&md
->table_devices_lock
);
806 td
->dm_dev
.mode
= mode
;
807 td
->dm_dev
.bdev
= NULL
;
809 if ((r
= open_table_device(td
, dev
, md
))) {
810 mutex_unlock(&md
->table_devices_lock
);
815 format_dev_t(td
->dm_dev
.name
, dev
);
817 atomic_set(&td
->count
, 0);
818 list_add(&td
->list
, &md
->table_devices
);
820 atomic_inc(&td
->count
);
821 mutex_unlock(&md
->table_devices_lock
);
823 *result
= &td
->dm_dev
;
826 EXPORT_SYMBOL_GPL(dm_get_table_device
);
828 void dm_put_table_device(struct mapped_device
*md
, struct dm_dev
*d
)
830 struct table_device
*td
= container_of(d
, struct table_device
, dm_dev
);
832 mutex_lock(&md
->table_devices_lock
);
833 if (atomic_dec_and_test(&td
->count
)) {
834 close_table_device(td
, md
);
838 mutex_unlock(&md
->table_devices_lock
);
840 EXPORT_SYMBOL(dm_put_table_device
);
842 static void free_table_devices(struct list_head
*devices
)
844 struct list_head
*tmp
, *next
;
846 list_for_each_safe(tmp
, next
, devices
) {
847 struct table_device
*td
= list_entry(tmp
, struct table_device
, list
);
849 DMWARN("dm_destroy: %s still exists with %d references",
850 td
->dm_dev
.name
, atomic_read(&td
->count
));
856 * Get the geometry associated with a dm device
858 int dm_get_geometry(struct mapped_device
*md
, struct hd_geometry
*geo
)
866 * Set the geometry of a device.
868 int dm_set_geometry(struct mapped_device
*md
, struct hd_geometry
*geo
)
870 sector_t sz
= (sector_t
)geo
->cylinders
* geo
->heads
* geo
->sectors
;
872 if (geo
->start
> sz
) {
873 DMWARN("Start sector is beyond the geometry limits.");
882 /*-----------------------------------------------------------------
884 * A more elegant soln is in the works that uses the queue
885 * merge fn, unfortunately there are a couple of changes to
886 * the block layer that I want to make for this. So in the
887 * interests of getting something for people to use I give
888 * you this clearly demarcated crap.
889 *---------------------------------------------------------------*/
891 static int __noflush_suspending(struct mapped_device
*md
)
893 return test_bit(DMF_NOFLUSH_SUSPENDING
, &md
->flags
);
897 * Decrements the number of outstanding ios that a bio has been
898 * cloned into, completing the original io if necc.
900 static void dec_pending(struct dm_io
*io
, int error
)
905 struct mapped_device
*md
= io
->md
;
907 /* Push-back supersedes any I/O errors */
908 if (unlikely(error
)) {
909 spin_lock_irqsave(&io
->endio_lock
, flags
);
910 if (!(io
->error
> 0 && __noflush_suspending(md
)))
912 spin_unlock_irqrestore(&io
->endio_lock
, flags
);
915 if (atomic_dec_and_test(&io
->io_count
)) {
916 if (io
->error
== DM_ENDIO_REQUEUE
) {
918 * Target requested pushing back the I/O.
920 spin_lock_irqsave(&md
->deferred_lock
, flags
);
921 if (__noflush_suspending(md
))
922 bio_list_add_head(&md
->deferred
, io
->bio
);
924 /* noflush suspend was interrupted. */
926 spin_unlock_irqrestore(&md
->deferred_lock
, flags
);
929 io_error
= io
->error
;
934 if (io_error
== DM_ENDIO_REQUEUE
)
937 if ((bio
->bi_rw
& REQ_FLUSH
) && bio
->bi_iter
.bi_size
) {
939 * Preflush done for flush with data, reissue
942 bio
->bi_rw
&= ~REQ_FLUSH
;
945 /* done with normal IO or empty flush */
946 trace_block_bio_complete(md
->queue
, bio
, io_error
);
947 bio
->bi_error
= io_error
;
953 static void disable_write_same(struct mapped_device
*md
)
955 struct queue_limits
*limits
= dm_get_queue_limits(md
);
957 /* device doesn't really support WRITE SAME, disable it */
958 limits
->max_write_same_sectors
= 0;
961 static void clone_endio(struct bio
*bio
)
963 int error
= bio
->bi_error
;
965 struct dm_target_io
*tio
= container_of(bio
, struct dm_target_io
, clone
);
966 struct dm_io
*io
= tio
->io
;
967 struct mapped_device
*md
= tio
->io
->md
;
968 dm_endio_fn endio
= tio
->ti
->type
->end_io
;
971 r
= endio(tio
->ti
, bio
, error
);
972 if (r
< 0 || r
== DM_ENDIO_REQUEUE
)
974 * error and requeue request are handled
978 else if (r
== DM_ENDIO_INCOMPLETE
)
979 /* The target will handle the io */
982 DMWARN("unimplemented target endio return value: %d", r
);
987 if (unlikely(r
== -EREMOTEIO
&& (bio
->bi_rw
& REQ_WRITE_SAME
) &&
988 !bdev_get_queue(bio
->bi_bdev
)->limits
.max_write_same_sectors
))
989 disable_write_same(md
);
992 dec_pending(io
, error
);
996 * Partial completion handling for request-based dm
998 static void end_clone_bio(struct bio
*clone
)
1000 struct dm_rq_clone_bio_info
*info
=
1001 container_of(clone
, struct dm_rq_clone_bio_info
, clone
);
1002 struct dm_rq_target_io
*tio
= info
->tio
;
1003 struct bio
*bio
= info
->orig
;
1004 unsigned int nr_bytes
= info
->orig
->bi_iter
.bi_size
;
1010 * An error has already been detected on the request.
1011 * Once error occurred, just let clone->end_io() handle
1015 else if (bio
->bi_error
) {
1017 * Don't notice the error to the upper layer yet.
1018 * The error handling decision is made by the target driver,
1019 * when the request is completed.
1021 tio
->error
= bio
->bi_error
;
1026 * I/O for the bio successfully completed.
1027 * Notice the data completion to the upper layer.
1031 * bios are processed from the head of the list.
1032 * So the completing bio should always be rq->bio.
1033 * If it's not, something wrong is happening.
1035 if (tio
->orig
->bio
!= bio
)
1036 DMERR("bio completion is going in the middle of the request");
1039 * Update the original request.
1040 * Do not use blk_end_request() here, because it may complete
1041 * the original request before the clone, and break the ordering.
1043 blk_update_request(tio
->orig
, 0, nr_bytes
);
1046 static struct dm_rq_target_io
*tio_from_request(struct request
*rq
)
1048 return (rq
->q
->mq_ops
? blk_mq_rq_to_pdu(rq
) : rq
->special
);
1051 static void rq_end_stats(struct mapped_device
*md
, struct request
*orig
)
1053 if (unlikely(dm_stats_used(&md
->stats
))) {
1054 struct dm_rq_target_io
*tio
= tio_from_request(orig
);
1055 tio
->duration_jiffies
= jiffies
- tio
->duration_jiffies
;
1056 dm_stats_account_io(&md
->stats
, orig
->cmd_flags
, blk_rq_pos(orig
),
1057 tio
->n_sectors
, true, tio
->duration_jiffies
,
1063 * Don't touch any member of the md after calling this function because
1064 * the md may be freed in dm_put() at the end of this function.
1065 * Or do dm_get() before calling this function and dm_put() later.
1067 static void rq_completed(struct mapped_device
*md
, int rw
, bool run_queue
)
1069 int nr_requests_pending
;
1071 atomic_dec(&md
->pending
[rw
]);
1073 /* nudge anyone waiting on suspend queue */
1074 nr_requests_pending
= md_in_flight(md
);
1075 if (!nr_requests_pending
)
1079 * Run this off this callpath, as drivers could invoke end_io while
1080 * inside their request_fn (and holding the queue lock). Calling
1081 * back into ->request_fn() could deadlock attempting to grab the
1085 if (md
->queue
->mq_ops
)
1086 blk_mq_run_hw_queues(md
->queue
, true);
1087 else if (!nr_requests_pending
||
1088 (nr_requests_pending
>= md
->queue
->nr_congestion_on
))
1089 blk_run_queue_async(md
->queue
);
1093 * dm_put() must be at the end of this function. See the comment above
1098 static void free_rq_clone(struct request
*clone
)
1100 struct dm_rq_target_io
*tio
= clone
->end_io_data
;
1101 struct mapped_device
*md
= tio
->md
;
1103 blk_rq_unprep_clone(clone
);
1105 if (md
->type
== DM_TYPE_MQ_REQUEST_BASED
)
1106 /* stacked on blk-mq queue(s) */
1107 tio
->ti
->type
->release_clone_rq(clone
);
1108 else if (!md
->queue
->mq_ops
)
1109 /* request_fn queue stacked on request_fn queue(s) */
1110 free_clone_request(md
, clone
);
1112 * NOTE: for the blk-mq queue stacked on request_fn queue(s) case:
1113 * no need to call free_clone_request() because we leverage blk-mq by
1114 * allocating the clone at the end of the blk-mq pdu (see: clone_rq)
1117 if (!md
->queue
->mq_ops
)
1122 * Complete the clone and the original request.
1123 * Must be called without clone's queue lock held,
1124 * see end_clone_request() for more details.
1126 static void dm_end_request(struct request
*clone
, int error
)
1128 int rw
= rq_data_dir(clone
);
1129 struct dm_rq_target_io
*tio
= clone
->end_io_data
;
1130 struct mapped_device
*md
= tio
->md
;
1131 struct request
*rq
= tio
->orig
;
1133 if (rq
->cmd_type
== REQ_TYPE_BLOCK_PC
) {
1134 rq
->errors
= clone
->errors
;
1135 rq
->resid_len
= clone
->resid_len
;
1139 * We are using the sense buffer of the original
1141 * So setting the length of the sense data is enough.
1143 rq
->sense_len
= clone
->sense_len
;
1146 free_rq_clone(clone
);
1147 rq_end_stats(md
, rq
);
1149 blk_end_request_all(rq
, error
);
1151 blk_mq_end_request(rq
, error
);
1152 rq_completed(md
, rw
, true);
1155 static void dm_unprep_request(struct request
*rq
)
1157 struct dm_rq_target_io
*tio
= tio_from_request(rq
);
1158 struct request
*clone
= tio
->clone
;
1160 if (!rq
->q
->mq_ops
) {
1162 rq
->cmd_flags
&= ~REQ_DONTPREP
;
1166 free_rq_clone(clone
);
1170 * Requeue the original request of a clone.
1172 static void old_requeue_request(struct request
*rq
)
1174 struct request_queue
*q
= rq
->q
;
1175 unsigned long flags
;
1177 spin_lock_irqsave(q
->queue_lock
, flags
);
1178 blk_requeue_request(q
, rq
);
1179 blk_run_queue_async(q
);
1180 spin_unlock_irqrestore(q
->queue_lock
, flags
);
1183 static void dm_requeue_original_request(struct mapped_device
*md
,
1186 int rw
= rq_data_dir(rq
);
1188 dm_unprep_request(rq
);
1190 rq_end_stats(md
, rq
);
1192 old_requeue_request(rq
);
1194 blk_mq_requeue_request(rq
);
1195 blk_mq_kick_requeue_list(rq
->q
);
1198 rq_completed(md
, rw
, false);
1201 static void old_stop_queue(struct request_queue
*q
)
1203 unsigned long flags
;
1205 if (blk_queue_stopped(q
))
1208 spin_lock_irqsave(q
->queue_lock
, flags
);
1210 spin_unlock_irqrestore(q
->queue_lock
, flags
);
1213 static void stop_queue(struct request_queue
*q
)
1218 blk_mq_stop_hw_queues(q
);
1221 static void old_start_queue(struct request_queue
*q
)
1223 unsigned long flags
;
1225 spin_lock_irqsave(q
->queue_lock
, flags
);
1226 if (blk_queue_stopped(q
))
1228 spin_unlock_irqrestore(q
->queue_lock
, flags
);
1231 static void start_queue(struct request_queue
*q
)
1236 blk_mq_start_stopped_hw_queues(q
, true);
1239 static void dm_done(struct request
*clone
, int error
, bool mapped
)
1242 struct dm_rq_target_io
*tio
= clone
->end_io_data
;
1243 dm_request_endio_fn rq_end_io
= NULL
;
1246 rq_end_io
= tio
->ti
->type
->rq_end_io
;
1248 if (mapped
&& rq_end_io
)
1249 r
= rq_end_io(tio
->ti
, clone
, error
, &tio
->info
);
1252 if (unlikely(r
== -EREMOTEIO
&& (clone
->cmd_flags
& REQ_WRITE_SAME
) &&
1253 !clone
->q
->limits
.max_write_same_sectors
))
1254 disable_write_same(tio
->md
);
1257 /* The target wants to complete the I/O */
1258 dm_end_request(clone
, r
);
1259 else if (r
== DM_ENDIO_INCOMPLETE
)
1260 /* The target will handle the I/O */
1262 else if (r
== DM_ENDIO_REQUEUE
)
1263 /* The target wants to requeue the I/O */
1264 dm_requeue_original_request(tio
->md
, tio
->orig
);
1266 DMWARN("unimplemented target endio return value: %d", r
);
1272 * Request completion handler for request-based dm
1274 static void dm_softirq_done(struct request
*rq
)
1277 struct dm_rq_target_io
*tio
= tio_from_request(rq
);
1278 struct request
*clone
= tio
->clone
;
1282 rq_end_stats(tio
->md
, rq
);
1283 rw
= rq_data_dir(rq
);
1284 if (!rq
->q
->mq_ops
) {
1285 blk_end_request_all(rq
, tio
->error
);
1286 rq_completed(tio
->md
, rw
, false);
1289 blk_mq_end_request(rq
, tio
->error
);
1290 rq_completed(tio
->md
, rw
, false);
1295 if (rq
->cmd_flags
& REQ_FAILED
)
1298 dm_done(clone
, tio
->error
, mapped
);
1302 * Complete the clone and the original request with the error status
1303 * through softirq context.
1305 static void dm_complete_request(struct request
*rq
, int error
)
1307 struct dm_rq_target_io
*tio
= tio_from_request(rq
);
1310 blk_complete_request(rq
);
1314 * Complete the not-mapped clone and the original request with the error status
1315 * through softirq context.
1316 * Target's rq_end_io() function isn't called.
1317 * This may be used when the target's map_rq() or clone_and_map_rq() functions fail.
1319 static void dm_kill_unmapped_request(struct request
*rq
, int error
)
1321 rq
->cmd_flags
|= REQ_FAILED
;
1322 dm_complete_request(rq
, error
);
1326 * Called with the clone's queue lock held (for non-blk-mq)
1328 static void end_clone_request(struct request
*clone
, int error
)
1330 struct dm_rq_target_io
*tio
= clone
->end_io_data
;
1332 if (!clone
->q
->mq_ops
) {
1334 * For just cleaning up the information of the queue in which
1335 * the clone was dispatched.
1336 * The clone is *NOT* freed actually here because it is alloced
1337 * from dm own mempool (REQ_ALLOCED isn't set).
1339 __blk_put_request(clone
->q
, clone
);
1343 * Actual request completion is done in a softirq context which doesn't
1344 * hold the clone's queue lock. Otherwise, deadlock could occur because:
1345 * - another request may be submitted by the upper level driver
1346 * of the stacking during the completion
1347 * - the submission which requires queue lock may be done
1348 * against this clone's queue
1350 dm_complete_request(tio
->orig
, error
);
1354 * Return maximum size of I/O possible at the supplied sector up to the current
1357 static sector_t
max_io_len_target_boundary(sector_t sector
, struct dm_target
*ti
)
1359 sector_t target_offset
= dm_target_offset(ti
, sector
);
1361 return ti
->len
- target_offset
;
1364 static sector_t
max_io_len(sector_t sector
, struct dm_target
*ti
)
1366 sector_t len
= max_io_len_target_boundary(sector
, ti
);
1367 sector_t offset
, max_len
;
1370 * Does the target need to split even further?
1372 if (ti
->max_io_len
) {
1373 offset
= dm_target_offset(ti
, sector
);
1374 if (unlikely(ti
->max_io_len
& (ti
->max_io_len
- 1)))
1375 max_len
= sector_div(offset
, ti
->max_io_len
);
1377 max_len
= offset
& (ti
->max_io_len
- 1);
1378 max_len
= ti
->max_io_len
- max_len
;
int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
{
	if (len > UINT_MAX) {
		DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
		      (unsigned long long)len, UINT_MAX);
		ti->error = "Maximum size of target IO is too large";
		return -EINVAL;
	}

	ti->max_io_len = (uint32_t) len;

	return 0;
}
EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
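/*
 * Illustrative only (not from the original source): a target constructor
 * typically caps its I/O size with this helper.  "example_ctr" and
 * EXAMPLE_CHUNK_SECTORS are hypothetical; dm_set_target_max_io_len() and
 * ti->error are the real interfaces.
 *
 *	static int example_ctr(struct dm_target *ti, unsigned argc, char **argv)
 *	{
 *		int r = dm_set_target_max_io_len(ti, EXAMPLE_CHUNK_SECTORS);
 *
 *		if (r)
 *			return r;	// ti->error was set for us
 *		...
 *	}
 */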
/*
 * A target may call dm_accept_partial_bio only from the map routine. It is
 * allowed for all bio types except REQ_FLUSH.
 *
 * dm_accept_partial_bio informs the dm that the target only wants to process
 * additional n_sectors sectors of the bio and the rest of the data should be
 * sent in a next bio.
 *
 * A diagram that explains the arithmetics:
 * +--------------------+---------------+-------+
 * |         1          |       2       |   3   |
 * +--------------------+---------------+-------+
 *
 * <-------------- *tio->len_ptr --------------->
 *                      <------- bi_size ------->
 *                      <-- n_sectors -->
 *
 * Region 1 was already iterated over with bio_advance or similar function.
 *	(it may be empty if the target doesn't use bio_advance)
 * Region 2 is the remaining bio size that the target wants to process.
 *	(it may be empty if region 1 is non-empty, although there is no reason
 *	 to make it empty)
 * The target requires that region 3 is to be sent in the next bio.
 *
 * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
 * the partially processed part (the sum of regions 1+2) must be the same for all
 * copies of the bio.
 */
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
{
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
	BUG_ON(bio->bi_rw & REQ_FLUSH);
	BUG_ON(bi_size > *tio->len_ptr);
	BUG_ON(n_sectors > bi_size);
	*tio->len_ptr -= bi_size - n_sectors;
	bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
}
EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
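/*
 * Illustrative only (not from the original source): a map routine that can
 * only handle part of a bio might use dm_accept_partial_bio() like this.
 * "example_map" and "example_max_sectors" are hypothetical; the REQ_FLUSH
 * exclusion mirrors the restriction documented above.
 *
 *	static int example_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		unsigned max = example_max_sectors(ti, bio);
 *
 *		if (!(bio->bi_rw & REQ_FLUSH) && bio_sectors(bio) > max)
 *			dm_accept_partial_bio(bio, max);
 *
 *		// remap bio->bi_bdev / bi_sector as usual ...
 *		return DM_MAPIO_REMAPPED;
 *	}
 *
 * The dm core then sends the rest of the original bio to the target in a
 * subsequent bio, as described in the comment above.
 */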
1442 static void __map_bio(struct dm_target_io
*tio
)
1446 struct mapped_device
*md
;
1447 struct bio
*clone
= &tio
->clone
;
1448 struct dm_target
*ti
= tio
->ti
;
1450 clone
->bi_end_io
= clone_endio
;
1453 * Map the clone. If r == 0 we don't need to do
1454 * anything, the target has assumed ownership of
1457 atomic_inc(&tio
->io
->io_count
);
1458 sector
= clone
->bi_iter
.bi_sector
;
1459 r
= ti
->type
->map(ti
, clone
);
1460 if (r
== DM_MAPIO_REMAPPED
) {
1461 /* the bio has been remapped so dispatch it */
1463 trace_block_bio_remap(bdev_get_queue(clone
->bi_bdev
), clone
,
1464 tio
->io
->bio
->bi_bdev
->bd_dev
, sector
);
1466 generic_make_request(clone
);
1467 } else if (r
< 0 || r
== DM_MAPIO_REQUEUE
) {
1468 /* error the io and bail out, or requeue it if needed */
1470 dec_pending(tio
->io
, r
);
1473 DMWARN("unimplemented target map return value: %d", r
);
1479 struct mapped_device
*md
;
1480 struct dm_table
*map
;
1484 unsigned sector_count
;
1487 static void bio_setup_sector(struct bio
*bio
, sector_t sector
, unsigned len
)
1489 bio
->bi_iter
.bi_sector
= sector
;
1490 bio
->bi_iter
.bi_size
= to_bytes(len
);
1494 * Creates a bio that consists of range of complete bvecs.
1496 static void clone_bio(struct dm_target_io
*tio
, struct bio
*bio
,
1497 sector_t sector
, unsigned len
)
1499 struct bio
*clone
= &tio
->clone
;
1501 __bio_clone_fast(clone
, bio
);
1503 if (bio_integrity(bio
))
1504 bio_integrity_clone(clone
, bio
, GFP_NOIO
);
1506 bio_advance(clone
, to_bytes(sector
- clone
->bi_iter
.bi_sector
));
1507 clone
->bi_iter
.bi_size
= to_bytes(len
);
1509 if (bio_integrity(bio
))
1510 bio_integrity_trim(clone
, 0, len
);
1513 static struct dm_target_io
*alloc_tio(struct clone_info
*ci
,
1514 struct dm_target
*ti
,
1515 unsigned target_bio_nr
)
1517 struct dm_target_io
*tio
;
1520 clone
= bio_alloc_bioset(GFP_NOIO
, 0, ci
->md
->bs
);
1521 tio
= container_of(clone
, struct dm_target_io
, clone
);
1525 tio
->target_bio_nr
= target_bio_nr
;
1530 static void __clone_and_map_simple_bio(struct clone_info
*ci
,
1531 struct dm_target
*ti
,
1532 unsigned target_bio_nr
, unsigned *len
)
1534 struct dm_target_io
*tio
= alloc_tio(ci
, ti
, target_bio_nr
);
1535 struct bio
*clone
= &tio
->clone
;
1539 __bio_clone_fast(clone
, ci
->bio
);
1541 bio_setup_sector(clone
, ci
->sector
, *len
);
1546 static void __send_duplicate_bios(struct clone_info
*ci
, struct dm_target
*ti
,
1547 unsigned num_bios
, unsigned *len
)
1549 unsigned target_bio_nr
;
1551 for (target_bio_nr
= 0; target_bio_nr
< num_bios
; target_bio_nr
++)
1552 __clone_and_map_simple_bio(ci
, ti
, target_bio_nr
, len
);
1555 static int __send_empty_flush(struct clone_info
*ci
)
1557 unsigned target_nr
= 0;
1558 struct dm_target
*ti
;
1560 BUG_ON(bio_has_data(ci
->bio
));
1561 while ((ti
= dm_table_get_target(ci
->map
, target_nr
++)))
1562 __send_duplicate_bios(ci
, ti
, ti
->num_flush_bios
, NULL
);
1567 static void __clone_and_map_data_bio(struct clone_info
*ci
, struct dm_target
*ti
,
1568 sector_t sector
, unsigned *len
)
1570 struct bio
*bio
= ci
->bio
;
1571 struct dm_target_io
*tio
;
1572 unsigned target_bio_nr
;
1573 unsigned num_target_bios
= 1;
1576 * Does the target want to receive duplicate copies of the bio?
1578 if (bio_data_dir(bio
) == WRITE
&& ti
->num_write_bios
)
1579 num_target_bios
= ti
->num_write_bios(ti
, bio
);
1581 for (target_bio_nr
= 0; target_bio_nr
< num_target_bios
; target_bio_nr
++) {
1582 tio
= alloc_tio(ci
, ti
, target_bio_nr
);
1584 clone_bio(tio
, bio
, sector
, *len
);
1589 typedef unsigned (*get_num_bios_fn
)(struct dm_target
*ti
);
1591 static unsigned get_num_discard_bios(struct dm_target
*ti
)
1593 return ti
->num_discard_bios
;
1596 static unsigned get_num_write_same_bios(struct dm_target
*ti
)
1598 return ti
->num_write_same_bios
;
1601 typedef bool (*is_split_required_fn
)(struct dm_target
*ti
);
1603 static bool is_split_required_for_discard(struct dm_target
*ti
)
1605 return ti
->split_discard_bios
;
1608 static int __send_changing_extent_only(struct clone_info
*ci
,
1609 get_num_bios_fn get_num_bios
,
1610 is_split_required_fn is_split_required
)
1612 struct dm_target
*ti
;
1617 ti
= dm_table_find_target(ci
->map
, ci
->sector
);
1618 if (!dm_target_is_valid(ti
))
1622 * Even though the device advertised support for this type of
1623 * request, that does not mean every target supports it, and
1624 * reconfiguration might also have changed that since the
1625 * check was performed.
1627 num_bios
= get_num_bios
? get_num_bios(ti
) : 0;
1631 if (is_split_required
&& !is_split_required(ti
))
1632 len
= min((sector_t
)ci
->sector_count
, max_io_len_target_boundary(ci
->sector
, ti
));
1634 len
= min((sector_t
)ci
->sector_count
, max_io_len(ci
->sector
, ti
));
1636 __send_duplicate_bios(ci
, ti
, num_bios
, &len
);
1639 } while (ci
->sector_count
-= len
);
1644 static int __send_discard(struct clone_info
*ci
)
1646 return __send_changing_extent_only(ci
, get_num_discard_bios
,
1647 is_split_required_for_discard
);
1650 static int __send_write_same(struct clone_info
*ci
)
1652 return __send_changing_extent_only(ci
, get_num_write_same_bios
, NULL
);
1656 * Select the correct strategy for processing a non-flush bio.
1658 static int __split_and_process_non_flush(struct clone_info
*ci
)
1660 struct bio
*bio
= ci
->bio
;
1661 struct dm_target
*ti
;
1664 if (unlikely(bio
->bi_rw
& REQ_DISCARD
))
1665 return __send_discard(ci
);
1666 else if (unlikely(bio
->bi_rw
& REQ_WRITE_SAME
))
1667 return __send_write_same(ci
);
1669 ti
= dm_table_find_target(ci
->map
, ci
->sector
);
1670 if (!dm_target_is_valid(ti
))
1673 len
= min_t(sector_t
, max_io_len(ci
->sector
, ti
), ci
->sector_count
);
1675 __clone_and_map_data_bio(ci
, ti
, ci
->sector
, &len
);
1678 ci
->sector_count
-= len
;
1684 * Entry point to split a bio into clones and submit them to the targets.
1686 static void __split_and_process_bio(struct mapped_device
*md
,
1687 struct dm_table
*map
, struct bio
*bio
)
1689 struct clone_info ci
;
1692 if (unlikely(!map
)) {
1699 ci
.io
= alloc_io(md
);
1701 atomic_set(&ci
.io
->io_count
, 1);
1704 spin_lock_init(&ci
.io
->endio_lock
);
1705 ci
.sector
= bio
->bi_iter
.bi_sector
;
1707 start_io_acct(ci
.io
);
1709 if (bio
->bi_rw
& REQ_FLUSH
) {
1710 ci
.bio
= &ci
.md
->flush_bio
;
1711 ci
.sector_count
= 0;
1712 error
= __send_empty_flush(&ci
);
1713 /* dec_pending submits any data associated with flush */
1716 ci
.sector_count
= bio_sectors(bio
);
1717 while (ci
.sector_count
&& !error
)
1718 error
= __split_and_process_non_flush(&ci
);
1721 /* drop the extra reference count */
1722 dec_pending(ci
.io
, error
);
1724 /*-----------------------------------------------------------------
1726 *---------------------------------------------------------------*/
1728 static int dm_merge_bvec(struct request_queue
*q
,
1729 struct bvec_merge_data
*bvm
,
1730 struct bio_vec
*biovec
)
1732 struct mapped_device
*md
= q
->queuedata
;
1733 struct dm_table
*map
= dm_get_live_table_fast(md
);
1734 struct dm_target
*ti
;
1735 sector_t max_sectors
, max_size
= 0;
1740 ti
= dm_table_find_target(map
, bvm
->bi_sector
);
1741 if (!dm_target_is_valid(ti
))
1745 * Find maximum amount of I/O that won't need splitting
1747 max_sectors
= min(max_io_len(bvm
->bi_sector
, ti
),
1748 (sector_t
) queue_max_sectors(q
));
1749 max_size
= (max_sectors
<< SECTOR_SHIFT
) - bvm
->bi_size
;
1752 * FIXME: this stop-gap fix _must_ be cleaned up (by passing a sector_t
1753 * to the targets' merge function since it holds sectors not bytes).
1754 * Just doing this as an interim fix for stable@ because the more
1755 * comprehensive cleanup of switching to sector_t will impact every
1756 * DM target that implements a ->merge hook.
1758 if (max_size
> INT_MAX
)
1762 * merge_bvec_fn() returns number of bytes
1763 * it can accept at this offset
1764 * max is precomputed maximal io size
1766 if (max_size
&& ti
->type
->merge
)
1767 max_size
= ti
->type
->merge(ti
, bvm
, biovec
, (int) max_size
);
1769 * If the target doesn't support merge method and some of the devices
1770 * provided their merge_bvec method (we know this by looking for the
1771 * max_hw_sectors that dm_set_device_limits may set), then we can't
1772 * allow bios with multiple vector entries. So always set max_size
1773 * to 0, and the code below allows just one page.
1775 else if (queue_max_hw_sectors(q
) <= PAGE_SIZE
>> 9)
1779 dm_put_live_table_fast(md
);
1781 * Always allow an entire first page
1783 if (max_size
<= biovec
->bv_len
&& !(bvm
->bi_size
>> SECTOR_SHIFT
))
1784 max_size
= biovec
->bv_len
;
1790 * The request function that just remaps the bio built up by
1793 static void dm_make_request(struct request_queue
*q
, struct bio
*bio
)
1795 int rw
= bio_data_dir(bio
);
1796 struct mapped_device
*md
= q
->queuedata
;
1798 struct dm_table
*map
;
1800 map
= dm_get_live_table(md
, &srcu_idx
);
1802 generic_start_io_acct(rw
, bio_sectors(bio
), &dm_disk(md
)->part0
);
1804 /* if we're suspended, we have to queue this io for later */
1805 if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND
, &md
->flags
))) {
1806 dm_put_live_table(md
, srcu_idx
);
1808 if (bio_rw(bio
) != READA
)
1815 __split_and_process_bio(md
, map
, bio
);
1816 dm_put_live_table(md
, srcu_idx
);
1820 int dm_request_based(struct mapped_device
*md
)
1822 return blk_queue_stackable(md
->queue
);
1825 static void dm_dispatch_clone_request(struct request
*clone
, struct request
*rq
)
1829 if (blk_queue_io_stat(clone
->q
))
1830 clone
->cmd_flags
|= REQ_IO_STAT
;
1832 clone
->start_time
= jiffies
;
1833 r
= blk_insert_cloned_request(clone
->q
, clone
);
1835 /* must complete clone in terms of original request */
1836 dm_complete_request(rq
, r
);
1839 static int dm_rq_bio_constructor(struct bio
*bio
, struct bio
*bio_orig
,
1842 struct dm_rq_target_io
*tio
= data
;
1843 struct dm_rq_clone_bio_info
*info
=
1844 container_of(bio
, struct dm_rq_clone_bio_info
, clone
);
1846 info
->orig
= bio_orig
;
1848 bio
->bi_end_io
= end_clone_bio
;
1853 static int setup_clone(struct request
*clone
, struct request
*rq
,
1854 struct dm_rq_target_io
*tio
, gfp_t gfp_mask
)
1858 r
= blk_rq_prep_clone(clone
, rq
, tio
->md
->bs
, gfp_mask
,
1859 dm_rq_bio_constructor
, tio
);
1863 clone
->cmd
= rq
->cmd
;
1864 clone
->cmd_len
= rq
->cmd_len
;
1865 clone
->sense
= rq
->sense
;
1866 clone
->end_io
= end_clone_request
;
1867 clone
->end_io_data
= tio
;
1874 static struct request
*clone_rq(struct request
*rq
, struct mapped_device
*md
,
1875 struct dm_rq_target_io
*tio
, gfp_t gfp_mask
)
1878 * Do not allocate a clone if tio->clone was already set
1879 * (see: dm_mq_queue_rq).
1881 bool alloc_clone
= !tio
->clone
;
1882 struct request
*clone
;
1885 clone
= alloc_clone_request(md
, gfp_mask
);
1891 blk_rq_init(NULL
, clone
);
1892 if (setup_clone(clone
, rq
, tio
, gfp_mask
)) {
1895 free_clone_request(md
, clone
);
1902 static void map_tio_request(struct kthread_work
*work
);
1904 static void init_tio(struct dm_rq_target_io
*tio
, struct request
*rq
,
1905 struct mapped_device
*md
)
1912 memset(&tio
->info
, 0, sizeof(tio
->info
));
1913 if (md
->kworker_task
)
1914 init_kthread_work(&tio
->work
, map_tio_request
);
1917 static struct dm_rq_target_io
*prep_tio(struct request
*rq
,
1918 struct mapped_device
*md
, gfp_t gfp_mask
)
1920 struct dm_rq_target_io
*tio
;
1922 struct dm_table
*table
;
1924 tio
= alloc_rq_tio(md
, gfp_mask
);
1928 init_tio(tio
, rq
, md
);
1930 table
= dm_get_live_table(md
, &srcu_idx
);
1931 if (!dm_table_mq_request_based(table
)) {
1932 if (!clone_rq(rq
, md
, tio
, gfp_mask
)) {
1933 dm_put_live_table(md
, srcu_idx
);
1938 dm_put_live_table(md
, srcu_idx
);
1944 * Called with the queue lock held.
1946 static int dm_prep_fn(struct request_queue
*q
, struct request
*rq
)
1948 struct mapped_device
*md
= q
->queuedata
;
1949 struct dm_rq_target_io
*tio
;
1951 if (unlikely(rq
->special
)) {
1952 DMWARN("Already has something in rq->special.");
1953 return BLKPREP_KILL
;
1956 tio
= prep_tio(rq
, md
, GFP_ATOMIC
);
1958 return BLKPREP_DEFER
;
1961 rq
->cmd_flags
|= REQ_DONTPREP
;
1968 * 0 : the request has been processed
1969 * DM_MAPIO_REQUEUE : the original request needs to be requeued
1970 * < 0 : the request was completed due to failure
1972 static int map_request(struct dm_rq_target_io
*tio
, struct request
*rq
,
1973 struct mapped_device
*md
)
1976 struct dm_target
*ti
= tio
->ti
;
1977 struct request
*clone
= NULL
;
1981 r
= ti
->type
->map_rq(ti
, clone
, &tio
->info
);
1983 r
= ti
->type
->clone_and_map_rq(ti
, rq
, &tio
->info
, &clone
);
1985 /* The target wants to complete the I/O */
1986 dm_kill_unmapped_request(rq
, r
);
1989 if (r
!= DM_MAPIO_REMAPPED
)
1991 if (setup_clone(clone
, rq
, tio
, GFP_ATOMIC
)) {
1993 ti
->type
->release_clone_rq(clone
);
1994 return DM_MAPIO_REQUEUE
;
1999 case DM_MAPIO_SUBMITTED
:
2000 /* The target has taken the I/O to submit by itself later */
2002 case DM_MAPIO_REMAPPED
:
2003 /* The target has remapped the I/O so dispatch it */
2004 trace_block_rq_remap(clone
->q
, clone
, disk_devt(dm_disk(md
)),
2006 dm_dispatch_clone_request(clone
, rq
);
2008 case DM_MAPIO_REQUEUE
:
2009 /* The target wants to requeue the I/O */
2010 dm_requeue_original_request(md
, tio
->orig
);
2014 DMWARN("unimplemented target map return value: %d", r
);
2018 /* The target wants to complete the I/O */
2019 dm_kill_unmapped_request(rq
, r
);
2026 static void map_tio_request(struct kthread_work
*work
)
2028 struct dm_rq_target_io
*tio
= container_of(work
, struct dm_rq_target_io
, work
);
2029 struct request
*rq
= tio
->orig
;
2030 struct mapped_device
*md
= tio
->md
;
2032 if (map_request(tio
, rq
, md
) == DM_MAPIO_REQUEUE
)
2033 dm_requeue_original_request(md
, rq
);
2036 static void dm_start_request(struct mapped_device
*md
, struct request
*orig
)
2038 if (!orig
->q
->mq_ops
)
2039 blk_start_request(orig
);
2041 blk_mq_start_request(orig
);
2042 atomic_inc(&md
->pending
[rq_data_dir(orig
)]);
2044 if (md
->seq_rq_merge_deadline_usecs
) {
2045 md
->last_rq_pos
= rq_end_sector(orig
);
2046 md
->last_rq_rw
= rq_data_dir(orig
);
2047 md
->last_rq_start_time
= ktime_get();
2050 if (unlikely(dm_stats_used(&md
->stats
))) {
2051 struct dm_rq_target_io
*tio
= tio_from_request(orig
);
2052 tio
->duration_jiffies
= jiffies
;
2053 tio
->n_sectors
= blk_rq_sectors(orig
);
2054 dm_stats_account_io(&md
->stats
, orig
->cmd_flags
, blk_rq_pos(orig
),
2055 tio
->n_sectors
, false, 0, &tio
->stats_aux
);
2059 * Hold the md reference here for the in-flight I/O.
2060 * We can't rely on the reference count by device opener,
2061 * because the device may be closed during the request completion
2062 * when all bios are completed.
2063 * See the comment in rq_completed() too.
2068 #define MAX_SEQ_RQ_MERGE_DEADLINE_USECS 100000
2070 ssize_t
dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device
*md
, char *buf
)
2072 return sprintf(buf
, "%u\n", md
->seq_rq_merge_deadline_usecs
);
2075 ssize_t
dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device
*md
,
2076 const char *buf
, size_t count
)
2080 if (!dm_request_based(md
) || md
->use_blk_mq
)
2083 if (kstrtouint(buf
, 10, &deadline
))
2086 if (deadline
> MAX_SEQ_RQ_MERGE_DEADLINE_USECS
)
2087 deadline
= MAX_SEQ_RQ_MERGE_DEADLINE_USECS
;
2089 md
->seq_rq_merge_deadline_usecs
= deadline
;
2094 static bool dm_request_peeked_before_merge_deadline(struct mapped_device
*md
)
2096 ktime_t kt_deadline
;
2098 if (!md
->seq_rq_merge_deadline_usecs
)
2101 kt_deadline
= ns_to_ktime((u64
)md
->seq_rq_merge_deadline_usecs
* NSEC_PER_USEC
);
2102 kt_deadline
= ktime_add_safe(md
->last_rq_start_time
, kt_deadline
);
2104 return !ktime_after(ktime_get(), kt_deadline
);
2108 * q->request_fn for request-based dm.
2109 * Called with the queue lock held.
2111 static void dm_request_fn(struct request_queue
*q
)
2113 struct mapped_device
*md
= q
->queuedata
;
2115 struct dm_table
*map
= dm_get_live_table(md
, &srcu_idx
);
2116 struct dm_target
*ti
;
2118 struct dm_rq_target_io
*tio
;
2122 * For suspend, check blk_queue_stopped() and increment
2123 * ->pending within a single queue_lock not to increment the
2124 * number of in-flight I/Os after the queue is stopped in
2127 while (!blk_queue_stopped(q
)) {
2128 rq
= blk_peek_request(q
);
2132 /* always use block 0 to find the target for flushes for now */
2134 if (!(rq
->cmd_flags
& REQ_FLUSH
))
2135 pos
= blk_rq_pos(rq
);
2137 ti
= dm_table_find_target(map
, pos
);
2138 if (!dm_target_is_valid(ti
)) {
2140 * Must perform setup, that rq_completed() requires,
2141 * before calling dm_kill_unmapped_request
2143 DMERR_LIMIT("request attempted access beyond the end of device");
2144 dm_start_request(md
, rq
);
2145 dm_kill_unmapped_request(rq
, -EIO
);
2149 if (dm_request_peeked_before_merge_deadline(md
) &&
2150 md_in_flight(md
) && rq
->bio
&& rq
->bio
->bi_vcnt
== 1 &&
2151 md
->last_rq_pos
== pos
&& md
->last_rq_rw
== rq_data_dir(rq
))
2154 if (ti
->type
->busy
&& ti
->type
->busy(ti
))
2157 dm_start_request(md
, rq
);
2159 tio
= tio_from_request(rq
);
2160 /* Establish tio->ti before queuing work (map_tio_request) */
2162 queue_kthread_work(&md
->kworker
, &tio
->work
);
2163 BUG_ON(!irqs_disabled());
2169 blk_delay_queue(q
, HZ
/ 100);
2171 dm_put_live_table(md
, srcu_idx
);
2174 static int dm_any_congested(void *congested_data
, int bdi_bits
)
2177 struct mapped_device
*md
= congested_data
;
2178 struct dm_table
*map
;
2180 if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND
, &md
->flags
)) {
2181 map
= dm_get_live_table_fast(md
);
2184 * Request-based dm cares about only own queue for
2185 * the query about congestion status of request_queue
2187 if (dm_request_based(md
))
2188 r
= md
->queue
->backing_dev_info
.wb
.state
&
2191 r
= dm_table_any_congested(map
, bdi_bits
);
2193 dm_put_live_table_fast(md
);
2199 /*-----------------------------------------------------------------
2200 * An IDR is used to keep track of allocated minor numbers.
2201 *---------------------------------------------------------------*/
2202 static void free_minor(int minor
)
2204 spin_lock(&_minor_lock
);
2205 idr_remove(&_minor_idr
, minor
);
2206 spin_unlock(&_minor_lock
);
2210 * See if the device with a specific minor # is free.
2212 static int specific_minor(int minor
)
2216 if (minor
>= (1 << MINORBITS
))
2219 idr_preload(GFP_KERNEL
);
2220 spin_lock(&_minor_lock
);
2222 r
= idr_alloc(&_minor_idr
, MINOR_ALLOCED
, minor
, minor
+ 1, GFP_NOWAIT
);
2224 spin_unlock(&_minor_lock
);
2227 return r
== -ENOSPC
? -EBUSY
: r
;
2231 static int next_free_minor(int *minor
)
2235 idr_preload(GFP_KERNEL
);
2236 spin_lock(&_minor_lock
);
2238 r
= idr_alloc(&_minor_idr
, MINOR_ALLOCED
, 0, 1 << MINORBITS
, GFP_NOWAIT
);
2240 spin_unlock(&_minor_lock
);
2248 static const struct block_device_operations dm_blk_dops
;
2250 static void dm_wq_work(struct work_struct
*work
);
2252 static void dm_init_md_queue(struct mapped_device
*md
)
2255 * Request-based dm devices cannot be stacked on top of bio-based dm
2256 * devices. The type of this dm device may not have been decided yet.
2257 * The type is decided at the first table loading time.
2258 * To prevent problematic device stacking, clear the queue flag
2259 * for request stacking support until then.
2261 * This queue is new, so no concurrency on the queue_flags.
2263 queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE
, md
->queue
);
2266 static void dm_init_old_md_queue(struct mapped_device
*md
)
2268 md
->use_blk_mq
= false;
2269 dm_init_md_queue(md
);
2272 * Initialize aspects of queue that aren't relevant for blk-mq
2274 md
->queue
->queuedata
= md
;
2275 md
->queue
->backing_dev_info
.congested_fn
= dm_any_congested
;
2276 md
->queue
->backing_dev_info
.congested_data
= md
;
2278 blk_queue_bounce_limit(md
->queue
, BLK_BOUNCE_ANY
);
2281 static void cleanup_mapped_device(struct mapped_device
*md
)
2283 cleanup_srcu_struct(&md
->io_barrier
);
2286 destroy_workqueue(md
->wq
);
2287 if (md
->kworker_task
)
2288 kthread_stop(md
->kworker_task
);
2290 mempool_destroy(md
->io_pool
);
2292 mempool_destroy(md
->rq_pool
);
2294 bioset_free(md
->bs
);
2297 spin_lock(&_minor_lock
);
2298 md
->disk
->private_data
= NULL
;
2299 spin_unlock(&_minor_lock
);
2300 if (blk_get_integrity(md
->disk
))
2301 blk_integrity_unregister(md
->disk
);
2302 del_gendisk(md
->disk
);
2307 blk_cleanup_queue(md
->queue
);
2316 * Allocate and initialise a blank device with a given minor.
2318 static struct mapped_device
*alloc_dev(int minor
)
2321 struct mapped_device
*md
= kzalloc(sizeof(*md
), GFP_KERNEL
);
2325 DMWARN("unable to allocate device, out of memory.");
2329 if (!try_module_get(THIS_MODULE
))
2330 goto bad_module_get
;
2332 /* get a minor number for the dev */
2333 if (minor
== DM_ANY_MINOR
)
2334 r
= next_free_minor(&minor
);
2336 r
= specific_minor(minor
);
2340 r
= init_srcu_struct(&md
->io_barrier
);
2342 goto bad_io_barrier
;
2344 md
->use_blk_mq
= use_blk_mq
;
2345 md
->type
= DM_TYPE_NONE
;
2346 mutex_init(&md
->suspend_lock
);
2347 mutex_init(&md
->type_lock
);
2348 mutex_init(&md
->table_devices_lock
);
2349 spin_lock_init(&md
->deferred_lock
);
2350 atomic_set(&md
->holders
, 1);
2351 atomic_set(&md
->open_count
, 0);
2352 atomic_set(&md
->event_nr
, 0);
2353 atomic_set(&md
->uevent_seq
, 0);
2354 INIT_LIST_HEAD(&md
->uevent_list
);
2355 INIT_LIST_HEAD(&md
->table_devices
);
2356 spin_lock_init(&md
->uevent_lock
);
2358 md
->queue
= blk_alloc_queue(GFP_KERNEL
);
2362 dm_init_md_queue(md
);
2364 md
->disk
= alloc_disk(1);
2368 atomic_set(&md
->pending
[0], 0);
2369 atomic_set(&md
->pending
[1], 0);
2370 init_waitqueue_head(&md
->wait
);
2371 INIT_WORK(&md
->work
, dm_wq_work
);
2372 init_waitqueue_head(&md
->eventq
);
2373 init_completion(&md
->kobj_holder
.completion
);
2374 md
->kworker_task
= NULL
;
2376 md
->disk
->major
= _major
;
2377 md
->disk
->first_minor
= minor
;
2378 md
->disk
->fops
= &dm_blk_dops
;
2379 md
->disk
->queue
= md
->queue
;
2380 md
->disk
->private_data
= md
;
2381 sprintf(md
->disk
->disk_name
, "dm-%d", minor
);
2383 format_dev_t(md
->name
, MKDEV(_major
, minor
));
2385 md
->wq
= alloc_workqueue("kdmflush", WQ_MEM_RECLAIM
, 0);
2389 md
->bdev
= bdget_disk(md
->disk
, 0);
2393 bio_init(&md
->flush_bio
);
2394 md
->flush_bio
.bi_bdev
= md
->bdev
;
2395 md
->flush_bio
.bi_rw
= WRITE_FLUSH
;
2397 dm_stats_init(&md
->stats
);
2399 /* Populate the mapping, nobody knows we exist yet */
2400 spin_lock(&_minor_lock
);
2401 old_md
= idr_replace(&_minor_idr
, md
, minor
);
2402 spin_unlock(&_minor_lock
);
2404 BUG_ON(old_md
!= MINOR_ALLOCED
);
2409 cleanup_mapped_device(md
);
2413 module_put(THIS_MODULE
);
2419 static void unlock_fs(struct mapped_device
*md
);
2421 static void free_dev(struct mapped_device
*md
)
2423 int minor
= MINOR(disk_devt(md
->disk
));
2427 cleanup_mapped_device(md
);
2429 blk_mq_free_tag_set(&md
->tag_set
);
2431 free_table_devices(&md
->table_devices
);
2432 dm_stats_cleanup(&md
->stats
);
2435 module_put(THIS_MODULE
);
2439 static void __bind_mempools(struct mapped_device
*md
, struct dm_table
*t
)
2441 struct dm_md_mempools
*p
= dm_table_get_md_mempools(t
);
2444 /* The md already has necessary mempools. */
2445 if (dm_table_get_type(t
) == DM_TYPE_BIO_BASED
) {
2447 * Reload bioset because front_pad may have changed
2448 * because a different table was loaded.
2450 bioset_free(md
->bs
);
2455 * There's no need to reload with request-based dm
2456 * because the size of front_pad doesn't change.
2457 * Note for future: If you are to reload bioset,
2458 * prep-ed requests in the queue may refer
2459 * to bio from the old bioset, so you must walk
2460 * through the queue to unprep.
2465 BUG_ON(!p
|| md
->io_pool
|| md
->rq_pool
|| md
->bs
);
2467 md
->io_pool
= p
->io_pool
;
2469 md
->rq_pool
= p
->rq_pool
;
2475 /* mempool bind completed, no longer need any mempools in the table */
2476 dm_table_free_md_mempools(t
);
2480 * Bind a table to the device.
2482 static void event_callback(void *context
)
2484 unsigned long flags
;
2486 struct mapped_device
*md
= (struct mapped_device
*) context
;
2488 spin_lock_irqsave(&md
->uevent_lock
, flags
);
2489 list_splice_init(&md
->uevent_list
, &uevents
);
2490 spin_unlock_irqrestore(&md
->uevent_lock
, flags
);
2492 dm_send_uevents(&uevents
, &disk_to_dev(md
->disk
)->kobj
);
2494 atomic_inc(&md
->event_nr
);
2495 wake_up(&md
->eventq
);
2499 * Protected by md->suspend_lock obtained by dm_swap_table().
2501 static void __set_size(struct mapped_device
*md
, sector_t size
)
2503 set_capacity(md
->disk
, size
);
2505 i_size_write(md
->bdev
->bd_inode
, (loff_t
)size
<< SECTOR_SHIFT
);
2509 * Return 1 if the queue has a compulsory merge_bvec_fn function.
2511 * If this function returns 0, then the device is either a non-dm
2512 * device without a merge_bvec_fn, or it is a dm device that is
2513 * able to split any bios it receives that are too big.
2515 int dm_queue_merge_is_compulsory(struct request_queue
*q
)
2517 struct mapped_device
*dev_md
;
2519 if (!q
->merge_bvec_fn
)
2522 if (q
->make_request_fn
== dm_make_request
) {
2523 dev_md
= q
->queuedata
;
2524 if (test_bit(DMF_MERGE_IS_OPTIONAL
, &dev_md
->flags
))
2531 static int dm_device_merge_is_compulsory(struct dm_target
*ti
,
2532 struct dm_dev
*dev
, sector_t start
,
2533 sector_t len
, void *data
)
2535 struct block_device
*bdev
= dev
->bdev
;
2536 struct request_queue
*q
= bdev_get_queue(bdev
);
2538 return dm_queue_merge_is_compulsory(q
);
/*
 * Return 1 if it is acceptable to ignore merge_bvec_fn based
 * on the properties of the underlying devices.
 */
static int dm_table_merge_is_optional(struct dm_table *table)
{
	unsigned i = 0;
	struct dm_target *ti;

	while (i < dm_table_get_num_targets(table)) {
		ti = dm_table_get_target(table, i++);

		if (ti->type->iterate_devices &&
		    ti->type->iterate_devices(ti, dm_device_merge_is_compulsory, NULL))
			return 0;
	}

	return 1;
}
/*
 * Returns old map, which caller must destroy.
 */
static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
			       struct queue_limits *limits)
{
	struct dm_table *old_map;
	struct request_queue *q = md->queue;
	sector_t size;
	int merge_is_optional;

	size = dm_table_get_size(t);

	/*
	 * Wipe any geometry if the size of the table changed.
	 */
	if (size != dm_get_size(md))
		memset(&md->geometry, 0, sizeof(md->geometry));

	__set_size(md, size);

	dm_table_event_callback(t, event_callback, md);

	/*
	 * The queue hasn't been stopped yet, if the old table type wasn't
	 * for request-based during suspension.  So stop it to prevent
	 * I/O mapping before resume.
	 * This must be done before setting the queue restrictions,
	 * because request-based dm may be run just after the setting.
	 */
	if (dm_table_request_based(t))
		stop_queue(q);

	__bind_mempools(md, t);

	merge_is_optional = dm_table_merge_is_optional(t);

	old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
	rcu_assign_pointer(md->map, t);
	md->immutable_target_type = dm_table_get_immutable_target_type(t);

	dm_table_set_restrictions(t, q, limits);
	if (merge_is_optional)
		set_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
	else
		clear_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
	if (old_map)
		dm_sync_table(md);

	return old_map;
}
/*
 * Returns unbound table for the caller to free.
 */
static struct dm_table *__unbind(struct mapped_device *md)
{
	struct dm_table *map = rcu_dereference_protected(md->map, 1);

	if (!map)
		return NULL;

	dm_table_event_callback(map, NULL, NULL);
	RCU_INIT_POINTER(md->map, NULL);
	dm_sync_table(md);

	return map;
}
/*
 * Constructor for a new device.
 */
int dm_create(int minor, struct mapped_device **result)
{
	struct mapped_device *md;

	md = alloc_dev(minor);
	if (!md)
		return -ENXIO;

	dm_sysfs_init(md);

	*result = md;
	return 0;
}
/*
 * Functions to manage md->type.
 * All are required to hold md->type_lock.
 */
void dm_lock_md_type(struct mapped_device *md)
{
	mutex_lock(&md->type_lock);
}

void dm_unlock_md_type(struct mapped_device *md)
{
	mutex_unlock(&md->type_lock);
}

void dm_set_md_type(struct mapped_device *md, unsigned type)
{
	BUG_ON(!mutex_is_locked(&md->type_lock));
	md->type = type;
}

unsigned dm_get_md_type(struct mapped_device *md)
{
	BUG_ON(!mutex_is_locked(&md->type_lock));
	return md->type;
}
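
/*
 * Illustrative sketch (not part of this file): a hypothetical caller, such as
 * table-load code, is expected to pin the device type under md->type_lock
 * before publishing it.  dm_example_pin_type() is an invented name used only
 * to show the locking pattern around dm_set_md_type()/dm_get_md_type().
 */
static inline void dm_example_pin_type(struct mapped_device *md, unsigned type)
{
	dm_lock_md_type(md);			/* take md->type_lock */
	if (dm_get_md_type(md) == DM_TYPE_NONE)
		dm_set_md_type(md, type);	/* the first bound table decides the type */
	dm_unlock_md_type(md);
}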
struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
{
	return md->immutable_target_type;
}

/*
 * The queue_limits are only valid as long as you have a reference
 * count on 'md'.
 */
struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
{
	BUG_ON(!atomic_read(&md->holders));
	return &md->queue->limits;
}
EXPORT_SYMBOL_GPL(dm_get_queue_limits);
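
/*
 * Illustrative sketch (not part of this file): a target could consult the
 * live queue_limits of its mapped device while it holds a reference on the
 * md, for example when sizing its own I/O.  dm_example_max_sectors() is a
 * hypothetical helper; only dm_get_queue_limits() and dm_table_get_md() are
 * real interfaces here.
 */
static inline unsigned int dm_example_max_sectors(struct dm_target *ti)
{
	struct mapped_device *md = dm_table_get_md(ti->table);
	struct queue_limits *limits = dm_get_queue_limits(md);

	return limits->max_sectors;
}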
static void init_rq_based_worker_thread(struct mapped_device *md)
{
	/* Initialize the request-based DM worker thread */
	init_kthread_worker(&md->kworker);
	md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
				       "kdmwork-%s", dm_device_name(md));
}

/*
 * Fully initialize a request-based queue (->elevator, ->request_fn, etc).
 */
static int dm_init_request_based_queue(struct mapped_device *md)
{
	struct request_queue *q = NULL;

	/* Fully initialize the queue */
	q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL);
	if (!q)
		return -EINVAL;

	/* disable dm_request_fn's merge heuristic by default */
	md->seq_rq_merge_deadline_usecs = 0;

	md->queue = q;
	dm_init_old_md_queue(md);
	blk_queue_softirq_done(md->queue, dm_softirq_done);
	blk_queue_prep_rq(md->queue, dm_prep_fn);

	init_rq_based_worker_thread(md);

	elv_register_queue(md->queue);

	return 0;
}
static int dm_mq_init_request(void *data, struct request *rq,
			      unsigned int hctx_idx, unsigned int request_idx,
			      unsigned int numa_node)
{
	struct mapped_device *md = data;
	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);

	/*
	 * Must initialize md member of tio, otherwise it won't
	 * be available in dm_mq_queue_rq.
	 */
	tio->md = md;

	return 0;
}
static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
			  const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
	struct mapped_device *md = tio->md;
	int srcu_idx;
	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
	struct dm_target *ti;
	sector_t pos;

	/* always use block 0 to find the target for flushes for now */
	pos = 0;
	if (!(rq->cmd_flags & REQ_FLUSH))
		pos = blk_rq_pos(rq);

	ti = dm_table_find_target(map, pos);
	if (!dm_target_is_valid(ti)) {
		dm_put_live_table(md, srcu_idx);
		DMERR_LIMIT("request attempted access beyond the end of device");
		/*
		 * Must perform setup, that rq_completed() requires,
		 * before returning BLK_MQ_RQ_QUEUE_ERROR
		 */
		dm_start_request(md, rq);
		return BLK_MQ_RQ_QUEUE_ERROR;
	}
	dm_put_live_table(md, srcu_idx);

	if (ti->type->busy && ti->type->busy(ti))
		return BLK_MQ_RQ_QUEUE_BUSY;

	dm_start_request(md, rq);

	/* Init tio using md established in .init_request */
	init_tio(tio, rq, md);

	/*
	 * Establish tio->ti before queuing work (map_tio_request)
	 * or making direct call to map_request().
	 */
	tio->ti = ti;

	/* Clone the request if underlying devices aren't blk-mq */
	if (dm_table_get_type(map) == DM_TYPE_REQUEST_BASED) {
		/* clone request is allocated at the end of the pdu */
		tio->clone = (void *)blk_mq_rq_to_pdu(rq) + sizeof(struct dm_rq_target_io);
		(void) clone_rq(rq, md, tio, GFP_ATOMIC);
		queue_kthread_work(&md->kworker, &tio->work);
	} else {
		/* Direct call is fine since .queue_rq allows allocations */
		if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) {
			/* Undo dm_start_request() before requeuing */
			rq_end_stats(md, rq);
			rq_completed(md, rq_data_dir(rq), false);
			return BLK_MQ_RQ_QUEUE_BUSY;
		}
	}

	return BLK_MQ_RQ_QUEUE_OK;
}
static struct blk_mq_ops dm_mq_ops = {
	.queue_rq = dm_mq_queue_rq,
	.map_queue = blk_mq_map_queue,
	.complete = dm_softirq_done,
	.init_request = dm_mq_init_request,
};
static int dm_init_request_based_blk_mq_queue(struct mapped_device *md)
{
	unsigned md_type = dm_get_md_type(md);
	struct request_queue *q;
	int err;

	memset(&md->tag_set, 0, sizeof(md->tag_set));
	md->tag_set.ops = &dm_mq_ops;
	md->tag_set.queue_depth = BLKDEV_MAX_RQ;
	md->tag_set.numa_node = NUMA_NO_NODE;
	md->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
	md->tag_set.nr_hw_queues = 1;
	if (md_type == DM_TYPE_REQUEST_BASED) {
		/* make the memory for non-blk-mq clone part of the pdu */
		md->tag_set.cmd_size = sizeof(struct dm_rq_target_io) + sizeof(struct request);
	} else
		md->tag_set.cmd_size = sizeof(struct dm_rq_target_io);
	md->tag_set.driver_data = md;

	err = blk_mq_alloc_tag_set(&md->tag_set);
	if (err)
		return err;

	q = blk_mq_init_allocated_queue(&md->tag_set, md->queue);
	if (IS_ERR(q)) {
		err = PTR_ERR(q);
		goto out_tag_set;
	}
	md->queue = q;
	dm_init_md_queue(md);

	/* backfill 'mq' sysfs registration normally done in blk_register_queue */
	blk_mq_register_disk(md->disk);

	if (md_type == DM_TYPE_REQUEST_BASED)
		init_rq_based_worker_thread(md);

	return 0;

out_tag_set:
	blk_mq_free_tag_set(&md->tag_set);
	return err;
}
static unsigned filter_md_type(unsigned type, struct mapped_device *md)
{
	if (type == DM_TYPE_BIO_BASED)
		return type;

	return !md->use_blk_mq ? DM_TYPE_REQUEST_BASED : DM_TYPE_MQ_REQUEST_BASED;
}

/*
 * Setup the DM device's queue based on md's type
 */
int dm_setup_md_queue(struct mapped_device *md)
{
	int r;
	unsigned md_type = filter_md_type(dm_get_md_type(md), md);

	switch (md_type) {
	case DM_TYPE_REQUEST_BASED:
		r = dm_init_request_based_queue(md);
		if (r) {
			DMWARN("Cannot initialize queue for request-based mapped device");
			return r;
		}
		break;
	case DM_TYPE_MQ_REQUEST_BASED:
		r = dm_init_request_based_blk_mq_queue(md);
		if (r) {
			DMWARN("Cannot initialize queue for request-based blk-mq mapped device");
			return r;
		}
		break;
	case DM_TYPE_BIO_BASED:
		dm_init_old_md_queue(md);
		blk_queue_make_request(md->queue, dm_make_request);
		blk_queue_merge_bvec(md->queue, dm_merge_bvec);
		break;
	}

	return 0;
}
struct mapped_device *dm_get_md(dev_t dev)
{
	struct mapped_device *md;
	unsigned minor = MINOR(dev);

	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
		return NULL;

	spin_lock(&_minor_lock);

	md = idr_find(&_minor_idr, minor);
	if (md) {
		if ((md == MINOR_ALLOCED ||
		     (MINOR(disk_devt(dm_disk(md))) != minor) ||
		     dm_deleting_md(md) ||
		     test_bit(DMF_FREEING, &md->flags))) {
			md = NULL;
			goto out;
		}
		dm_get(md);
	}

out:
	spin_unlock(&_minor_lock);

	return md;
}
EXPORT_SYMBOL_GPL(dm_get_md);
void *dm_get_mdptr(struct mapped_device *md)
{
	return md->interface_ptr;
}

void dm_set_mdptr(struct mapped_device *md, void *ptr)
{
	md->interface_ptr = ptr;
}

void dm_get(struct mapped_device *md)
{
	atomic_inc(&md->holders);
	BUG_ON(test_bit(DMF_FREEING, &md->flags));
}
int dm_hold(struct mapped_device *md)
{
	spin_lock(&_minor_lock);
	if (test_bit(DMF_FREEING, &md->flags)) {
		spin_unlock(&_minor_lock);
		return -EBUSY;
	}
	dm_get(md);
	spin_unlock(&_minor_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(dm_hold);
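
/*
 * Illustrative sketch (not part of this file): the usual lookup pattern a
 * hypothetical in-kernel user would follow -- resolve a dev_t to an md, use
 * it, then drop the reference.  Only dm_get_md(), dm_device_name() and
 * dm_put() are real interfaces here; the helper name is invented.
 */
static inline int dm_example_report_name(dev_t dev)
{
	struct mapped_device *md = dm_get_md(dev);

	if (!md)
		return -ENXIO;

	DMINFO("found device %s", dm_device_name(md));
	dm_put(md);
	return 0;
}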
const char *dm_device_name(struct mapped_device *md)
{
	return md->name;
}
EXPORT_SYMBOL_GPL(dm_device_name);

static void __dm_destroy(struct mapped_device *md, bool wait)
{
	struct dm_table *map;
	int srcu_idx;

	might_sleep();

	map = dm_get_live_table(md, &srcu_idx);

	spin_lock(&_minor_lock);
	idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
	set_bit(DMF_FREEING, &md->flags);
	spin_unlock(&_minor_lock);

	if (dm_request_based(md) && md->kworker_task)
		flush_kthread_worker(&md->kworker);

	/*
	 * Take suspend_lock so that presuspend and postsuspend methods
	 * do not race with internal suspend.
	 */
	mutex_lock(&md->suspend_lock);
	if (!dm_suspended_md(md)) {
		dm_table_presuspend_targets(map);
		dm_table_postsuspend_targets(map);
	}
	mutex_unlock(&md->suspend_lock);

	/* dm_put_live_table must be before msleep, otherwise deadlock is possible */
	dm_put_live_table(md, srcu_idx);

	/*
	 * Rare, but there may be I/O requests still going to complete,
	 * for example.  Wait for all references to disappear.
	 * No one should increment the reference count of the mapped_device,
	 * after the mapped_device state becomes DMF_FREEING.
	 */
	if (wait)
		while (atomic_read(&md->holders))
			msleep(1);
	else if (atomic_read(&md->holders))
		DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
		       dm_device_name(md), atomic_read(&md->holders));

	dm_sysfs_exit(md);
	dm_table_destroy(__unbind(md));
	free_dev(md);
}
void dm_destroy(struct mapped_device *md)
{
	__dm_destroy(md, true);
}

void dm_destroy_immediate(struct mapped_device *md)
{
	__dm_destroy(md, false);
}

void dm_put(struct mapped_device *md)
{
	atomic_dec(&md->holders);
}
EXPORT_SYMBOL_GPL(dm_put);

static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
{
	int r = 0;
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&md->wait, &wait);

	while (1) {
		set_current_state(interruptible);

		if (!md_in_flight(md))
			break;

		if (interruptible == TASK_INTERRUPTIBLE &&
		    signal_pending(current)) {
			r = -EINTR;
			break;
		}

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	remove_wait_queue(&md->wait, &wait);

	return r;
}
/*
 * Process the deferred bios
 */
static void dm_wq_work(struct work_struct *work)
{
	struct mapped_device *md = container_of(work, struct mapped_device,
						work);
	struct bio *c;
	int srcu_idx;
	struct dm_table *map;

	map = dm_get_live_table(md, &srcu_idx);

	while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
		spin_lock_irq(&md->deferred_lock);
		c = bio_list_pop(&md->deferred);
		spin_unlock_irq(&md->deferred_lock);

		if (!c)
			break;

		if (dm_request_based(md))
			generic_make_request(c);
		else
			__split_and_process_bio(md, map, c);
	}

	dm_put_live_table(md, srcu_idx);
}

static void dm_queue_flush(struct mapped_device *md)
{
	clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	smp_mb__after_atomic();
	queue_work(md->wq, &md->work);
}
/*
 * Swap in a new table, returning the old one for the caller to destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
	struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
	struct queue_limits limits;
	int r;

	mutex_lock(&md->suspend_lock);

	/* device must be suspended */
	if (!dm_suspended_md(md))
		goto out;

	/*
	 * If the new table has no data devices, retain the existing limits.
	 * This helps multipath with queue_if_no_path if all paths disappear,
	 * then new I/O is queued based on these limits, and then some paths
	 * come back.
	 */
	if (dm_table_has_no_data_devices(table)) {
		live_map = dm_get_live_table_fast(md);
		if (live_map)
			limits = md->queue->limits;
		dm_put_live_table_fast(md);
	}

	if (!live_map) {
		r = dm_calculate_queue_limits(table, &limits);
		if (r) {
			map = ERR_PTR(r);
			goto out;
		}
	}

	map = __bind(md, table, &limits);

out:
	mutex_unlock(&md->suspend_lock);
	return map;
}
/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
static int lock_fs(struct mapped_device *md)
{
	int r;

	WARN_ON(md->frozen_sb);

	md->frozen_sb = freeze_bdev(md->bdev);
	if (IS_ERR(md->frozen_sb)) {
		r = PTR_ERR(md->frozen_sb);
		md->frozen_sb = NULL;
		return r;
	}

	set_bit(DMF_FROZEN, &md->flags);

	return 0;
}

static void unlock_fs(struct mapped_device *md)
{
	if (!test_bit(DMF_FROZEN, &md->flags))
		return;

	thaw_bdev(md->bdev, md->frozen_sb);
	md->frozen_sb = NULL;
	clear_bit(DMF_FROZEN, &md->flags);
}
/*
 * If __dm_suspend returns 0, the device is completely quiescent
 * now. There is no request-processing activity. All new requests
 * are being added to md->deferred list.
 *
 * Caller must hold md->suspend_lock
 */
static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
			unsigned suspend_flags, int interruptible)
{
	bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
	bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
	int r;

	/*
	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
	 * This flag is cleared before dm_suspend returns.
	 */
	if (noflush)
		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);

	/*
	 * This gets reverted if there's an error later and the targets
	 * provide the .presuspend_undo hook.
	 */
	dm_table_presuspend_targets(map);

	/*
	 * Flush I/O to the device.
	 * Any I/O submitted after lock_fs() may not be flushed.
	 * noflush takes precedence over do_lockfs.
	 * (lock_fs() flushes I/Os and waits for them to complete.)
	 */
	if (!noflush && do_lockfs) {
		r = lock_fs(md);
		if (r) {
			dm_table_presuspend_undo_targets(map);
			return r;
		}
	}

	/*
	 * Here we must make sure that no processes are submitting requests
	 * to target drivers i.e. no one may be executing
	 * __split_and_process_bio. This is called from dm_request and
	 * dm_wq_work.
	 *
	 * To get all processes out of __split_and_process_bio in dm_request,
	 * we take the write lock. To prevent any process from reentering
	 * __split_and_process_bio from dm_request and quiesce the thread
	 * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
	 * flush_workqueue(md->wq).
	 */
	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	if (map)
		synchronize_srcu(&md->io_barrier);

	/*
	 * Stop md->queue before flushing md->wq in case request-based
	 * dm defers requests to md->wq from md->queue.
	 */
	if (dm_request_based(md)) {
		stop_queue(md->queue);
		if (md->kworker_task)
			flush_kthread_worker(&md->kworker);
	}

	flush_workqueue(md->wq);

	/*
	 * At this point no more requests are entering target request routines.
	 * We call dm_wait_for_completion to wait for all existing requests
	 * to finish.
	 */
	r = dm_wait_for_completion(md, interruptible);

	if (noflush)
		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
	if (map)
		synchronize_srcu(&md->io_barrier);

	/* were we interrupted ? */
	if (r < 0) {
		dm_queue_flush(md);

		if (dm_request_based(md))
			start_queue(md->queue);

		unlock_fs(md);
		dm_table_presuspend_undo_targets(map);
		/* pushback list is already flushed, so skip flush */
	}

	return r;
}
/*
 * We need to be able to change a mapping table under a mounted
 * filesystem.  For example we might want to move some data in
 * the background.  Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in
 * flight bios and ensure that any further io gets deferred.
 */
/*
 * Suspend mechanism in request-based dm.
 *
 * 1. Flush all I/Os by lock_fs() if needed.
 * 2. Stop dispatching any I/O by stopping the request_queue.
 * 3. Wait for all in-flight I/Os to be completed or requeued.
 *
 * To abort suspend, start the request_queue.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;
	int r = 0;

retry:
	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);

	if (dm_suspended_md(md)) {
		r = -EINVAL;
		goto out_unlock;
	}

	if (dm_suspended_internally_md(md)) {
		/* already internally suspended, wait for internal resume */
		mutex_unlock(&md->suspend_lock);
		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
		if (r)
			return r;
		goto retry;
	}

	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));

	r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE);
	if (r)
		goto out_unlock;

	set_bit(DMF_SUSPENDED, &md->flags);

	dm_table_postsuspend_targets(map);

out_unlock:
	mutex_unlock(&md->suspend_lock);
	return r;
}
static int __dm_resume(struct mapped_device *md, struct dm_table *map)
{
	if (map) {
		int r = dm_table_resume_targets(map);
		if (r)
			return r;
	}

	dm_queue_flush(md);

	/*
	 * Flushing deferred I/Os must be done after targets are resumed
	 * so that mapping of targets can work correctly.
	 * Request-based dm is queueing the deferred I/Os in its request_queue.
	 */
	if (dm_request_based(md))
		start_queue(md->queue);

	unlock_fs(md);

	return 0;
}

int dm_resume(struct mapped_device *md)
{
	int r = -EINVAL;
	struct dm_table *map = NULL;

retry:
	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);

	if (!dm_suspended_md(md))
		goto out;

	if (dm_suspended_internally_md(md)) {
		/* already internally suspended, wait for internal resume */
		mutex_unlock(&md->suspend_lock);
		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
		if (r)
			return r;
		goto retry;
	}

	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
	if (!map || !dm_table_get_size(map))
		goto out;

	r = __dm_resume(md, map);
	if (r)
		goto out;

	clear_bit(DMF_SUSPENDED, &md->flags);

out:
	mutex_unlock(&md->suspend_lock);

	return r;
}
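
/*
 * Illustrative sketch (not part of this file): the table-swap sequence a
 * hypothetical caller would use (the ioctl layer does something similar) --
 * suspend, swap in the pre-loaded table, destroy the old one, then resume.
 * Only dm_suspend(), dm_swap_table(), dm_table_destroy() and dm_resume() are
 * real interfaces here; the helper name is invented.
 */
static inline int dm_example_swap_and_resume(struct mapped_device *md,
					     struct dm_table *new_table)
{
	struct dm_table *old_map;
	int r;

	r = dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
	if (r)
		return r;

	old_map = dm_swap_table(md, new_table);
	if (IS_ERR(old_map)) {
		(void) dm_resume(md);
		return PTR_ERR(old_map);
	}
	if (old_map)
		dm_table_destroy(old_map);	/* caller owns the replaced table */

	return dm_resume(md);
}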
/*
 * Internal suspend/resume works like userspace-driven suspend. It waits
 * until all bios finish and prevents issuing new bios to the target drivers.
 * It may be used only from the kernel.
 */

static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;

	if (md->internal_suspend_count++)
		return; /* nested internal suspend */

	if (dm_suspended_md(md)) {
		set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
		return; /* nest suspend */
	}

	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));

	/*
	 * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is
	 * supported.  Properly supporting a TASK_INTERRUPTIBLE internal suspend
	 * would require changing .presuspend to return an error -- avoid this
	 * until there is a need for more elaborate variants of internal suspend.
	 */
	(void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE);

	set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);

	dm_table_postsuspend_targets(map);
}
static void __dm_internal_resume(struct mapped_device *md)
{
	BUG_ON(!md->internal_suspend_count);

	if (--md->internal_suspend_count)
		return; /* resume from nested internal suspend */

	if (dm_suspended_md(md))
		goto done; /* resume from nested suspend */

	/*
	 * NOTE: existing callers don't need to call dm_table_resume_targets
	 * (which may fail -- so best to avoid it for now by passing NULL map)
	 */
	(void) __dm_resume(md, NULL);

done:
	clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
	smp_mb__after_atomic();
	wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY);
}
void dm_internal_suspend_noflush(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	__dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG);
	mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush);

void dm_internal_resume(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	__dm_internal_resume(md);
	mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_resume);
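
/*
 * Illustrative sketch (not part of this file): targets such as dm-thin pair
 * these calls around operations that must not race with bios in flight.
 * dm_example_quiesced_update() is an invented name; the pool_md argument and
 * the update step are placeholders.
 */
static inline void dm_example_quiesced_update(struct mapped_device *pool_md)
{
	dm_internal_suspend_noflush(pool_md);

	/* ... perform the metadata update while no new bios are issued ... */

	dm_internal_resume(pool_md);
}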
/*
 * Fast variants of internal suspend/resume hold md->suspend_lock,
 * which prevents interaction with userspace-driven suspend.
 */

void dm_internal_suspend_fast(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
		return;

	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	synchronize_srcu(&md->io_barrier);
	flush_workqueue(md->wq);
	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL_GPL(dm_internal_suspend_fast);

void dm_internal_resume_fast(struct mapped_device *md)
{
	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
		goto done;

	dm_queue_flush(md);

done:
	mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
		      unsigned cookie)
{
	char udev_cookie[DM_COOKIE_LENGTH];
	char *envp[] = { udev_cookie, NULL };

	if (!cookie)
		return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
	else {
		snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
			 DM_COOKIE_ENV_VAR_NAME, cookie);
		return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
					  action, envp);
	}
}
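
/*
 * Illustrative sketch (not part of this file): how a hypothetical resume or
 * rename path would emit a CHANGE uevent carrying the userspace-supplied
 * cookie.  Only dm_kobject_uevent() and dm_device_name() are real interfaces
 * here; the helper name is invented.
 */
static inline void dm_example_announce_change(struct mapped_device *md,
					      unsigned cookie)
{
	if (dm_kobject_uevent(md, KOBJ_CHANGE, cookie))
		DMERR("%s: failed to send CHANGE uevent", dm_device_name(md));
}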
uint32_t dm_next_uevent_seq(struct mapped_device *md)
{
	return atomic_add_return(1, &md->uevent_seq);
}

uint32_t dm_get_event_nr(struct mapped_device *md)
{
	return atomic_read(&md->event_nr);
}

int dm_wait_event(struct mapped_device *md, int event_nr)
{
	return wait_event_interruptible(md->eventq,
			(event_nr != atomic_read(&md->event_nr)));
}
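
/*
 * Illustrative sketch (not part of this file): the poll-for-events pattern a
 * hypothetical caller would use -- snapshot the current event number, then
 * sleep until it changes.  Only dm_get_event_nr() and dm_wait_event() are
 * real interfaces here; the helper name is invented.
 */
static inline int dm_example_wait_for_next_event(struct mapped_device *md)
{
	uint32_t seen = dm_get_event_nr(md);

	/* returns -ERESTARTSYS if interrupted by a signal */
	return dm_wait_event(md, seen);
}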
void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
{
	unsigned long flags;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_add(elist, &md->uevent_list);
	spin_unlock_irqrestore(&md->uevent_lock, flags);
}
/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
	return md->disk;
}
EXPORT_SYMBOL_GPL(dm_disk);

struct kobject *dm_kobject(struct mapped_device *md)
{
	return &md->kobj_holder.kobj;
}

struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
{
	struct mapped_device *md;

	md = container_of(kobj, struct mapped_device, kobj_holder.kobj);

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md))
		return NULL;

	dm_get(md);
	return md;
}
int dm_suspended_md(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED, &md->flags);
}

int dm_suspended_internally_md(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
}

int dm_test_deferred_remove_flag(struct mapped_device *md)
{
	return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
}

int dm_suspended(struct dm_target *ti)
{
	return dm_suspended_md(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_suspended);

int dm_noflush_suspending(struct dm_target *ti)
{
	return __noflush_suspending(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);
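
/*
 * Illustrative sketch (not part of this file): multipath-style targets check
 * dm_noflush_suspending() on their error path so that, during a noflush
 * suspend, failed I/O is pushed back and retried after resume instead of
 * being completed with an error.  The helper name and return values below
 * only illustrate that pattern.
 */
static inline int dm_example_handle_failed_io(struct dm_target *ti, int error)
{
	if (error && dm_noflush_suspending(ti))
		return DM_ENDIO_REQUEUE;	/* push back, retry after resume */

	return error;
}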
struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned type,
					    unsigned integrity, unsigned per_bio_data_size)
{
	struct dm_md_mempools *pools = kzalloc(sizeof(*pools), GFP_KERNEL);
	struct kmem_cache *cachep = NULL;
	unsigned int pool_size = 0;
	unsigned int front_pad;

	if (!pools)
		return NULL;

	type = filter_md_type(type, md);

	switch (type) {
	case DM_TYPE_BIO_BASED:
		cachep = _io_cache;
		pool_size = dm_get_reserved_bio_based_ios();
		front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
		break;
	case DM_TYPE_REQUEST_BASED:
		cachep = _rq_tio_cache;
		pool_size = dm_get_reserved_rq_based_ios();
		pools->rq_pool = mempool_create_slab_pool(pool_size, _rq_cache);
		if (!pools->rq_pool)
			goto out;
		/* fall through to setup remaining rq-based pools */
	case DM_TYPE_MQ_REQUEST_BASED:
		if (!pool_size)
			pool_size = dm_get_reserved_rq_based_ios();
		front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
		/* per_bio_data_size is not used. See __bind_mempools(). */
		WARN_ON(per_bio_data_size != 0);
		break;
	default:
		BUG();
	}

	if (cachep) {
		pools->io_pool = mempool_create_slab_pool(pool_size, cachep);
		if (!pools->io_pool)
			goto out;
	}

	pools->bs = bioset_create_nobvec(pool_size, front_pad);
	if (!pools->bs)
		goto out;

	if (integrity && bioset_integrity_create(pools->bs, pool_size))
		goto out;

	return pools;

out:
	dm_free_md_mempools(pools);

	return NULL;
}
void dm_free_md_mempools(struct dm_md_mempools *pools)
{
	if (!pools)
		return;

	if (pools->io_pool)
		mempool_destroy(pools->io_pool);

	if (pools->rq_pool)
		mempool_destroy(pools->rq_pool);

	if (pools->bs)
		bioset_free(pools->bs);

	kfree(pools);
}
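
/*
 * Illustrative sketch (not part of this file): the mempool helpers are used
 * in matched pairs; a hypothetical caller that cannot bind the pools to a
 * table must release them again.  The helper name is invented, and the zero
 * integrity and per_bio_data_size arguments are placeholders.
 */
static inline int dm_example_make_pools(struct mapped_device *md)
{
	struct dm_md_mempools *pools;

	pools = dm_alloc_md_mempools(md, DM_TYPE_BIO_BASED, 0 /* integrity */,
				     0 /* per_bio_data_size */);
	if (!pools)
		return -ENOMEM;

	/* ... normally handed to a table; if that fails, free them again ... */
	dm_free_md_mempools(pools);
	return 0;
}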
static const struct block_device_operations dm_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.ioctl = dm_blk_ioctl,
	.getgeo = dm_blk_getgeo,
	.owner = THIS_MODULE
};

/*
 * module hooks
 */
module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");

module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");

module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");

module_param(use_blk_mq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices");

MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
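
/*
 * Usage note (illustrative, not from this file): the parameters above can be
 * set at load time, e.g. "modprobe dm_mod use_blk_mq=y", or on the kernel
 * command line as "dm_mod.use_blk_mq=y" when device-mapper is built in; the
 * writable ones are also expected under /sys/module/dm_mod/parameters/.
 */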