/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */
#include "dm-core.h"
#include "dm-rq.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched/signal.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/uio.h>
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/refcount.h>
#define DM_MSG_PREFIX "core"

/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24
static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_IDR(_minor_idr);

static DEFINE_SPINLOCK(_minor_lock);

static void do_deferred_remove(struct work_struct *w);

static DECLARE_WORK(deferred_remove_work, do_deferred_remove);

static struct workqueue_struct *deferred_remove_workqueue;

atomic_t dm_global_event_nr = ATOMIC_INIT(0);
DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq);
void dm_issue_global_event(void)
{
	atomic_inc(&dm_global_event_nr);
	wake_up(&dm_global_eventq);
}
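/*
 * Example (not from dm.c): dm_global_event_nr and dm_global_eventq form a
 * plain "event counter + waitqueue" pair.  A waiter samples the counter and
 * then sleeps until dm_issue_global_event() bumps it.  This is a minimal
 * sketch of the waiting side; the variable name "seen" is illustrative.
 */
#if 0
	unsigned int seen = atomic_read(&dm_global_event_nr);

	/* ... do work, then wait for the next device-mapper event ... */
	if (wait_event_interruptible(dm_global_eventq,
				     atomic_read(&dm_global_event_nr) != seen))
		return -ERESTARTSYS;
#endif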
/*
 * One of these is allocated (on-stack) per original bio.
 */
struct clone_info {
	/* ... */
	unsigned sector_count;
};

/*
 * One of these is allocated per clone bio.
 */
#define DM_TIO_MAGIC 7282014
struct dm_target_io {
	/* ... */
	unsigned target_bio_nr;
	/* ... */
	struct bio clone;
};

/*
 * One of these is allocated per original bio.
 * It contains the first clone used for that original.
 */
#define DM_IO_MAGIC 5191977
struct dm_io {
	/* ... */
	struct mapped_device *md;
	/* ... */
	unsigned long start_time;
	spinlock_t endio_lock;
	struct dm_stats_aux stats_aux;
	/* last member of dm_target_io is 'struct bio' */
	struct dm_target_io tio;
};
void *dm_per_bio_data(struct bio *bio, size_t data_size)
{
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);

	if (!tio->inside_dm_io)
		return (char *)bio - offsetof(struct dm_target_io, clone) - data_size;
	return (char *)bio - offsetof(struct dm_target_io, clone) - offsetof(struct dm_io, tio) - data_size;
}
EXPORT_SYMBOL_GPL(dm_per_bio_data);
struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
{
	struct dm_io *io = (struct dm_io *)((char *)data + data_size);

	if (io->magic == DM_IO_MAGIC)
		return (struct bio *)((char *)io + offsetof(struct dm_io, tio) + offsetof(struct dm_target_io, clone));
	BUG_ON(io->magic != DM_TIO_MAGIC);
	return (struct bio *)((char *)io + offsetof(struct dm_target_io, clone));
}
EXPORT_SYMBOL_GPL(dm_bio_from_per_bio_data);
unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
{
	return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
}
EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr);
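/*
 * Example (not part of dm.c): a sketch of how a bio-based target could use
 * the per-bio-data accessors above.  The target would set
 * ti->per_io_data_size in its ctr so that DM reserves the space in front of
 * each clone bio.  "example_target", "example_per_bio_data" and the two
 * callbacks below are hypothetical names, not a real target.
 */
#if 0
struct example_per_bio_data {
	sector_t original_sector;
};

struct example_target {
	struct dm_dev *dev;	/* acquired with dm_get_device() in the ctr */
};

static int example_map(struct dm_target *ti, struct bio *bio)
{
	struct example_target *et = ti->private;
	struct example_per_bio_data *pb =
		dm_per_bio_data(bio, sizeof(struct example_per_bio_data));

	/* Stash per-bio state for the end_io path. */
	pb->original_sector = bio->bi_iter.bi_sector;

	bio_set_dev(bio, et->dev->bdev);
	return DM_MAPIO_REMAPPED;
}

static int example_end_io(struct dm_target *ti, struct bio *bio,
			  blk_status_t *error)
{
	struct example_per_bio_data *pb =
		dm_per_bio_data(bio, sizeof(struct example_per_bio_data));

	pr_debug("io that started at sector %llu completed\n",
		 (unsigned long long)pb->original_sector);
	return DM_ENDIO_DONE;
}
#endif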
#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_DEFERRED_REMOVE 6
#define DMF_SUSPENDED_INTERNALLY 7

#define DM_NUMA_NODE NUMA_NO_NODE
static int dm_numa_node = DM_NUMA_NODE;
/*
 * For mempools pre-allocation at the table loading time.
 */
struct dm_md_mempools {
	struct bio_set bs;
	struct bio_set io_bs;
};

struct table_device {
	struct list_head list;
	refcount_t count;
	struct dm_dev dm_dev;
};

static struct kmem_cache *_rq_tio_cache;
static struct kmem_cache *_rq_cache;

/*
 * Bio-based DM's mempools' reserved IOs set by the user.
 */
#define RESERVED_BIO_BASED_IOS		16
static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;
static int __dm_get_module_param_int(int *module_param,
				     int min, int max)
{
	int param = READ_ONCE(*module_param);
	int modified_param = 0;
	bool modified = true;

	if (param < min)
		modified_param = min;
	else if (param > max)
		modified_param = max;
	else
		modified = false;

	if (modified) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}
unsigned __dm_get_module_param(unsigned *module_param,
			       unsigned def, unsigned max)
{
	unsigned param = READ_ONCE(*module_param);
	unsigned modified_param = 0;

	if (!param)
		modified_param = def;
	else if (param > max)
		modified_param = max;

	if (modified_param) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}
unsigned dm_get_reserved_bio_based_ios(void)
{
	return __dm_get_module_param(&reserved_bio_based_ios,
				     RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);

static unsigned dm_get_numa_node(void)
{
	return __dm_get_module_param_int(&dm_numa_node,
					 DM_NUMA_NODE, num_online_nodes() - 1);
}
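/*
 * Example (not shown in this excerpt): the helpers above are what make the
 * module parameters self-clamping.  The parameters themselves are registered
 * near the bottom of dm.c roughly like the sketch below; the exact
 * permission bits and description strings are from memory and may differ.
 */
#if 0
module_param(reserved_bio_based_ios, uint, 0644);
MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");

module_param(dm_numa_node, int, 0644);
MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations");
#endif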
static int __init local_init(void)
{
	int r = -ENOMEM;

	_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
	if (!_rq_tio_cache)
		return r;

	_rq_cache = kmem_cache_create("dm_old_clone_request", sizeof(struct request),
				      __alignof__(struct request), 0, NULL);
	if (!_rq_cache)
		goto out_free_rq_tio_cache;

	r = dm_uevent_init();
	if (r)
		goto out_free_rq_cache;

	deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
	if (!deferred_remove_workqueue) {
		r = -ENOMEM;
		goto out_uevent_exit;
	}

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_free_workqueue;

	if (!_major)
		_major = r;

	return 0;

out_free_workqueue:
	destroy_workqueue(deferred_remove_workqueue);
out_uevent_exit:
	dm_uevent_exit();
out_free_rq_cache:
	kmem_cache_destroy(_rq_cache);
out_free_rq_tio_cache:
	kmem_cache_destroy(_rq_tio_cache);

	return r;
}
static void local_exit(void)
{
	flush_scheduled_work();
	destroy_workqueue(deferred_remove_workqueue);

	kmem_cache_destroy(_rq_cache);
	kmem_cache_destroy(_rq_tio_cache);
	unregister_blkdev(_major, _name);

	DMINFO("cleaned up");
}
static int (*_inits[])(void) __initdata = {
	local_init,
	/* ... the other DM subsystem init hooks ... */
};

static void (*_exits[])(void) = {
	local_exit,
	/* ... the matching exit hooks, in the same order ... */
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);
	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();

	/*
	 * Should be empty by this point.
	 */
	idr_destroy(&_minor_idr);
}
/*
 * Block device functions
 */
int dm_deleting_md(struct mapped_device *md)
{
	return test_bit(DMF_DELETING, &md->flags);
}
static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);
out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}
static void dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = disk->private_data;
	if (WARN_ON(!md))
		goto out;

	if (atomic_dec_and_test(&md->open_count) &&
	    (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
		queue_work(deferred_remove_workqueue, &deferred_remove_work);

	dm_put(md);
out:
	spin_unlock(&_minor_lock);
}
int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}
/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md)) {
		r = -EBUSY;
		if (mark_deferred)
			set_bit(DMF_DEFERRED_REMOVE, &md->flags);
	} else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
		r = -EEXIST;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}
int dm_cancel_deferred_remove(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (test_bit(DMF_DELETING, &md->flags))
		r = -EBUSY;
	else
		clear_bit(DMF_DEFERRED_REMOVE, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}
static void do_deferred_remove(struct work_struct *w)
{
	dm_deferred_remove();
}

sector_t dm_get_size(struct mapped_device *md)
{
	return get_capacity(md->disk);
}
struct request_queue *dm_get_md_queue(struct mapped_device *md)
{
	return md->queue;
}

struct dm_stats *dm_get_stats(struct mapped_device *md)
{
	return &md->stats;
}
static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}
static int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
			       struct blk_zone *zones, unsigned int *nr_zones,
			       gfp_t gfp_mask)
{
#ifdef CONFIG_BLK_DEV_ZONED
	struct mapped_device *md = disk->private_data;
	struct dm_target *tgt;
	struct dm_table *map;
	int srcu_idx, ret;

	if (dm_suspended_md(md))
		return -EAGAIN;

	map = dm_get_live_table(md, &srcu_idx);
	if (!map)
		return -EIO;

	tgt = dm_table_find_target(map, sector);
	if (!dm_target_is_valid(tgt)) {
		ret = -EIO;
		goto out;
	}

	/*
	 * If we are executing this, we already know that the block device
	 * is a zoned device and so each target should have support for that
	 * type of drive. A missing report_zones method means that the target
	 * driver has a problem.
	 */
	if (WARN_ON(!tgt->type->report_zones)) {
		ret = -EIO;
		goto out;
	}

	/*
	 * blkdev_report_zones() will loop and call this again to cover all the
	 * zones of the target, eventually moving on to the next target.
	 * So there is no need to loop here trying to fill the entire array
	 * of zones.
	 */
	ret = tgt->type->report_zones(tgt, sector, zones,
				      nr_zones, gfp_mask);

out:
	dm_put_live_table(md, srcu_idx);
	return ret;
#else
	return -ENOTSUPP;
#endif
}
static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
			    struct block_device **bdev)
	__acquires(md->io_barrier)
{
	struct dm_target *tgt;
	struct dm_table *map;
	int r;

retry:
	r = -ENOTTY;
	map = dm_get_live_table(md, srcu_idx);
	if (!map || !dm_table_get_size(map))
		return r;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		return r;

	tgt = dm_table_get_target(map, 0);
	if (!tgt->type->prepare_ioctl)
		return r;

	if (dm_suspended_md(md))
		return -EAGAIN;

	r = tgt->type->prepare_ioctl(tgt, bdev);
	if (r == -ENOTCONN && !fatal_signal_pending(current)) {
		dm_put_live_table(md, *srcu_idx);
		msleep(10);
		goto retry;
	}

	return r;
}

static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx)
	__releases(md->io_barrier)
{
	dm_put_live_table(md, srcu_idx);
}
static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	int r, srcu_idx;

	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;

	if (r > 0) {
		/*
		 * Target determined this ioctl is being issued against a
		 * subset of the parent bdev; require extra privileges.
		 */
		if (!capable(CAP_SYS_RAWIO)) {
			DMWARN_LIMIT(
	"%s: sending ioctl %x to DM device without required privilege.",
				current->comm, cmd);
			r = -ENOIOCTLCMD;
			goto out;
		}
	}

	r = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}
583 static void start_io_acct(struct dm_io
*io
);
585 static struct dm_io
*alloc_io(struct mapped_device
*md
, struct bio
*bio
)
588 struct dm_target_io
*tio
;
591 clone
= bio_alloc_bioset(GFP_NOIO
, 0, &md
->io_bs
);
595 tio
= container_of(clone
, struct dm_target_io
, clone
);
596 tio
->inside_dm_io
= true;
599 io
= container_of(tio
, struct dm_io
, tio
);
600 io
->magic
= DM_IO_MAGIC
;
602 atomic_set(&io
->io_count
, 1);
605 spin_lock_init(&io
->endio_lock
);
612 static void free_io(struct mapped_device
*md
, struct dm_io
*io
)
614 bio_put(&io
->tio
.clone
);
617 static struct dm_target_io
*alloc_tio(struct clone_info
*ci
, struct dm_target
*ti
,
618 unsigned target_bio_nr
, gfp_t gfp_mask
)
620 struct dm_target_io
*tio
;
622 if (!ci
->io
->tio
.io
) {
623 /* the dm_target_io embedded in ci->io is available */
626 struct bio
*clone
= bio_alloc_bioset(gfp_mask
, 0, &ci
->io
->md
->bs
);
630 tio
= container_of(clone
, struct dm_target_io
, clone
);
631 tio
->inside_dm_io
= false;
634 tio
->magic
= DM_TIO_MAGIC
;
637 tio
->target_bio_nr
= target_bio_nr
;
642 static void free_tio(struct dm_target_io
*tio
)
644 if (tio
->inside_dm_io
)
646 bio_put(&tio
->clone
);
649 static bool md_in_flight_bios(struct mapped_device
*md
)
652 struct hd_struct
*part
= &dm_disk(md
)->part0
;
655 for_each_possible_cpu(cpu
) {
656 sum
+= part_stat_local_read_cpu(part
, in_flight
[0], cpu
);
657 sum
+= part_stat_local_read_cpu(part
, in_flight
[1], cpu
);
663 static bool md_in_flight(struct mapped_device
*md
)
665 if (queue_is_mq(md
->queue
))
666 return blk_mq_queue_inflight(md
->queue
);
668 return md_in_flight_bios(md
);
671 static void start_io_acct(struct dm_io
*io
)
673 struct mapped_device
*md
= io
->md
;
674 struct bio
*bio
= io
->orig_bio
;
676 io
->start_time
= jiffies
;
678 generic_start_io_acct(md
->queue
, bio_op(bio
), bio_sectors(bio
),
679 &dm_disk(md
)->part0
);
681 if (unlikely(dm_stats_used(&md
->stats
)))
682 dm_stats_account_io(&md
->stats
, bio_data_dir(bio
),
683 bio
->bi_iter
.bi_sector
, bio_sectors(bio
),
684 false, 0, &io
->stats_aux
);
687 static void end_io_acct(struct dm_io
*io
)
689 struct mapped_device
*md
= io
->md
;
690 struct bio
*bio
= io
->orig_bio
;
691 unsigned long duration
= jiffies
- io
->start_time
;
693 generic_end_io_acct(md
->queue
, bio_op(bio
), &dm_disk(md
)->part0
,
696 if (unlikely(dm_stats_used(&md
->stats
)))
697 dm_stats_account_io(&md
->stats
, bio_data_dir(bio
),
698 bio
->bi_iter
.bi_sector
, bio_sectors(bio
),
699 true, duration
, &io
->stats_aux
);
701 /* nudge anyone waiting on suspend queue */
702 if (unlikely(waitqueue_active(&md
->wait
)))
707 * Add the bio to the list of deferred io.
709 static void queue_io(struct mapped_device
*md
, struct bio
*bio
)
713 spin_lock_irqsave(&md
->deferred_lock
, flags
);
714 bio_list_add(&md
->deferred
, bio
);
715 spin_unlock_irqrestore(&md
->deferred_lock
, flags
);
716 queue_work(md
->wq
, &md
->work
);
720 * Everyone (including functions in this file), should use this
721 * function to access the md->map field, and make sure they call
722 * dm_put_live_table() when finished.
724 struct dm_table
*dm_get_live_table(struct mapped_device
*md
, int *srcu_idx
) __acquires(md
->io_barrier
)
726 *srcu_idx
= srcu_read_lock(&md
->io_barrier
);
728 return srcu_dereference(md
->map
, &md
->io_barrier
);
731 void dm_put_live_table(struct mapped_device
*md
, int srcu_idx
) __releases(md
->io_barrier
)
733 srcu_read_unlock(&md
->io_barrier
, srcu_idx
);
736 void dm_sync_table(struct mapped_device
*md
)
738 synchronize_srcu(&md
->io_barrier
);
739 synchronize_rcu_expedited();
743 * A fast alternative to dm_get_live_table/dm_put_live_table.
744 * The caller must not block between these two functions.
746 static struct dm_table
*dm_get_live_table_fast(struct mapped_device
*md
) __acquires(RCU
)
749 return rcu_dereference(md
->map
);
752 static void dm_put_live_table_fast(struct mapped_device
*md
) __releases(RCU
)
757 static char *_dm_claim_ptr
= "I belong to device-mapper";
760 * Open a table device so we can use it as a map destination.
762 static int open_table_device(struct table_device
*td
, dev_t dev
,
763 struct mapped_device
*md
)
765 struct block_device
*bdev
;
769 BUG_ON(td
->dm_dev
.bdev
);
771 bdev
= blkdev_get_by_dev(dev
, td
->dm_dev
.mode
| FMODE_EXCL
, _dm_claim_ptr
);
773 return PTR_ERR(bdev
);
775 r
= bd_link_disk_holder(bdev
, dm_disk(md
));
777 blkdev_put(bdev
, td
->dm_dev
.mode
| FMODE_EXCL
);
781 td
->dm_dev
.bdev
= bdev
;
782 td
->dm_dev
.dax_dev
= dax_get_by_host(bdev
->bd_disk
->disk_name
);
787 * Close a table device that we've been using.
789 static void close_table_device(struct table_device
*td
, struct mapped_device
*md
)
791 if (!td
->dm_dev
.bdev
)
794 bd_unlink_disk_holder(td
->dm_dev
.bdev
, dm_disk(md
));
795 blkdev_put(td
->dm_dev
.bdev
, td
->dm_dev
.mode
| FMODE_EXCL
);
796 put_dax(td
->dm_dev
.dax_dev
);
797 td
->dm_dev
.bdev
= NULL
;
798 td
->dm_dev
.dax_dev
= NULL
;
801 static struct table_device
*find_table_device(struct list_head
*l
, dev_t dev
,
803 struct table_device
*td
;
805 list_for_each_entry(td
, l
, list
)
806 if (td
->dm_dev
.bdev
->bd_dev
== dev
&& td
->dm_dev
.mode
== mode
)
812 int dm_get_table_device(struct mapped_device
*md
, dev_t dev
, fmode_t mode
,
813 struct dm_dev
**result
) {
815 struct table_device
*td
;
817 mutex_lock(&md
->table_devices_lock
);
818 td
= find_table_device(&md
->table_devices
, dev
, mode
);
820 td
= kmalloc_node(sizeof(*td
), GFP_KERNEL
, md
->numa_node_id
);
822 mutex_unlock(&md
->table_devices_lock
);
826 td
->dm_dev
.mode
= mode
;
827 td
->dm_dev
.bdev
= NULL
;
829 if ((r
= open_table_device(td
, dev
, md
))) {
830 mutex_unlock(&md
->table_devices_lock
);
835 format_dev_t(td
->dm_dev
.name
, dev
);
837 refcount_set(&td
->count
, 1);
838 list_add(&td
->list
, &md
->table_devices
);
840 refcount_inc(&td
->count
);
842 mutex_unlock(&md
->table_devices_lock
);
844 *result
= &td
->dm_dev
;
847 EXPORT_SYMBOL_GPL(dm_get_table_device
);
849 void dm_put_table_device(struct mapped_device
*md
, struct dm_dev
*d
)
851 struct table_device
*td
= container_of(d
, struct table_device
, dm_dev
);
853 mutex_lock(&md
->table_devices_lock
);
854 if (refcount_dec_and_test(&td
->count
)) {
855 close_table_device(td
, md
);
859 mutex_unlock(&md
->table_devices_lock
);
861 EXPORT_SYMBOL(dm_put_table_device
);
863 static void free_table_devices(struct list_head
*devices
)
865 struct list_head
*tmp
, *next
;
867 list_for_each_safe(tmp
, next
, devices
) {
868 struct table_device
*td
= list_entry(tmp
, struct table_device
, list
);
870 DMWARN("dm_destroy: %s still exists with %d references",
871 td
->dm_dev
.name
, refcount_read(&td
->count
));
877 * Get the geometry associated with a dm device
879 int dm_get_geometry(struct mapped_device
*md
, struct hd_geometry
*geo
)
887 * Set the geometry of a device.
889 int dm_set_geometry(struct mapped_device
*md
, struct hd_geometry
*geo
)
891 sector_t sz
= (sector_t
)geo
->cylinders
* geo
->heads
* geo
->sectors
;
893 if (geo
->start
> sz
) {
894 DMWARN("Start sector is beyond the geometry limits.");
903 static int __noflush_suspending(struct mapped_device
*md
)
905 return test_bit(DMF_NOFLUSH_SUSPENDING
, &md
->flags
);
909 * Decrements the number of outstanding ios that a bio has been
910 * cloned into, completing the original io if necc.
912 static void dec_pending(struct dm_io
*io
, blk_status_t error
)
915 blk_status_t io_error
;
917 struct mapped_device
*md
= io
->md
;
919 /* Push-back supersedes any I/O errors */
920 if (unlikely(error
)) {
921 spin_lock_irqsave(&io
->endio_lock
, flags
);
922 if (!(io
->status
== BLK_STS_DM_REQUEUE
&& __noflush_suspending(md
)))
924 spin_unlock_irqrestore(&io
->endio_lock
, flags
);
927 if (atomic_dec_and_test(&io
->io_count
)) {
928 if (io
->status
== BLK_STS_DM_REQUEUE
) {
930 * Target requested pushing back the I/O.
932 spin_lock_irqsave(&md
->deferred_lock
, flags
);
933 if (__noflush_suspending(md
))
934 /* NOTE early return due to BLK_STS_DM_REQUEUE below */
935 bio_list_add_head(&md
->deferred
, io
->orig_bio
);
937 /* noflush suspend was interrupted. */
938 io
->status
= BLK_STS_IOERR
;
939 spin_unlock_irqrestore(&md
->deferred_lock
, flags
);
942 io_error
= io
->status
;
947 if (io_error
== BLK_STS_DM_REQUEUE
)
950 if ((bio
->bi_opf
& REQ_PREFLUSH
) && bio
->bi_iter
.bi_size
) {
952 * Preflush done for flush with data, reissue
953 * without REQ_PREFLUSH.
955 bio
->bi_opf
&= ~REQ_PREFLUSH
;
958 /* done with normal IO or empty flush */
960 bio
->bi_status
= io_error
;
966 void disable_write_same(struct mapped_device
*md
)
968 struct queue_limits
*limits
= dm_get_queue_limits(md
);
970 /* device doesn't really support WRITE SAME, disable it */
971 limits
->max_write_same_sectors
= 0;
974 void disable_write_zeroes(struct mapped_device
*md
)
976 struct queue_limits
*limits
= dm_get_queue_limits(md
);
978 /* device doesn't really support WRITE ZEROES, disable it */
979 limits
->max_write_zeroes_sectors
= 0;
982 static void clone_endio(struct bio
*bio
)
984 blk_status_t error
= bio
->bi_status
;
985 struct dm_target_io
*tio
= container_of(bio
, struct dm_target_io
, clone
);
986 struct dm_io
*io
= tio
->io
;
987 struct mapped_device
*md
= tio
->io
->md
;
988 dm_endio_fn endio
= tio
->ti
->type
->end_io
;
990 if (unlikely(error
== BLK_STS_TARGET
) && md
->type
!= DM_TYPE_NVME_BIO_BASED
) {
991 if (bio_op(bio
) == REQ_OP_WRITE_SAME
&&
992 !bio
->bi_disk
->queue
->limits
.max_write_same_sectors
)
993 disable_write_same(md
);
994 if (bio_op(bio
) == REQ_OP_WRITE_ZEROES
&&
995 !bio
->bi_disk
->queue
->limits
.max_write_zeroes_sectors
)
996 disable_write_zeroes(md
);
1000 int r
= endio(tio
->ti
, bio
, &error
);
1002 case DM_ENDIO_REQUEUE
:
1003 error
= BLK_STS_DM_REQUEUE
;
1007 case DM_ENDIO_INCOMPLETE
:
1008 /* The target will handle the io */
1011 DMWARN("unimplemented target endio return value: %d", r
);
1017 dec_pending(io
, error
);
1021 * Return maximum size of I/O possible at the supplied sector up to the current
1024 static sector_t
max_io_len_target_boundary(sector_t sector
, struct dm_target
*ti
)
1026 sector_t target_offset
= dm_target_offset(ti
, sector
);
1028 return ti
->len
- target_offset
;
1031 static sector_t
max_io_len(sector_t sector
, struct dm_target
*ti
)
1033 sector_t len
= max_io_len_target_boundary(sector
, ti
);
1034 sector_t offset
, max_len
;
1037 * Does the target need to split even further?
1039 if (ti
->max_io_len
) {
1040 offset
= dm_target_offset(ti
, sector
);
1041 if (unlikely(ti
->max_io_len
& (ti
->max_io_len
- 1)))
1042 max_len
= sector_div(offset
, ti
->max_io_len
);
1044 max_len
= offset
& (ti
->max_io_len
- 1);
1045 max_len
= ti
->max_io_len
- max_len
;
int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
{
	if (len > UINT_MAX) {
		DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
		      (unsigned long long)len, UINT_MAX);
		ti->error = "Maximum size of target IO is too large";
		return -EINVAL;
	}

	/*
	 * BIO based queue uses its own splitting. When multipage bvecs
	 * is switched on, size of the incoming bio may be too big to
	 * be handled in some targets, such as crypt.
	 *
	 * When these targets are ready for the big bio, we can remove
	 * the limit.
	 */
	ti->max_io_len = min_t(uint32_t, len, BIO_MAX_PAGES * PAGE_SIZE);

	return 0;
}
EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
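/*
 * Example (not part of dm.c): a target constructor would typically call
 * dm_set_target_max_io_len() with its chunk size so that incoming bios are
 * split on chunk boundaries.  "example_ctr" and "chunk_sectors" are
 * hypothetical; argument parsing and device acquisition are omitted.
 */
#if 0
static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	sector_t chunk_sectors = 8;	/* e.g. 4KiB chunks */
	int r;

	r = dm_set_target_max_io_len(ti, chunk_sectors);
	if (r)
		return r;

	/* ... parse argv, dm_get_device(), set ti->private ... */
	return 0;
}
#endif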
1077 static struct dm_target
*dm_dax_get_live_target(struct mapped_device
*md
,
1078 sector_t sector
, int *srcu_idx
)
1079 __acquires(md
->io_barrier
)
1081 struct dm_table
*map
;
1082 struct dm_target
*ti
;
1084 map
= dm_get_live_table(md
, srcu_idx
);
1088 ti
= dm_table_find_target(map
, sector
);
1089 if (!dm_target_is_valid(ti
))
1095 static long dm_dax_direct_access(struct dax_device
*dax_dev
, pgoff_t pgoff
,
1096 long nr_pages
, void **kaddr
, pfn_t
*pfn
)
1098 struct mapped_device
*md
= dax_get_private(dax_dev
);
1099 sector_t sector
= pgoff
* PAGE_SECTORS
;
1100 struct dm_target
*ti
;
1101 long len
, ret
= -EIO
;
1104 ti
= dm_dax_get_live_target(md
, sector
, &srcu_idx
);
1108 if (!ti
->type
->direct_access
)
1110 len
= max_io_len(sector
, ti
) / PAGE_SECTORS
;
1113 nr_pages
= min(len
, nr_pages
);
1114 ret
= ti
->type
->direct_access(ti
, pgoff
, nr_pages
, kaddr
, pfn
);
1117 dm_put_live_table(md
, srcu_idx
);
1122 static size_t dm_dax_copy_from_iter(struct dax_device
*dax_dev
, pgoff_t pgoff
,
1123 void *addr
, size_t bytes
, struct iov_iter
*i
)
1125 struct mapped_device
*md
= dax_get_private(dax_dev
);
1126 sector_t sector
= pgoff
* PAGE_SECTORS
;
1127 struct dm_target
*ti
;
1131 ti
= dm_dax_get_live_target(md
, sector
, &srcu_idx
);
1135 if (!ti
->type
->dax_copy_from_iter
) {
1136 ret
= copy_from_iter(addr
, bytes
, i
);
1139 ret
= ti
->type
->dax_copy_from_iter(ti
, pgoff
, addr
, bytes
, i
);
1141 dm_put_live_table(md
, srcu_idx
);
1146 static size_t dm_dax_copy_to_iter(struct dax_device
*dax_dev
, pgoff_t pgoff
,
1147 void *addr
, size_t bytes
, struct iov_iter
*i
)
1149 struct mapped_device
*md
= dax_get_private(dax_dev
);
1150 sector_t sector
= pgoff
* PAGE_SECTORS
;
1151 struct dm_target
*ti
;
1155 ti
= dm_dax_get_live_target(md
, sector
, &srcu_idx
);
1159 if (!ti
->type
->dax_copy_to_iter
) {
1160 ret
= copy_to_iter(addr
, bytes
, i
);
1163 ret
= ti
->type
->dax_copy_to_iter(ti
, pgoff
, addr
, bytes
, i
);
1165 dm_put_live_table(md
, srcu_idx
);
/*
 * A target may call dm_accept_partial_bio only from the map routine.  It is
 * allowed for all bio types except REQ_PREFLUSH and REQ_OP_ZONE_RESET.
 *
 * dm_accept_partial_bio informs the dm that the target only wants to process
 * additional n_sectors sectors of the bio and the rest of the data should be
 * sent in a next bio.
 *
 * A diagram that explains the arithmetics:
 * +--------------------+---------------+-------+
 * |         1          |       2       |   3   |
 * +--------------------+---------------+-------+
 *
 * <-------------- *tio->len_ptr --------------->
 *                      <------- bi_size ------->
 *                      <-- n_sectors -->
 *
 * Region 1 was already iterated over with bio_advance or similar function.
 *	(it may be empty if the target doesn't use bio_advance)
 * Region 2 is the remaining bio size that the target wants to process.
 *	(it may be empty if region 1 is non-empty, although there is no reason
 *	 to make it empty)
 * The target requires that region 3 is to be sent in the next bio.
 *
 * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
 * the partially processed part (the sum of regions 1+2) must be the same for all
 * copies of the bio.
 */
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
{
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;

	BUG_ON(bio->bi_opf & REQ_PREFLUSH);
	BUG_ON(bi_size > *tio->len_ptr);
	BUG_ON(n_sectors > bi_size);

	*tio->len_ptr -= bi_size - n_sectors;
	bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
}
EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
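/*
 * Example (not part of dm.c): a map routine that can only handle part of a
 * bio calls dm_accept_partial_bio() and returns DM_MAPIO_REMAPPED for the
 * accepted front part; DM core then resubmits the remainder as a new bio.
 * "example_target" and "example_max_sectors()" are hypothetical.
 */
#if 0
static int example_map(struct dm_target *ti, struct bio *bio)
{
	struct example_target *et = ti->private;
	unsigned max_sectors = example_max_sectors(et, bio->bi_iter.bi_sector);

	if (bio_sectors(bio) > max_sectors)
		dm_accept_partial_bio(bio, max_sectors);

	bio_set_dev(bio, et->dev->bdev);
	return DM_MAPIO_REMAPPED;
}
#endif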
1211 * The zone descriptors obtained with a zone report indicate
1212 * zone positions within the underlying device of the target. The zone
1213 * descriptors must be remapped to match their position within the dm device.
1214 * The caller target should obtain the zones information using
1215 * blkdev_report_zones() to ensure that remapping for partition offset is
1218 void dm_remap_zone_report(struct dm_target
*ti
, sector_t start
,
1219 struct blk_zone
*zones
, unsigned int *nr_zones
)
1221 #ifdef CONFIG_BLK_DEV_ZONED
1222 struct blk_zone
*zone
;
1223 unsigned int nrz
= *nr_zones
;
1227 * Remap the start sector and write pointer position of the zones in
1228 * the array. Since we may have obtained from the target underlying
1229 * device more zones that the target size, also adjust the number
1232 for (i
= 0; i
< nrz
; i
++) {
1234 if (zone
->start
>= start
+ ti
->len
) {
1235 memset(zone
, 0, sizeof(struct blk_zone
) * (nrz
- i
));
1239 zone
->start
= zone
->start
+ ti
->begin
- start
;
1240 if (zone
->type
== BLK_ZONE_TYPE_CONVENTIONAL
)
1243 if (zone
->cond
== BLK_ZONE_COND_FULL
)
1244 zone
->wp
= zone
->start
+ zone
->len
;
1245 else if (zone
->cond
== BLK_ZONE_COND_EMPTY
)
1246 zone
->wp
= zone
->start
;
1248 zone
->wp
= zone
->wp
+ ti
->begin
- start
;
1252 #else /* !CONFIG_BLK_DEV_ZONED */
1256 EXPORT_SYMBOL_GPL(dm_remap_zone_report
);
1258 static blk_qc_t
__map_bio(struct dm_target_io
*tio
)
1262 struct bio
*clone
= &tio
->clone
;
1263 struct dm_io
*io
= tio
->io
;
1264 struct mapped_device
*md
= io
->md
;
1265 struct dm_target
*ti
= tio
->ti
;
1266 blk_qc_t ret
= BLK_QC_T_NONE
;
1268 clone
->bi_end_io
= clone_endio
;
1271 * Map the clone. If r == 0 we don't need to do
1272 * anything, the target has assumed ownership of
1275 atomic_inc(&io
->io_count
);
1276 sector
= clone
->bi_iter
.bi_sector
;
1278 r
= ti
->type
->map(ti
, clone
);
1280 case DM_MAPIO_SUBMITTED
:
1282 case DM_MAPIO_REMAPPED
:
1283 /* the bio has been remapped so dispatch it */
1284 trace_block_bio_remap(clone
->bi_disk
->queue
, clone
,
1285 bio_dev(io
->orig_bio
), sector
);
1286 if (md
->type
== DM_TYPE_NVME_BIO_BASED
)
1287 ret
= direct_make_request(clone
);
1289 ret
= generic_make_request(clone
);
1293 dec_pending(io
, BLK_STS_IOERR
);
1295 case DM_MAPIO_REQUEUE
:
1297 dec_pending(io
, BLK_STS_DM_REQUEUE
);
1300 DMWARN("unimplemented target map return value: %d", r
);
1307 static void bio_setup_sector(struct bio
*bio
, sector_t sector
, unsigned len
)
1309 bio
->bi_iter
.bi_sector
= sector
;
1310 bio
->bi_iter
.bi_size
= to_bytes(len
);
1314 * Creates a bio that consists of range of complete bvecs.
1316 static int clone_bio(struct dm_target_io
*tio
, struct bio
*bio
,
1317 sector_t sector
, unsigned len
)
1319 struct bio
*clone
= &tio
->clone
;
1321 __bio_clone_fast(clone
, bio
);
1323 if (unlikely(bio_integrity(bio
) != NULL
)) {
1326 if (unlikely(!dm_target_has_integrity(tio
->ti
->type
) &&
1327 !dm_target_passes_integrity(tio
->ti
->type
))) {
1328 DMWARN("%s: the target %s doesn't support integrity data.",
1329 dm_device_name(tio
->io
->md
),
1330 tio
->ti
->type
->name
);
1334 r
= bio_integrity_clone(clone
, bio
, GFP_NOIO
);
1339 bio_advance(clone
, to_bytes(sector
- clone
->bi_iter
.bi_sector
));
1340 clone
->bi_iter
.bi_size
= to_bytes(len
);
1342 if (unlikely(bio_integrity(bio
) != NULL
))
1343 bio_integrity_trim(clone
);
1348 static void alloc_multiple_bios(struct bio_list
*blist
, struct clone_info
*ci
,
1349 struct dm_target
*ti
, unsigned num_bios
)
1351 struct dm_target_io
*tio
;
1357 if (num_bios
== 1) {
1358 tio
= alloc_tio(ci
, ti
, 0, GFP_NOIO
);
1359 bio_list_add(blist
, &tio
->clone
);
1363 for (try = 0; try < 2; try++) {
1368 mutex_lock(&ci
->io
->md
->table_devices_lock
);
1369 for (bio_nr
= 0; bio_nr
< num_bios
; bio_nr
++) {
1370 tio
= alloc_tio(ci
, ti
, bio_nr
, try ? GFP_NOIO
: GFP_NOWAIT
);
1374 bio_list_add(blist
, &tio
->clone
);
1377 mutex_unlock(&ci
->io
->md
->table_devices_lock
);
1378 if (bio_nr
== num_bios
)
1381 while ((bio
= bio_list_pop(blist
))) {
1382 tio
= container_of(bio
, struct dm_target_io
, clone
);
1388 static blk_qc_t
__clone_and_map_simple_bio(struct clone_info
*ci
,
1389 struct dm_target_io
*tio
, unsigned *len
)
1391 struct bio
*clone
= &tio
->clone
;
1395 __bio_clone_fast(clone
, ci
->bio
);
1397 bio_setup_sector(clone
, ci
->sector
, *len
);
1399 return __map_bio(tio
);
1402 static void __send_duplicate_bios(struct clone_info
*ci
, struct dm_target
*ti
,
1403 unsigned num_bios
, unsigned *len
)
1405 struct bio_list blist
= BIO_EMPTY_LIST
;
1407 struct dm_target_io
*tio
;
1409 alloc_multiple_bios(&blist
, ci
, ti
, num_bios
);
1411 while ((bio
= bio_list_pop(&blist
))) {
1412 tio
= container_of(bio
, struct dm_target_io
, clone
);
1413 (void) __clone_and_map_simple_bio(ci
, tio
, len
);
1417 static int __send_empty_flush(struct clone_info
*ci
)
1419 unsigned target_nr
= 0;
1420 struct dm_target
*ti
;
1423 * Empty flush uses a statically initialized bio, as the base for
1424 * cloning. However, blkg association requires that a bdev is
1425 * associated with a gendisk, which doesn't happen until the bdev is
1426 * opened. So, blkg association is done at issue time of the flush
1427 * rather than when the device is created in alloc_dev().
1429 bio_set_dev(ci
->bio
, ci
->io
->md
->bdev
);
1431 BUG_ON(bio_has_data(ci
->bio
));
1432 while ((ti
= dm_table_get_target(ci
->map
, target_nr
++)))
1433 __send_duplicate_bios(ci
, ti
, ti
->num_flush_bios
, NULL
);
1435 bio_disassociate_blkg(ci
->bio
);
1440 static int __clone_and_map_data_bio(struct clone_info
*ci
, struct dm_target
*ti
,
1441 sector_t sector
, unsigned *len
)
1443 struct bio
*bio
= ci
->bio
;
1444 struct dm_target_io
*tio
;
1447 tio
= alloc_tio(ci
, ti
, 0, GFP_NOIO
);
1449 r
= clone_bio(tio
, bio
, sector
, *len
);
1454 (void) __map_bio(tio
);
1459 typedef unsigned (*get_num_bios_fn
)(struct dm_target
*ti
);
1461 static unsigned get_num_discard_bios(struct dm_target
*ti
)
1463 return ti
->num_discard_bios
;
1466 static unsigned get_num_secure_erase_bios(struct dm_target
*ti
)
1468 return ti
->num_secure_erase_bios
;
1471 static unsigned get_num_write_same_bios(struct dm_target
*ti
)
1473 return ti
->num_write_same_bios
;
1476 static unsigned get_num_write_zeroes_bios(struct dm_target
*ti
)
1478 return ti
->num_write_zeroes_bios
;
1481 typedef bool (*is_split_required_fn
)(struct dm_target
*ti
);
1483 static bool is_split_required_for_discard(struct dm_target
*ti
)
1485 return ti
->split_discard_bios
;
1488 static int __send_changing_extent_only(struct clone_info
*ci
, struct dm_target
*ti
,
1489 unsigned num_bios
, bool is_split_required
)
1494 * Even though the device advertised support for this type of
1495 * request, that does not mean every target supports it, and
1496 * reconfiguration might also have changed that since the
1497 * check was performed.
1502 if (!is_split_required
)
1503 len
= min((sector_t
)ci
->sector_count
, max_io_len_target_boundary(ci
->sector
, ti
));
1505 len
= min((sector_t
)ci
->sector_count
, max_io_len(ci
->sector
, ti
));
1507 __send_duplicate_bios(ci
, ti
, num_bios
, &len
);
1510 ci
->sector_count
-= len
;
1515 static int __send_discard(struct clone_info
*ci
, struct dm_target
*ti
)
1517 return __send_changing_extent_only(ci
, ti
, get_num_discard_bios(ti
),
1518 is_split_required_for_discard(ti
));
1521 static int __send_secure_erase(struct clone_info
*ci
, struct dm_target
*ti
)
1523 return __send_changing_extent_only(ci
, ti
, get_num_secure_erase_bios(ti
), false);
1526 static int __send_write_same(struct clone_info
*ci
, struct dm_target
*ti
)
1528 return __send_changing_extent_only(ci
, ti
, get_num_write_same_bios(ti
), false);
1531 static int __send_write_zeroes(struct clone_info
*ci
, struct dm_target
*ti
)
1533 return __send_changing_extent_only(ci
, ti
, get_num_write_zeroes_bios(ti
), false);
1536 static bool __process_abnormal_io(struct clone_info
*ci
, struct dm_target
*ti
,
1539 struct bio
*bio
= ci
->bio
;
1541 if (bio_op(bio
) == REQ_OP_DISCARD
)
1542 *result
= __send_discard(ci
, ti
);
1543 else if (bio_op(bio
) == REQ_OP_SECURE_ERASE
)
1544 *result
= __send_secure_erase(ci
, ti
);
1545 else if (bio_op(bio
) == REQ_OP_WRITE_SAME
)
1546 *result
= __send_write_same(ci
, ti
);
1547 else if (bio_op(bio
) == REQ_OP_WRITE_ZEROES
)
1548 *result
= __send_write_zeroes(ci
, ti
);
1556 * Select the correct strategy for processing a non-flush bio.
1558 static int __split_and_process_non_flush(struct clone_info
*ci
)
1560 struct dm_target
*ti
;
1564 ti
= dm_table_find_target(ci
->map
, ci
->sector
);
1565 if (!dm_target_is_valid(ti
))
1568 if (unlikely(__process_abnormal_io(ci
, ti
, &r
)))
1571 len
= min_t(sector_t
, max_io_len(ci
->sector
, ti
), ci
->sector_count
);
1573 r
= __clone_and_map_data_bio(ci
, ti
, ci
->sector
, &len
);
1578 ci
->sector_count
-= len
;
1583 static void init_clone_info(struct clone_info
*ci
, struct mapped_device
*md
,
1584 struct dm_table
*map
, struct bio
*bio
)
1587 ci
->io
= alloc_io(md
, bio
);
1588 ci
->sector
= bio
->bi_iter
.bi_sector
;
1592 * Entry point to split a bio into clones and submit them to the targets.
1594 static blk_qc_t
__split_and_process_bio(struct mapped_device
*md
,
1595 struct dm_table
*map
, struct bio
*bio
)
1597 struct clone_info ci
;
1598 blk_qc_t ret
= BLK_QC_T_NONE
;
1601 if (unlikely(!map
)) {
1606 blk_queue_split(md
->queue
, &bio
);
1608 init_clone_info(&ci
, md
, map
, bio
);
1610 if (bio
->bi_opf
& REQ_PREFLUSH
) {
1611 struct bio flush_bio
;
1614 * Use an on-stack bio for this, it's safe since we don't
1615 * need to reference it after submit. It's just used as
1616 * the basis for the clone(s).
1618 bio_init(&flush_bio
, NULL
, 0);
1619 flush_bio
.bi_opf
= REQ_OP_WRITE
| REQ_PREFLUSH
| REQ_SYNC
;
1620 ci
.bio
= &flush_bio
;
1621 ci
.sector_count
= 0;
1622 error
= __send_empty_flush(&ci
);
1623 /* dec_pending submits any data associated with flush */
1624 } else if (bio_op(bio
) == REQ_OP_ZONE_RESET
) {
1626 ci
.sector_count
= 0;
1627 error
= __split_and_process_non_flush(&ci
);
1630 ci
.sector_count
= bio_sectors(bio
);
1631 while (ci
.sector_count
&& !error
) {
1632 error
= __split_and_process_non_flush(&ci
);
1633 if (current
->bio_list
&& ci
.sector_count
&& !error
) {
1635 * Remainder must be passed to generic_make_request()
1636 * so that it gets handled *after* bios already submitted
1637 * have been completely processed.
1638 * We take a clone of the original to store in
1639 * ci.io->orig_bio to be used by end_io_acct() and
1640 * for dec_pending to use for completion handling.
1642 struct bio
*b
= bio_split(bio
, bio_sectors(bio
) - ci
.sector_count
,
1643 GFP_NOIO
, &md
->queue
->bio_split
);
1644 ci
.io
->orig_bio
= b
;
1646 ret
= generic_make_request(bio
);
1652 /* drop the extra reference count */
1653 dec_pending(ci
.io
, errno_to_blk_status(error
));
1658 * Optimized variant of __split_and_process_bio that leverages the
1659 * fact that targets that use it do _not_ have a need to split bios.
1661 static blk_qc_t
__process_bio(struct mapped_device
*md
,
1662 struct dm_table
*map
, struct bio
*bio
)
1664 struct clone_info ci
;
1665 blk_qc_t ret
= BLK_QC_T_NONE
;
1668 if (unlikely(!map
)) {
1673 init_clone_info(&ci
, md
, map
, bio
);
1675 if (bio
->bi_opf
& REQ_PREFLUSH
) {
1676 struct bio flush_bio
;
1679 * Use an on-stack bio for this, it's safe since we don't
1680 * need to reference it after submit. It's just used as
1681 * the basis for the clone(s).
1683 bio_init(&flush_bio
, NULL
, 0);
1684 flush_bio
.bi_opf
= REQ_OP_WRITE
| REQ_PREFLUSH
| REQ_SYNC
;
1685 ci
.bio
= &flush_bio
;
1686 ci
.sector_count
= 0;
1687 error
= __send_empty_flush(&ci
);
1688 /* dec_pending submits any data associated with flush */
1690 struct dm_target
*ti
= md
->immutable_target
;
1691 struct dm_target_io
*tio
;
1694 * Defend against IO still getting in during teardown
1695 * - as was seen for a time with nvme-fcloop
1697 if (WARN_ON_ONCE(!ti
|| !dm_target_is_valid(ti
))) {
1703 ci
.sector_count
= bio_sectors(bio
);
1704 if (unlikely(__process_abnormal_io(&ci
, ti
, &error
)))
1707 tio
= alloc_tio(&ci
, ti
, 0, GFP_NOIO
);
1708 ret
= __clone_and_map_simple_bio(&ci
, tio
, NULL
);
1711 /* drop the extra reference count */
1712 dec_pending(ci
.io
, errno_to_blk_status(error
));
1716 static blk_qc_t
dm_make_request(struct request_queue
*q
, struct bio
*bio
)
1718 struct mapped_device
*md
= q
->queuedata
;
1719 blk_qc_t ret
= BLK_QC_T_NONE
;
1721 struct dm_table
*map
;
1723 map
= dm_get_live_table(md
, &srcu_idx
);
1725 /* if we're suspended, we have to queue this io for later */
1726 if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND
, &md
->flags
))) {
1727 dm_put_live_table(md
, srcu_idx
);
1729 if (!(bio
->bi_opf
& REQ_RAHEAD
))
1736 if (dm_get_md_type(md
) == DM_TYPE_NVME_BIO_BASED
)
1737 ret
= __process_bio(md
, map
, bio
);
1739 ret
= __split_and_process_bio(md
, map
, bio
);
1741 dm_put_live_table(md
, srcu_idx
);
1745 static int dm_any_congested(void *congested_data
, int bdi_bits
)
1748 struct mapped_device
*md
= congested_data
;
1749 struct dm_table
*map
;
1751 if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND
, &md
->flags
)) {
1752 if (dm_request_based(md
)) {
1754 * With request-based DM we only need to check the
1755 * top-level queue for congestion.
1757 r
= md
->queue
->backing_dev_info
->wb
.state
& bdi_bits
;
1759 map
= dm_get_live_table_fast(md
);
1761 r
= dm_table_any_congested(map
, bdi_bits
);
1762 dm_put_live_table_fast(md
);
1769 /*-----------------------------------------------------------------
1770 * An IDR is used to keep track of allocated minor numbers.
1771 *---------------------------------------------------------------*/
1772 static void free_minor(int minor
)
1774 spin_lock(&_minor_lock
);
1775 idr_remove(&_minor_idr
, minor
);
1776 spin_unlock(&_minor_lock
);
1780 * See if the device with a specific minor # is free.
1782 static int specific_minor(int minor
)
1786 if (minor
>= (1 << MINORBITS
))
1789 idr_preload(GFP_KERNEL
);
1790 spin_lock(&_minor_lock
);
1792 r
= idr_alloc(&_minor_idr
, MINOR_ALLOCED
, minor
, minor
+ 1, GFP_NOWAIT
);
1794 spin_unlock(&_minor_lock
);
1797 return r
== -ENOSPC
? -EBUSY
: r
;
1801 static int next_free_minor(int *minor
)
1805 idr_preload(GFP_KERNEL
);
1806 spin_lock(&_minor_lock
);
1808 r
= idr_alloc(&_minor_idr
, MINOR_ALLOCED
, 0, 1 << MINORBITS
, GFP_NOWAIT
);
1810 spin_unlock(&_minor_lock
);
1818 static const struct block_device_operations dm_blk_dops
;
1819 static const struct dax_operations dm_dax_ops
;
1821 static void dm_wq_work(struct work_struct
*work
);
1823 static void dm_init_normal_md_queue(struct mapped_device
*md
)
1826 * Initialize aspects of queue that aren't relevant for blk-mq
1828 md
->queue
->backing_dev_info
->congested_fn
= dm_any_congested
;
1831 static void cleanup_mapped_device(struct mapped_device
*md
)
1834 destroy_workqueue(md
->wq
);
1835 bioset_exit(&md
->bs
);
1836 bioset_exit(&md
->io_bs
);
1839 kill_dax(md
->dax_dev
);
1840 put_dax(md
->dax_dev
);
1845 spin_lock(&_minor_lock
);
1846 md
->disk
->private_data
= NULL
;
1847 spin_unlock(&_minor_lock
);
1848 del_gendisk(md
->disk
);
1853 blk_cleanup_queue(md
->queue
);
1855 cleanup_srcu_struct(&md
->io_barrier
);
1862 mutex_destroy(&md
->suspend_lock
);
1863 mutex_destroy(&md
->type_lock
);
1864 mutex_destroy(&md
->table_devices_lock
);
1866 dm_mq_cleanup_mapped_device(md
);
1870 * Allocate and initialise a blank device with a given minor.
1872 static struct mapped_device
*alloc_dev(int minor
)
1874 int r
, numa_node_id
= dm_get_numa_node();
1875 struct dax_device
*dax_dev
= NULL
;
1876 struct mapped_device
*md
;
1879 md
= kvzalloc_node(sizeof(*md
), GFP_KERNEL
, numa_node_id
);
1881 DMWARN("unable to allocate device, out of memory.");
1885 if (!try_module_get(THIS_MODULE
))
1886 goto bad_module_get
;
1888 /* get a minor number for the dev */
1889 if (minor
== DM_ANY_MINOR
)
1890 r
= next_free_minor(&minor
);
1892 r
= specific_minor(minor
);
1896 r
= init_srcu_struct(&md
->io_barrier
);
1898 goto bad_io_barrier
;
1900 md
->numa_node_id
= numa_node_id
;
1901 md
->init_tio_pdu
= false;
1902 md
->type
= DM_TYPE_NONE
;
1903 mutex_init(&md
->suspend_lock
);
1904 mutex_init(&md
->type_lock
);
1905 mutex_init(&md
->table_devices_lock
);
1906 spin_lock_init(&md
->deferred_lock
);
1907 atomic_set(&md
->holders
, 1);
1908 atomic_set(&md
->open_count
, 0);
1909 atomic_set(&md
->event_nr
, 0);
1910 atomic_set(&md
->uevent_seq
, 0);
1911 INIT_LIST_HEAD(&md
->uevent_list
);
1912 INIT_LIST_HEAD(&md
->table_devices
);
1913 spin_lock_init(&md
->uevent_lock
);
1915 md
->queue
= blk_alloc_queue_node(GFP_KERNEL
, numa_node_id
);
1918 md
->queue
->queuedata
= md
;
1919 md
->queue
->backing_dev_info
->congested_data
= md
;
1921 md
->disk
= alloc_disk_node(1, md
->numa_node_id
);
1925 init_waitqueue_head(&md
->wait
);
1926 INIT_WORK(&md
->work
, dm_wq_work
);
1927 init_waitqueue_head(&md
->eventq
);
1928 init_completion(&md
->kobj_holder
.completion
);
1930 md
->disk
->major
= _major
;
1931 md
->disk
->first_minor
= minor
;
1932 md
->disk
->fops
= &dm_blk_dops
;
1933 md
->disk
->queue
= md
->queue
;
1934 md
->disk
->private_data
= md
;
1935 sprintf(md
->disk
->disk_name
, "dm-%d", minor
);
1937 if (IS_ENABLED(CONFIG_DAX_DRIVER
)) {
1938 dax_dev
= alloc_dax(md
, md
->disk
->disk_name
, &dm_dax_ops
);
1942 md
->dax_dev
= dax_dev
;
1944 add_disk_no_queue_reg(md
->disk
);
1945 format_dev_t(md
->name
, MKDEV(_major
, minor
));
1947 md
->wq
= alloc_workqueue("kdmflush", WQ_MEM_RECLAIM
, 0);
1951 md
->bdev
= bdget_disk(md
->disk
, 0);
1955 dm_stats_init(&md
->stats
);
1957 /* Populate the mapping, nobody knows we exist yet */
1958 spin_lock(&_minor_lock
);
1959 old_md
= idr_replace(&_minor_idr
, md
, minor
);
1960 spin_unlock(&_minor_lock
);
1962 BUG_ON(old_md
!= MINOR_ALLOCED
);
1967 cleanup_mapped_device(md
);
1971 module_put(THIS_MODULE
);
1977 static void unlock_fs(struct mapped_device
*md
);
1979 static void free_dev(struct mapped_device
*md
)
1981 int minor
= MINOR(disk_devt(md
->disk
));
1985 cleanup_mapped_device(md
);
1987 free_table_devices(&md
->table_devices
);
1988 dm_stats_cleanup(&md
->stats
);
1991 module_put(THIS_MODULE
);
1995 static int __bind_mempools(struct mapped_device
*md
, struct dm_table
*t
)
1997 struct dm_md_mempools
*p
= dm_table_get_md_mempools(t
);
2000 if (dm_table_bio_based(t
)) {
2002 * The md may already have mempools that need changing.
2003 * If so, reload bioset because front_pad may have changed
2004 * because a different table was loaded.
2006 bioset_exit(&md
->bs
);
2007 bioset_exit(&md
->io_bs
);
2009 } else if (bioset_initialized(&md
->bs
)) {
2011 * There's no need to reload with request-based dm
2012 * because the size of front_pad doesn't change.
2013 * Note for future: If you are to reload bioset,
2014 * prep-ed requests in the queue may refer
2015 * to bio from the old bioset, so you must walk
2016 * through the queue to unprep.
2022 bioset_initialized(&md
->bs
) ||
2023 bioset_initialized(&md
->io_bs
));
2025 ret
= bioset_init_from_src(&md
->bs
, &p
->bs
);
2028 ret
= bioset_init_from_src(&md
->io_bs
, &p
->io_bs
);
2030 bioset_exit(&md
->bs
);
2032 /* mempool bind completed, no longer need any mempools in the table */
2033 dm_table_free_md_mempools(t
);
2038 * Bind a table to the device.
2040 static void event_callback(void *context
)
2042 unsigned long flags
;
2044 struct mapped_device
*md
= (struct mapped_device
*) context
;
2046 spin_lock_irqsave(&md
->uevent_lock
, flags
);
2047 list_splice_init(&md
->uevent_list
, &uevents
);
2048 spin_unlock_irqrestore(&md
->uevent_lock
, flags
);
2050 dm_send_uevents(&uevents
, &disk_to_dev(md
->disk
)->kobj
);
2052 atomic_inc(&md
->event_nr
);
2053 wake_up(&md
->eventq
);
2054 dm_issue_global_event();
2058 * Protected by md->suspend_lock obtained by dm_swap_table().
2060 static void __set_size(struct mapped_device
*md
, sector_t size
)
2062 lockdep_assert_held(&md
->suspend_lock
);
2064 set_capacity(md
->disk
, size
);
2066 i_size_write(md
->bdev
->bd_inode
, (loff_t
)size
<< SECTOR_SHIFT
);
2070 * Returns old map, which caller must destroy.
2072 static struct dm_table
*__bind(struct mapped_device
*md
, struct dm_table
*t
,
2073 struct queue_limits
*limits
)
2075 struct dm_table
*old_map
;
2076 struct request_queue
*q
= md
->queue
;
2077 bool request_based
= dm_table_request_based(t
);
2081 lockdep_assert_held(&md
->suspend_lock
);
2083 size
= dm_table_get_size(t
);
2086 * Wipe any geometry if the size of the table changed.
2088 if (size
!= dm_get_size(md
))
2089 memset(&md
->geometry
, 0, sizeof(md
->geometry
));
2091 __set_size(md
, size
);
2093 dm_table_event_callback(t
, event_callback
, md
);
2096 * The queue hasn't been stopped yet, if the old table type wasn't
2097 * for request-based during suspension. So stop it to prevent
2098 * I/O mapping before resume.
2099 * This must be done before setting the queue restrictions,
2100 * because request-based dm may be run just after the setting.
2105 if (request_based
|| md
->type
== DM_TYPE_NVME_BIO_BASED
) {
2107 * Leverage the fact that request-based DM targets and
2108 * NVMe bio based targets are immutable singletons
2109 * - used to optimize both dm_request_fn and dm_mq_queue_rq;
2110 * and __process_bio.
2112 md
->immutable_target
= dm_table_get_immutable_target(t
);
2115 ret
= __bind_mempools(md
, t
);
2117 old_map
= ERR_PTR(ret
);
2121 old_map
= rcu_dereference_protected(md
->map
, lockdep_is_held(&md
->suspend_lock
));
2122 rcu_assign_pointer(md
->map
, (void *)t
);
2123 md
->immutable_target_type
= dm_table_get_immutable_target_type(t
);
2125 dm_table_set_restrictions(t
, q
, limits
);
2134 * Returns unbound table for the caller to free.
2136 static struct dm_table
*__unbind(struct mapped_device
*md
)
2138 struct dm_table
*map
= rcu_dereference_protected(md
->map
, 1);
2143 dm_table_event_callback(map
, NULL
, NULL
);
2144 RCU_INIT_POINTER(md
->map
, NULL
);
2151 * Constructor for a new device.
2153 int dm_create(int minor
, struct mapped_device
**result
)
2156 struct mapped_device
*md
;
2158 md
= alloc_dev(minor
);
2162 r
= dm_sysfs_init(md
);
2173 * Functions to manage md->type.
2174 * All are required to hold md->type_lock.
2176 void dm_lock_md_type(struct mapped_device
*md
)
2178 mutex_lock(&md
->type_lock
);
2181 void dm_unlock_md_type(struct mapped_device
*md
)
2183 mutex_unlock(&md
->type_lock
);
2186 void dm_set_md_type(struct mapped_device
*md
, enum dm_queue_mode type
)
2188 BUG_ON(!mutex_is_locked(&md
->type_lock
));
2192 enum dm_queue_mode
dm_get_md_type(struct mapped_device
*md
)
2197 struct target_type
*dm_get_immutable_target_type(struct mapped_device
*md
)
2199 return md
->immutable_target_type
;
2203 * The queue_limits are only valid as long as you have a reference
2206 struct queue_limits
*dm_get_queue_limits(struct mapped_device
*md
)
2208 BUG_ON(!atomic_read(&md
->holders
));
2209 return &md
->queue
->limits
;
2211 EXPORT_SYMBOL_GPL(dm_get_queue_limits
);
2214 * Setup the DM device's queue based on md's type
2216 int dm_setup_md_queue(struct mapped_device
*md
, struct dm_table
*t
)
2219 struct queue_limits limits
;
2220 enum dm_queue_mode type
= dm_get_md_type(md
);
2223 case DM_TYPE_REQUEST_BASED
:
2224 r
= dm_mq_init_request_queue(md
, t
);
2226 DMERR("Cannot initialize queue for request-based dm-mq mapped device");
2230 case DM_TYPE_BIO_BASED
:
2231 case DM_TYPE_DAX_BIO_BASED
:
2232 case DM_TYPE_NVME_BIO_BASED
:
2233 dm_init_normal_md_queue(md
);
2234 blk_queue_make_request(md
->queue
, dm_make_request
);
2241 r
= dm_calculate_queue_limits(t
, &limits
);
2243 DMERR("Cannot calculate initial queue limits");
2246 dm_table_set_restrictions(t
, md
->queue
, &limits
);
2247 blk_register_queue(md
->disk
);
2252 struct mapped_device
*dm_get_md(dev_t dev
)
2254 struct mapped_device
*md
;
2255 unsigned minor
= MINOR(dev
);
2257 if (MAJOR(dev
) != _major
|| minor
>= (1 << MINORBITS
))
2260 spin_lock(&_minor_lock
);
2262 md
= idr_find(&_minor_idr
, minor
);
2263 if (!md
|| md
== MINOR_ALLOCED
|| (MINOR(disk_devt(dm_disk(md
))) != minor
) ||
2264 test_bit(DMF_FREEING
, &md
->flags
) || dm_deleting_md(md
)) {
2270 spin_unlock(&_minor_lock
);
2274 EXPORT_SYMBOL_GPL(dm_get_md
);
2276 void *dm_get_mdptr(struct mapped_device
*md
)
2278 return md
->interface_ptr
;
2281 void dm_set_mdptr(struct mapped_device
*md
, void *ptr
)
2283 md
->interface_ptr
= ptr
;
2286 void dm_get(struct mapped_device
*md
)
2288 atomic_inc(&md
->holders
);
2289 BUG_ON(test_bit(DMF_FREEING
, &md
->flags
));
2292 int dm_hold(struct mapped_device
*md
)
2294 spin_lock(&_minor_lock
);
2295 if (test_bit(DMF_FREEING
, &md
->flags
)) {
2296 spin_unlock(&_minor_lock
);
2300 spin_unlock(&_minor_lock
);
2303 EXPORT_SYMBOL_GPL(dm_hold
);
2305 const char *dm_device_name(struct mapped_device
*md
)
2309 EXPORT_SYMBOL_GPL(dm_device_name
);
2311 static void __dm_destroy(struct mapped_device
*md
, bool wait
)
2313 struct dm_table
*map
;
2318 spin_lock(&_minor_lock
);
2319 idr_replace(&_minor_idr
, MINOR_ALLOCED
, MINOR(disk_devt(dm_disk(md
))));
2320 set_bit(DMF_FREEING
, &md
->flags
);
2321 spin_unlock(&_minor_lock
);
2323 blk_set_queue_dying(md
->queue
);
2326 * Take suspend_lock so that presuspend and postsuspend methods
2327 * do not race with internal suspend.
2329 mutex_lock(&md
->suspend_lock
);
2330 map
= dm_get_live_table(md
, &srcu_idx
);
2331 if (!dm_suspended_md(md
)) {
2332 dm_table_presuspend_targets(map
);
2333 dm_table_postsuspend_targets(map
);
2335 /* dm_put_live_table must be before msleep, otherwise deadlock is possible */
2336 dm_put_live_table(md
, srcu_idx
);
2337 mutex_unlock(&md
->suspend_lock
);
2340 * Rare, but there may be I/O requests still going to complete,
2341 * for example. Wait for all references to disappear.
2342 * No one should increment the reference count of the mapped_device,
2343 * after the mapped_device state becomes DMF_FREEING.
2346 while (atomic_read(&md
->holders
))
2348 else if (atomic_read(&md
->holders
))
2349 DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
2350 dm_device_name(md
), atomic_read(&md
->holders
));
2353 dm_table_destroy(__unbind(md
));
2357 void dm_destroy(struct mapped_device
*md
)
2359 __dm_destroy(md
, true);
2362 void dm_destroy_immediate(struct mapped_device
*md
)
2364 __dm_destroy(md
, false);
2367 void dm_put(struct mapped_device
*md
)
2369 atomic_dec(&md
->holders
);
2371 EXPORT_SYMBOL_GPL(dm_put
);
2373 static int dm_wait_for_completion(struct mapped_device
*md
, long task_state
)
2379 prepare_to_wait(&md
->wait
, &wait
, task_state
);
2381 if (!md_in_flight(md
))
2384 if (signal_pending_state(task_state
, current
)) {
2391 finish_wait(&md
->wait
, &wait
);
2397 * Process the deferred bios
2399 static void dm_wq_work(struct work_struct
*work
)
2401 struct mapped_device
*md
= container_of(work
, struct mapped_device
,
2405 struct dm_table
*map
;
2407 map
= dm_get_live_table(md
, &srcu_idx
);
2409 while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND
, &md
->flags
)) {
2410 spin_lock_irq(&md
->deferred_lock
);
2411 c
= bio_list_pop(&md
->deferred
);
2412 spin_unlock_irq(&md
->deferred_lock
);
2417 if (dm_request_based(md
))
2418 generic_make_request(c
);
2420 __split_and_process_bio(md
, map
, c
);
2423 dm_put_live_table(md
, srcu_idx
);
2426 static void dm_queue_flush(struct mapped_device
*md
)
2428 clear_bit(DMF_BLOCK_IO_FOR_SUSPEND
, &md
->flags
);
2429 smp_mb__after_atomic();
2430 queue_work(md
->wq
, &md
->work
);
2434 * Swap in a new table, returning the old one for the caller to destroy.
2436 struct dm_table
*dm_swap_table(struct mapped_device
*md
, struct dm_table
*table
)
2438 struct dm_table
*live_map
= NULL
, *map
= ERR_PTR(-EINVAL
);
2439 struct queue_limits limits
;
2442 mutex_lock(&md
->suspend_lock
);
2444 /* device must be suspended */
2445 if (!dm_suspended_md(md
))
2449 * If the new table has no data devices, retain the existing limits.
2450 * This helps multipath with queue_if_no_path if all paths disappear,
2451 * then new I/O is queued based on these limits, and then some paths
2454 if (dm_table_has_no_data_devices(table
)) {
2455 live_map
= dm_get_live_table_fast(md
);
2457 limits
= md
->queue
->limits
;
2458 dm_put_live_table_fast(md
);
2462 r
= dm_calculate_queue_limits(table
, &limits
);
2469 map
= __bind(md
, table
, &limits
);
2470 dm_issue_global_event();
2473 mutex_unlock(&md
->suspend_lock
);
2478 * Functions to lock and unlock any filesystem running on the
2481 static int lock_fs(struct mapped_device
*md
)
2485 WARN_ON(md
->frozen_sb
);
2487 md
->frozen_sb
= freeze_bdev(md
->bdev
);
2488 if (IS_ERR(md
->frozen_sb
)) {
2489 r
= PTR_ERR(md
->frozen_sb
);
2490 md
->frozen_sb
= NULL
;
2494 set_bit(DMF_FROZEN
, &md
->flags
);
2499 static void unlock_fs(struct mapped_device
*md
)
2501 if (!test_bit(DMF_FROZEN
, &md
->flags
))
2504 thaw_bdev(md
->bdev
, md
->frozen_sb
);
2505 md
->frozen_sb
= NULL
;
2506 clear_bit(DMF_FROZEN
, &md
->flags
);
/*
 * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG
 * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE
 * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY
 *
 * If __dm_suspend returns 0, the device is completely quiescent
 * now. There is no request-processing activity. All new requests
 * are being added to md->deferred list.
 */
static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
			unsigned suspend_flags, long task_state,
			int dmf_suspended_flag)
{
	bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
	bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
	int r;

	lockdep_assert_held(&md->suspend_lock);

	/*
	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
	 * This flag is cleared before dm_suspend returns.
	 */
	if (noflush)
		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
	else
		pr_debug("%s: suspending with flush\n", dm_device_name(md));

	/*
	 * This gets reverted if there's an error later and the targets
	 * provide the .presuspend_undo hook.
	 */
	dm_table_presuspend_targets(map);

	/*
	 * Flush I/O to the device.
	 * Any I/O submitted after lock_fs() may not be flushed.
	 * noflush takes precedence over do_lockfs.
	 * (lock_fs() flushes I/Os and waits for them to complete.)
	 */
	if (!noflush && do_lockfs) {
		r = lock_fs(md);
		if (r) {
			dm_table_presuspend_undo_targets(map);
			return r;
		}
	}

	/*
	 * Here we must make sure that no processes are submitting requests
	 * to target drivers i.e. no one may be executing
	 * __split_and_process_bio. This is called from dm_request and
	 * dm_wq_work.
	 *
	 * To get all processes out of __split_and_process_bio in dm_request,
	 * we take the write lock. To prevent any process from reentering
	 * __split_and_process_bio from dm_request and quiesce the thread
	 * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
	 * flush_workqueue(md->wq).
	 */
	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	if (map)
		synchronize_srcu(&md->io_barrier);

	/*
	 * Stop md->queue before flushing md->wq in case request-based
	 * dm defers requests to md->wq from md->queue.
	 */
	if (dm_request_based(md))
		dm_stop_queue(md->queue);

	flush_workqueue(md->wq);

	/*
	 * At this point no more requests are entering target request routines.
	 * We call dm_wait_for_completion to wait for all existing requests
	 * to finish.
	 */
	r = dm_wait_for_completion(md, task_state);
	if (!r)
		set_bit(dmf_suspended_flag, &md->flags);

	if (noflush)
		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
	if (map)
		synchronize_srcu(&md->io_barrier);

	/* were we interrupted ? */
	if (r < 0) {
		dm_queue_flush(md);

		if (dm_request_based(md))
			dm_start_queue(md->queue);

		unlock_fs(md);
		dm_table_presuspend_undo_targets(map);
		/* pushback list is already flushed, so skip flush */
	}

	return r;
}
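/*
 * Illustrative sketch, not part of the original file: how the suspend_flags
 * passed to __dm_suspend()/dm_suspend() map onto behaviour.  The helper name
 * and its boolean parameters are assumptions made only for this example.
 */
static inline unsigned __maybe_unused example_suspend_flags(bool skip_lockfs,
							    bool noflush)
{
	unsigned flags = DM_SUSPEND_LOCKFS_FLAG;	/* freeze the fs by default */

	if (skip_lockfs)
		flags &= ~DM_SUSPEND_LOCKFS_FLAG;	/* skip freeze_bdev() */
	if (noflush)
		flags |= DM_SUSPEND_NOFLUSH_FLAG;	/* queue new I/O instead of flushing */

	return flags;
}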
/*
 * We need to be able to change a mapping table under a mounted
 * filesystem. For example we might want to move some data in
 * the background. Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in
 * flight bios and ensure that any further io gets deferred.
 */
/*
 * Suspend mechanism in request-based dm.
 *
 * 1. Flush all I/Os by lock_fs() if needed.
 * 2. Stop dispatching any I/O by stopping the request_queue.
 * 3. Wait for all in-flight I/Os to be completed or requeued.
 *
 * To abort suspend, start the request_queue.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;
	int r = 0;

retry:
	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);

	if (dm_suspended_md(md)) {
		r = -EINVAL;
		goto out_unlock;
	}

	if (dm_suspended_internally_md(md)) {
		/* already internally suspended, wait for internal resume */
		mutex_unlock(&md->suspend_lock);
		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
		if (r)
			return r;
		goto retry;
	}

	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));

	r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED);
	if (r)
		goto out_unlock;

	dm_table_postsuspend_targets(map);

out_unlock:
	mutex_unlock(&md->suspend_lock);
	return r;
}
static int __dm_resume(struct mapped_device *md, struct dm_table *map)
{
	if (map) {
		int r = dm_table_resume_targets(map);
		if (r)
			return r;
	}

	dm_queue_flush(md);

	/*
	 * Flushing deferred I/Os must be done after targets are resumed
	 * so that mapping of targets can work correctly.
	 * Request-based dm is queueing the deferred I/Os in its request_queue.
	 */
	if (dm_request_based(md))
		dm_start_queue(md->queue);

	unlock_fs(md);

	return 0;
}
int dm_resume(struct mapped_device *md)
{
	int r;
	struct dm_table *map = NULL;

retry:
	r = -EINVAL;
	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);

	if (!dm_suspended_md(md))
		goto out;

	if (dm_suspended_internally_md(md)) {
		/* already internally suspended, wait for internal resume */
		mutex_unlock(&md->suspend_lock);
		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
		if (r)
			return r;
		goto retry;
	}

	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
	if (!map || !dm_table_get_size(map))
		goto out;

	r = __dm_resume(md, map);
	if (r)
		goto out;

	clear_bit(DMF_SUSPENDED, &md->flags);
out:
	mutex_unlock(&md->suspend_lock);

	return r;
}
/*
 * Internal suspend/resume works like userspace-driven suspend. It waits
 * until all bios finish and prevents issuing new bios to the target drivers.
 * It may be used only from the kernel.
 */
static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;

	lockdep_assert_held(&md->suspend_lock);

	if (md->internal_suspend_count++)
		return; /* nested internal suspend */

	if (dm_suspended_md(md)) {
		set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
		return; /* nest suspend */
	}

	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));

	/*
	 * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is
	 * supported. Properly supporting a TASK_INTERRUPTIBLE internal suspend
	 * would require changing .presuspend to return an error -- avoid this
	 * until there is a need for more elaborate variants of internal suspend.
	 */
	(void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE,
			    DMF_SUSPENDED_INTERNALLY);

	dm_table_postsuspend_targets(map);
}
static void __dm_internal_resume(struct mapped_device *md)
{
	BUG_ON(!md->internal_suspend_count);

	if (--md->internal_suspend_count)
		return; /* resume from nested internal suspend */

	if (dm_suspended_md(md))
		goto done; /* resume from nested suspend */

	/*
	 * NOTE: existing callers don't need to call dm_table_resume_targets
	 * (which may fail -- so best to avoid it for now by passing NULL map)
	 */
	(void) __dm_resume(md, NULL);

done:
	clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
	smp_mb__after_atomic();
	wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY);
}
void dm_internal_suspend_noflush(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	__dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG);
	mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush);

void dm_internal_resume(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	__dm_internal_resume(md);
	mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_resume);
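/*
 * Illustrative sketch, not part of the original file: how target code might
 * bracket a metadata commit with the noflush internal suspend/resume pair
 * exported above (dm-thin does something similar).  The helper name and the
 * commented-out commit step are assumptions of this example.
 */
static void __maybe_unused example_quiesce_for_commit(struct mapped_device *md)
{
	/* defer new bios and wait for in-flight ones; may nest */
	dm_internal_suspend_noflush(md);

	/* ... the device is quiescent here; commit metadata ... */

	/* reissue the deferred bios */
	dm_internal_resume(md);
}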
/*
 * Fast variants of internal suspend/resume hold md->suspend_lock,
 * which prevents interaction with userspace-driven suspend.
 */

void dm_internal_suspend_fast(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
		return;

	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	synchronize_srcu(&md->io_barrier);
	flush_workqueue(md->wq);
	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL_GPL(dm_internal_suspend_fast);

void dm_internal_resume_fast(struct mapped_device *md)
{
	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
		goto done;

	dm_queue_flush(md);

done:
	mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
		      unsigned cookie)
{
	char udev_cookie[DM_COOKIE_LENGTH];
	char *envp[] = { udev_cookie, NULL };

	if (!cookie)
		return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);

	snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
		 DM_COOKIE_ENV_VAR_NAME, cookie);
	return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
				  action, envp);
}
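/*
 * Illustrative sketch, not part of the original file: emitting a CHANGE
 * uevent with a userspace-supplied cookie via dm_kobject_uevent().  The
 * caller is hypothetical; only the DM_COOKIE=<value> environment variable
 * format comes from the function above.
 */
static void __maybe_unused example_notify_change(struct mapped_device *md,
						 unsigned cookie)
{
	/* with a non-zero cookie this adds "DM_COOKIE=<cookie>" to the uevent env */
	dm_kobject_uevent(md, KOBJ_CHANGE, cookie);
}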
uint32_t dm_next_uevent_seq(struct mapped_device *md)
{
	return atomic_add_return(1, &md->uevent_seq);
}

uint32_t dm_get_event_nr(struct mapped_device *md)
{
	return atomic_read(&md->event_nr);
}

int dm_wait_event(struct mapped_device *md, int event_nr)
{
	return wait_event_interruptible(md->eventq,
			(event_nr != atomic_read(&md->event_nr)));
}

void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
{
	unsigned long flags;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_add(elist, &md->uevent_list);
	spin_unlock_irqrestore(&md->uevent_lock, flags);
}
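/*
 * Illustrative sketch, not part of the original file: waiting for the next
 * device event using the counters above.  The helper name is hypothetical.
 */
static int __maybe_unused example_wait_for_next_event(struct mapped_device *md)
{
	int seen = dm_get_event_nr(md);

	/* blocks until event_nr changes; returns -ERESTARTSYS on a signal */
	return dm_wait_event(md, seen);
}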
/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
	return md->disk;
}
EXPORT_SYMBOL_GPL(dm_disk);

struct kobject *dm_kobject(struct mapped_device *md)
{
	return &md->kobj_holder.kobj;
}

struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
{
	struct mapped_device *md;

	md = container_of(kobj, struct mapped_device, kobj_holder.kobj);

	spin_lock(&_minor_lock);
	if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}
	dm_get(md);
out:
	spin_unlock(&_minor_lock);

	return md;
}
int dm_suspended_md(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED, &md->flags);
}

int dm_suspended_internally_md(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
}

int dm_test_deferred_remove_flag(struct mapped_device *md)
{
	return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
}

int dm_suspended(struct dm_target *ti)
{
	return dm_suspended_md(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_suspended);

int dm_noflush_suspending(struct dm_target *ti)
{
	return __noflush_suspending(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);
struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
					    unsigned integrity, unsigned per_io_data_size,
					    unsigned min_pool_size)
{
	struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
	unsigned int pool_size = 0;
	unsigned int front_pad, io_front_pad;
	int ret;

	if (!pools)
		return NULL;

	switch (type) {
	case DM_TYPE_BIO_BASED:
	case DM_TYPE_DAX_BIO_BASED:
	case DM_TYPE_NVME_BIO_BASED:
		pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
		front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
		io_front_pad = roundup(front_pad, __alignof__(struct dm_io)) + offsetof(struct dm_io, tio);
		ret = bioset_init(&pools->io_bs, pool_size, io_front_pad, 0);
		if (ret)
			goto out;
		if (integrity && bioset_integrity_create(&pools->io_bs, pool_size))
			goto out;
		break;
	case DM_TYPE_REQUEST_BASED:
		pool_size = max(dm_get_reserved_rq_based_ios(), min_pool_size);
		front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
		/* per_io_data_size is used for blk-mq pdu at queue allocation */
		break;
	default:
		BUG();
	}

	ret = bioset_init(&pools->bs, pool_size, front_pad, 0);
	if (ret)
		goto out;

	if (integrity && bioset_integrity_create(&pools->bs, pool_size))
		goto out;

	return pools;

out:
	dm_free_md_mempools(pools);

	return NULL;
}

void dm_free_md_mempools(struct dm_md_mempools *pools)
{
	if (!pools)
		return;

	bioset_exit(&pools->bs);
	bioset_exit(&pools->io_bs);

	kfree(pools);
}
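/*
 * Illustrative sketch, not part of the original file: the front-pad layout
 * computed by dm_alloc_md_mempools() for a hypothetical target that asked for
 * 64 bytes of per-bio data.  Bios from io_bs are preceded in memory by
 * [per-bio data][struct dm_io ... struct dm_target_io ... clone bio], which is
 * what allows walking backwards from the clone bio to the per-bio data.
 */
static void __maybe_unused example_front_pad_layout(void)
{
	unsigned per_io_data_size = 64;	/* hypothetical ti->per_io_data_size */
	unsigned front_pad, io_front_pad;

	front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) +
		    offsetof(struct dm_target_io, clone);
	io_front_pad = roundup(front_pad, __alignof__(struct dm_io)) +
		       offsetof(struct dm_io, tio);

	(void)io_front_pad;
}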
struct dm_pr {
	u64	old_key;
	u64	new_key;
	u32	flags;
	bool	fail_early;
};

static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn,
		      void *data)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	struct dm_table *table;
	struct dm_target *ti;
	int ret = -ENOTTY, srcu_idx;

	table = dm_get_live_table(md, &srcu_idx);
	if (!table || !dm_table_get_size(table))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(table) != 1)
		goto out;
	ti = dm_table_get_target(table, 0);

	ret = -EINVAL;
	if (!ti->type->iterate_devices)
		goto out;

	ret = ti->type->iterate_devices(ti, fn, data);
out:
	dm_put_live_table(md, srcu_idx);
	return ret;
}
/*
 * For register / unregister we need to manually call out to every path.
 */
static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev,
			    sector_t start, sector_t len, void *data)
{
	struct dm_pr *pr = data;
	const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;

	if (!ops || !ops->pr_register)
		return -EOPNOTSUPP;
	return ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags);
}
static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
			  u32 flags)
{
	struct dm_pr pr = {
		.old_key	= old_key,
		.new_key	= new_key,
		.flags		= flags,
		.fail_early	= true,
	};
	int ret;

	ret = dm_call_pr(bdev, __dm_pr_register, &pr);
	if (ret && new_key) {
		/* unregister all paths if we failed to register any path */
		pr.old_key = new_key;
		pr.new_key = 0;
		pr.flags = 0;
		pr.fail_early = false;
		dm_call_pr(bdev, __dm_pr_register, &pr);
	}

	return ret;
}
static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
			 u32 flags)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	int r, srcu_idx;

	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_reserve)
		r = ops->pr_reserve(bdev, key, type, flags);
	else
		r = -EOPNOTSUPP;
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}
static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	int r, srcu_idx;

	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_release)
		r = ops->pr_release(bdev, key, type);
	else
		r = -EOPNOTSUPP;
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}
static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
			 enum pr_type type, bool abort)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	int r, srcu_idx;

	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_preempt)
		r = ops->pr_preempt(bdev, old_key, new_key, type, abort);
	else
		r = -EOPNOTSUPP;
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}
static int dm_pr_clear(struct block_device *bdev, u64 key)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	int r, srcu_idx;

	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_clear)
		r = ops->pr_clear(bdev, key);
	else
		r = -EOPNOTSUPP;
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}
static const struct pr_ops dm_pr_ops = {
	.pr_register	= dm_pr_register,
	.pr_reserve	= dm_pr_reserve,
	.pr_release	= dm_pr_release,
	.pr_preempt	= dm_pr_preempt,
	.pr_clear	= dm_pr_clear,
};
static const struct block_device_operations dm_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.ioctl = dm_blk_ioctl,
	.getgeo = dm_blk_getgeo,
	.report_zones = dm_blk_report_zones,
	.pr_ops = &dm_pr_ops,
	.owner = THIS_MODULE
};
static const struct dax_operations dm_dax_ops = {
	.direct_access = dm_dax_direct_access,
	.copy_from_iter = dm_dax_copy_from_iter,
	.copy_to_iter = dm_dax_copy_to_iter,
};
module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");

module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");

module_param(dm_numa_node, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations");

MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");