/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched/signal.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/uio.h>
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/wait.h>

#define DM_MSG_PREFIX "core"

/*
 * ratelimit state to be used in DMXXX_LIMIT().
 */
DEFINE_RATELIMIT_STATE(dm_ratelimit_state,
		       DEFAULT_RATELIMIT_INTERVAL,
		       DEFAULT_RATELIMIT_BURST);
EXPORT_SYMBOL(dm_ratelimit_state);

/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_IDR(_minor_idr);

static DEFINE_SPINLOCK(_minor_lock);

static void do_deferred_remove(struct work_struct *w);

static DECLARE_WORK(deferred_remove_work, do_deferred_remove);

static struct workqueue_struct *deferred_remove_workqueue;

atomic_t dm_global_event_nr = ATOMIC_INIT(0);
DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq);
/*
 * One of these is allocated per bio.
 */
struct dm_io {
	struct mapped_device *md;
	blk_status_t status;
	atomic_t io_count;
	struct bio *bio;
	unsigned long start_time;
	spinlock_t endio_lock;
	struct dm_stats_aux stats_aux;
};

#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_DEFERRED_REMOVE 6
#define DMF_SUSPENDED_INTERNALLY 7

#define DM_NUMA_NODE NUMA_NO_NODE
static int dm_numa_node = DM_NUMA_NODE;

/*
 * For mempools pre-allocation at the table loading time.
 */
struct dm_md_mempools {
	mempool_t *io_pool;
	struct bio_set *bs;
};

struct table_device {
	struct list_head list;
	atomic_t count;
	struct dm_dev dm_dev;
};

static struct kmem_cache *_io_cache;
static struct kmem_cache *_rq_tio_cache;
static struct kmem_cache *_rq_cache;

/*
 * Bio-based DM's mempools' reserved IOs set by the user.
 */
#define RESERVED_BIO_BASED_IOS		16
static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;
static int __dm_get_module_param_int(int *module_param, int min, int max)
{
	int param = ACCESS_ONCE(*module_param);
	int modified_param = 0;
	bool modified = true;

	if (param < min)
		modified_param = min;
	else if (param > max)
		modified_param = max;
	else
		modified = false;

	if (modified) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}

unsigned __dm_get_module_param(unsigned *module_param,
			       unsigned def, unsigned max)
{
	unsigned param = ACCESS_ONCE(*module_param);
	unsigned modified_param = 0;

	if (!param)
		modified_param = def;
	else if (param > max)
		modified_param = max;

	if (modified_param) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}

unsigned dm_get_reserved_bio_based_ios(void)
{
	return __dm_get_module_param(&reserved_bio_based_ios,
				     RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);
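
/*
 * Illustrative sketch (not part of the original file) of how the module
 * parameter helpers above clamp user-supplied values:
 *
 *	unsigned ios;
 *
 *	reserved_bio_based_ios = 0;
 *	ios = dm_get_reserved_bio_based_ios();	// falls back to RESERVED_BIO_BASED_IOS
 *
 *	reserved_bio_based_ios = UINT_MAX;
 *	ios = dm_get_reserved_bio_based_ios();	// clamped to DM_RESERVED_MAX_IOS
 *
 * The clamped value is written back with cmpxchg() so later readers see it.
 */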
static unsigned dm_get_numa_node(void)
{
	return __dm_get_module_param_int(&dm_numa_node,
					 DM_NUMA_NODE, num_online_nodes() - 1);
}

static int __init local_init(void)
{
	int r = -ENOMEM;

	/* allocate a slab for the dm_ios */
	_io_cache = KMEM_CACHE(dm_io, 0);
	if (!_io_cache)
		return r;

	_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
	if (!_rq_tio_cache)
		goto out_free_io_cache;

	_rq_cache = kmem_cache_create("dm_old_clone_request", sizeof(struct request),
				      __alignof__(struct request), 0, NULL);
	if (!_rq_cache)
		goto out_free_rq_tio_cache;

	r = dm_uevent_init();
	if (r)
		goto out_free_rq_cache;

	deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
	if (!deferred_remove_workqueue) {
		r = -ENOMEM;
		goto out_uevent_exit;
	}

	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_free_workqueue;

	return 0;

out_free_workqueue:
	destroy_workqueue(deferred_remove_workqueue);
out_uevent_exit:
	dm_uevent_exit();
out_free_rq_cache:
	kmem_cache_destroy(_rq_cache);
out_free_rq_tio_cache:
	kmem_cache_destroy(_rq_tio_cache);
out_free_io_cache:
	kmem_cache_destroy(_io_cache);

	return r;
}

static void local_exit(void)
{
	flush_scheduled_work();
	destroy_workqueue(deferred_remove_workqueue);

	kmem_cache_destroy(_rq_cache);
	kmem_cache_destroy(_rq_tio_cache);
	kmem_cache_destroy(_io_cache);
	unregister_blkdev(_major, _name);

	DMINFO("cleaned up");
}
static int (*_inits[])(void) __initdata = {
};

static void (*_exits[])(void) = {
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);
	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();

	/*
	 * Should be empty by this point.
	 */
	idr_destroy(&_minor_idr);
}
/*
 * Block device functions
 */
int dm_deleting_md(struct mapped_device *md)
{
	return test_bit(DMF_DELETING, &md->flags);
}

static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);
out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}

static void dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = disk->private_data;
	if (!md)
		goto out;

	if (atomic_dec_and_test(&md->open_count) &&
	    (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
		queue_work(deferred_remove_workqueue, &deferred_remove_work);

	dm_put(md);
out:
	spin_unlock(&_minor_lock);
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md)) {
		r = -EBUSY;
		if (mark_deferred)
			set_bit(DMF_DEFERRED_REMOVE, &md->flags);
	} else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
		r = -EEXIST;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

int dm_cancel_deferred_remove(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (test_bit(DMF_DELETING, &md->flags))
		r = -EBUSY;
	else
		clear_bit(DMF_DEFERRED_REMOVE, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

static void do_deferred_remove(struct work_struct *w)
{
	dm_deferred_remove();
}
sector_t dm_get_size(struct mapped_device *md)
{
	return get_capacity(md->disk);
}

struct request_queue *dm_get_md_queue(struct mapped_device *md)
{
	return md->queue;
}

struct dm_stats *dm_get_stats(struct mapped_device *md)
{
	return &md->stats;
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}

static int dm_grab_bdev_for_ioctl(struct mapped_device *md,
				  struct block_device **bdev,
				  fmode_t *mode)
{
	struct dm_target *tgt;
	struct dm_table *map;
	int srcu_idx, r;

retry:
	r = -ENOTTY;
	map = dm_get_live_table(md, &srcu_idx);
	if (!map || !dm_table_get_size(map))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		goto out;

	tgt = dm_table_get_target(map, 0);
	if (!tgt->type->prepare_ioctl)
		goto out;

	if (dm_suspended_md(md)) {
		r = -EAGAIN;
		goto out;
	}

	r = tgt->type->prepare_ioctl(tgt, bdev, mode);
	if (r < 0)
		goto out;

	bdgrab(*bdev);
	dm_put_live_table(md, srcu_idx);
	return r;

out:
	dm_put_live_table(md, srcu_idx);
	if (r == -ENOTCONN && !fatal_signal_pending(current)) {
		msleep(10);
		goto retry;
	}
	return r;
}

static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	int r;

	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
	if (r < 0)
		return r;

	if (r > 0) {
		/*
		 * Target determined this ioctl is being issued against a
		 * subset of the parent bdev; require extra privileges.
		 */
		if (!capable(CAP_SYS_RAWIO)) {
			DMWARN_LIMIT(
	"%s: sending ioctl %x to DM device without required privilege.",
				current->comm, cmd);
			r = -ENOIOCTLCMD;
			goto out;
		}
	}

	r = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
out:
	bdput(bdev);
	return r;
}
static struct dm_io *alloc_io(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_NOIO);
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
	mempool_free(io, md->io_pool);
}

static void free_tio(struct dm_target_io *tio)
{
	bio_put(&tio->clone);
}

int md_in_flight(struct mapped_device *md)
{
	return atomic_read(&md->pending[READ]) +
	       atomic_read(&md->pending[WRITE]);
}

static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	int cpu;
	int rw = bio_data_dir(bio);

	io->start_time = jiffies;

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_unlock();
	atomic_set(&dm_disk(md)->part0.in_flight[rw],
		   atomic_inc_return(&md->pending[rw]));

	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio_data_dir(bio),
				    bio->bi_iter.bi_sector, bio_sectors(bio),
				    false, 0, &io->stats_aux);
}

static void end_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	unsigned long duration = jiffies - io->start_time;
	int pending;
	int rw = bio_data_dir(bio);

	generic_end_io_acct(rw, &dm_disk(md)->part0, io->start_time);

	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio_data_dir(bio),
				    bio->bi_iter.bi_sector, bio_sectors(bio),
				    true, duration, &io->stats_aux);

	/*
	 * After this is decremented the bio must not be touched if it is
	 * a flush.
	 */
	pending = atomic_dec_return(&md->pending[rw]);
	atomic_set(&dm_disk(md)->part0.in_flight[rw], pending);
	pending += atomic_read(&md->pending[rw^0x1]);

	/* nudge anyone waiting on suspend queue */
	if (!pending)
		wake_up(&md->wait);
}
/*
 * Add the bio to the list of deferred io.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&md->deferred_lock, flags);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irqrestore(&md->deferred_lock, flags);
	queue_work(md->wq, &md->work);
}

/*
 * Everyone (including functions in this file), should use this
 * function to access the md->map field, and make sure they call
 * dm_put_live_table() when finished.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier)
{
	*srcu_idx = srcu_read_lock(&md->io_barrier);

	return srcu_dereference(md->map, &md->io_barrier);
}

void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier)
{
	srcu_read_unlock(&md->io_barrier, srcu_idx);
}

void dm_sync_table(struct mapped_device *md)
{
	synchronize_srcu(&md->io_barrier);
	synchronize_rcu_expedited();
}
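
/*
 * Illustrative usage sketch (not part of the original file): callers bracket
 * every access to the live table with the SRCU helpers above, e.g.
 *
 *	int srcu_idx;
 *	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 *
 *	if (map)
 *		// inspect or walk the table here, without blocking forever
 *	dm_put_live_table(md, srcu_idx);
 */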
/*
 * A fast alternative to dm_get_live_table/dm_put_live_table.
 * The caller must not block between these two functions.
 */
static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
{
	rcu_read_lock();
	return rcu_dereference(md->map);
}

static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
{
	rcu_read_unlock();
}

/*
 * Open a table device so we can use it as a map destination.
 */
static int open_table_device(struct table_device *td, dev_t dev,
			     struct mapped_device *md)
{
	static char *_claim_ptr = "I belong to device-mapper";
	struct block_device *bdev;
	int r;

	BUG_ON(td->dm_dev.bdev);

	bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _claim_ptr);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	r = bd_link_disk_holder(bdev, dm_disk(md));
	if (r) {
		blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL);
		return r;
	}

	td->dm_dev.bdev = bdev;
	td->dm_dev.dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
	return 0;
}

/*
 * Close a table device that we've been using.
 */
static void close_table_device(struct table_device *td, struct mapped_device *md)
{
	if (!td->dm_dev.bdev)
		return;

	bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md));
	blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
	put_dax(td->dm_dev.dax_dev);
	td->dm_dev.bdev = NULL;
	td->dm_dev.dax_dev = NULL;
}
static struct table_device *find_table_device(struct list_head *l, dev_t dev,
					      fmode_t mode)
{
	struct table_device *td;

	list_for_each_entry(td, l, list)
		if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
			return td;

	return NULL;
}

int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
			struct dm_dev **result) {
	int r;
	struct table_device *td;

	mutex_lock(&md->table_devices_lock);
	td = find_table_device(&md->table_devices, dev, mode);
	if (!td) {
		td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
		if (!td) {
			mutex_unlock(&md->table_devices_lock);
			return -ENOMEM;
		}

		td->dm_dev.mode = mode;
		td->dm_dev.bdev = NULL;

		if ((r = open_table_device(td, dev, md))) {
			mutex_unlock(&md->table_devices_lock);
			kfree(td);
			return r;
		}

		format_dev_t(td->dm_dev.name, dev);

		atomic_set(&td->count, 0);
		list_add(&td->list, &md->table_devices);
	}
	atomic_inc(&td->count);
	mutex_unlock(&md->table_devices_lock);

	*result = &td->dm_dev;
	return 0;
}
EXPORT_SYMBOL_GPL(dm_get_table_device);

void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
{
	struct table_device *td = container_of(d, struct table_device, dm_dev);

	mutex_lock(&md->table_devices_lock);
	if (atomic_dec_and_test(&td->count)) {
		close_table_device(td, md);
		list_del(&td->list);
		kfree(td);
	}
	mutex_unlock(&md->table_devices_lock);
}
EXPORT_SYMBOL(dm_put_table_device);

static void free_table_devices(struct list_head *devices)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, devices) {
		struct table_device *td = list_entry(tmp, struct table_device, list);

		DMWARN("dm_destroy: %s still exists with %d references",
		       td->dm_dev.name, atomic_read(&td->count));
		kfree(td);
	}
}
/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}

/*-----------------------------------------------------------------
 * A more elegant soln is in the works that uses the queue
 * merge fn, unfortunately there are a couple of changes to
 * the block layer that I want to make for this.  So in the
 * interests of getting something for people to use I give
 * you this clearly demarcated crap.
 *---------------------------------------------------------------*/

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}
/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necc.
 */
static void dec_pending(struct dm_io *io, blk_status_t error)
{
	unsigned long flags;
	blk_status_t io_error;
	struct bio *bio;
	struct mapped_device *md = io->md;

	/* Push-back supersedes any I/O errors */
	if (unlikely(error)) {
		spin_lock_irqsave(&io->endio_lock, flags);
		if (!(io->status == BLK_STS_DM_REQUEUE &&
		      __noflush_suspending(md)))
			io->status = error;
		spin_unlock_irqrestore(&io->endio_lock, flags);
	}

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->status == BLK_STS_DM_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 */
			spin_lock_irqsave(&md->deferred_lock, flags);
			if (__noflush_suspending(md))
				bio_list_add_head(&md->deferred, io->bio);
			else
				/* noflush suspend was interrupted. */
				io->status = BLK_STS_IOERR;
			spin_unlock_irqrestore(&md->deferred_lock, flags);
		}

		io_error = io->status;
		bio = io->bio;
		end_io_acct(io);
		free_io(md, io);

		if (io_error == BLK_STS_DM_REQUEUE)
			return;

		if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
			/*
			 * Preflush done for flush with data, reissue
			 * without REQ_PREFLUSH.
			 */
			bio->bi_opf &= ~REQ_PREFLUSH;
			queue_io(md, bio);
		} else {
			/* done with normal IO or empty flush */
			bio->bi_status = io_error;
			bio_endio(bio);
		}
	}
}

void disable_write_same(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE SAME, disable it */
	limits->max_write_same_sectors = 0;
}

void disable_write_zeroes(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE ZEROES, disable it */
	limits->max_write_zeroes_sectors = 0;
}
static void clone_endio(struct bio *bio)
{
	blk_status_t error = bio->bi_status;
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	struct dm_io *io = tio->io;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (unlikely(error == BLK_STS_TARGET)) {
		if (bio_op(bio) == REQ_OP_WRITE_SAME &&
		    !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors)
			disable_write_same(md);
		if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
		    !bdev_get_queue(bio->bi_bdev)->limits.max_write_zeroes_sectors)
			disable_write_zeroes(md);
	}

	if (endio) {
		int r = endio(tio->ti, bio, &error);
		switch (r) {
		case DM_ENDIO_REQUEUE:
			error = BLK_STS_DM_REQUEUE;
			/*FALLTHRU*/
		case DM_ENDIO_DONE:
			break;
		case DM_ENDIO_INCOMPLETE:
			/* The target will handle the io */
			return;
		default:
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	free_tio(tio);
	dec_pending(io, error);
}
/*
 * Return maximum size of I/O possible at the supplied sector up to the current
 * target boundary.
 */
static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
{
	sector_t target_offset = dm_target_offset(ti, sector);

	return ti->len - target_offset;
}

static sector_t max_io_len(sector_t sector, struct dm_target *ti)
{
	sector_t len = max_io_len_target_boundary(sector, ti);
	sector_t offset, max_len;

	/*
	 * Does the target need to split even further?
	 */
	if (ti->max_io_len) {
		offset = dm_target_offset(ti, sector);
		if (unlikely(ti->max_io_len & (ti->max_io_len - 1)))
			max_len = sector_div(offset, ti->max_io_len);
		else
			max_len = offset & (ti->max_io_len - 1);
		max_len = ti->max_io_len - max_len;

		if (len > max_len)
			len = max_len;
	}

	return len;
}

int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
{
	if (len > UINT_MAX) {
		DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
		      (unsigned long long)len, UINT_MAX);
		ti->error = "Maximum size of target IO is too large";
		return -EINVAL;
	}

	ti->max_io_len = (uint32_t) len;

	return 0;
}
EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
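
/*
 * Illustrative usage sketch (not part of the original file): a target's
 * constructor typically caps its per-bio size with this helper, where
 * "chunk_sectors" is a hypothetical per-target value:
 *
 *	r = dm_set_target_max_io_len(ti, chunk_sectors);
 *	if (r)
 *		return r;
 *
 * max_io_len() above then splits incoming bios on that boundary.
 */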
static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
						sector_t sector, int *srcu_idx)
{
	struct dm_table *map;
	struct dm_target *ti;

	map = dm_get_live_table(md, srcu_idx);
	if (!map)
		return NULL;

	ti = dm_table_find_target(map, sector);
	if (!dm_target_is_valid(ti))
		return NULL;

	return ti;
}

static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
				 long nr_pages, void **kaddr, pfn_t *pfn)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long len, ret = -EIO;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);
	if (!ti)
		goto out;
	if (!ti->type->direct_access)
		goto out;
	len = max_io_len(sector, ti) / PAGE_SECTORS;
	if (len < 1)
		goto out;
	nr_pages = min(len, nr_pages);
	if (ti->type->direct_access)
		ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn);

 out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
				    void *addr, size_t bytes, struct iov_iter *i)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long ret = 0;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);
	if (!ti)
		goto out;
	if (!ti->type->dax_copy_from_iter) {
		ret = copy_from_iter(addr, bytes, i);
		goto out;
	}
	ret = ti->type->dax_copy_from_iter(ti, pgoff, addr, bytes, i);
 out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

static void dm_dax_flush(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
			 size_t size)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);
	if (!ti)
		goto out;
	if (ti->type->dax_flush)
		ti->type->dax_flush(ti, pgoff, addr, size);
 out:
	dm_put_live_table(md, srcu_idx);
}
/*
 * A target may call dm_accept_partial_bio only from the map routine.  It is
 * allowed for all bio types except REQ_PREFLUSH.
 *
 * dm_accept_partial_bio informs the dm that the target only wants to process
 * additional n_sectors sectors of the bio and the rest of the data should be
 * sent in a next bio.
 *
 * A diagram that explains the arithmetics:
 * +--------------------+---------------+-------+
 * |         1          |       2       |   3   |
 * +--------------------+---------------+-------+
 *
 * <-------------- *tio->len_ptr --------------->
 *                      <------- bi_size ------->
 *                      <-- n_sectors -->
 *
 * Region 1 was already iterated over with bio_advance or similar function.
 *	(it may be empty if the target doesn't use bio_advance)
 * Region 2 is the remaining bio size that the target wants to process.
 *	(it may be empty if region 1 is non-empty, although there is no reason
 *	 to make it empty)
 * The target requires that region 3 is to be sent in the next bio.
 *
 * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
 * the partially processed part (the sum of regions 1+2) must be the same for all
 * copies of the bio.
 */
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
{
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;

	BUG_ON(bio->bi_opf & REQ_PREFLUSH);
	BUG_ON(bi_size > *tio->len_ptr);
	BUG_ON(n_sectors > bi_size);

	*tio->len_ptr -= bi_size - n_sectors;
	bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
}
EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
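
/*
 * Illustrative usage sketch (not part of the original file): a target ->map
 * method that can only handle the first "max" sectors of a bio would trim it
 * and let dm resubmit the remainder, roughly:
 *
 *	static int example_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		if (bio_sectors(bio) > max)
 *			dm_accept_partial_bio(bio, max);
 *		// remap bio->bi_bdev / bi_sector here
 *		return DM_MAPIO_REMAPPED;
 *	}
 *
 * "example_map" and "max" are hypothetical names used only for illustration.
 */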
/*
 * The zone descriptors obtained with a zone report indicate
 * zone positions within the target device. The zone descriptors
 * must be remapped to match their position within the dm device.
 * A target may call dm_remap_zone_report after completion of a
 * REQ_OP_ZONE_REPORT bio to remap the zone descriptors obtained
 * from the target device mapping to the dm device.
 */
void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
{
#ifdef CONFIG_BLK_DEV_ZONED
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	struct bio *report_bio = tio->io->bio;
	struct blk_zone_report_hdr *hdr = NULL;
	struct blk_zone *zone;
	unsigned int nr_rep = 0;
	unsigned int ofst;
	struct bio_vec bvec;
	struct bvec_iter iter;
	void *addr;

	/*
	 * Remap the start sector of the reported zones. For sequential zones,
	 * also remap the write pointer position.
	 */
	bio_for_each_segment(bvec, report_bio, iter) {
		addr = kmap_atomic(bvec.bv_page);

		/* Remember the report header in the first page */
		if (!hdr) {
			hdr = addr;
			ofst = sizeof(struct blk_zone_report_hdr);
		} else
			ofst = 0;

		/* Set zones start sector */
		while (hdr->nr_zones && ofst < bvec.bv_len) {
			zone = addr + ofst;
			if (zone->start >= start + ti->len) {
				hdr->nr_zones = 0;
				break;
			}
			zone->start = zone->start + ti->begin - start;
			if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL) {
				if (zone->cond == BLK_ZONE_COND_FULL)
					zone->wp = zone->start + zone->len;
				else if (zone->cond == BLK_ZONE_COND_EMPTY)
					zone->wp = zone->start;
				else
					zone->wp = zone->wp + ti->begin - start;
			}
			ofst += sizeof(struct blk_zone);
			nr_rep++;
		}

		kunmap_atomic(addr);
	}

	if (hdr)
		hdr->nr_zones = nr_rep;

	bio_advance(report_bio, report_bio->bi_iter.bi_size);

#else /* !CONFIG_BLK_DEV_ZONED */
	bio->bi_status = BLK_STS_NOTSUPP;
#endif
}
EXPORT_SYMBOL_GPL(dm_remap_zone_report);
/*
 * Flush current->bio_list when the target map method blocks.
 * This fixes deadlocks in snapshot and possibly in other targets.
 */
struct dm_offload {
	struct blk_plug plug;
	struct blk_plug_cb cb;
};

static void flush_current_bio_list(struct blk_plug_cb *cb, bool from_schedule)
{
	struct dm_offload *o = container_of(cb, struct dm_offload, cb);
	struct bio_list list;
	struct bio *bio;
	int i;

	INIT_LIST_HEAD(&o->cb.list);

	if (unlikely(!current->bio_list))
		return;

	for (i = 0; i < 2; i++) {
		list = current->bio_list[i];
		bio_list_init(&current->bio_list[i]);

		while ((bio = bio_list_pop(&list))) {
			struct bio_set *bs = bio->bi_pool;
			if (unlikely(!bs) || bs == fs_bio_set ||
			    !bs->rescue_workqueue) {
				bio_list_add(&current->bio_list[i], bio);
				continue;
			}

			spin_lock(&bs->rescue_lock);
			bio_list_add(&bs->rescue_list, bio);
			queue_work(bs->rescue_workqueue, &bs->rescue_work);
			spin_unlock(&bs->rescue_lock);
		}
	}
}

static void dm_offload_start(struct dm_offload *o)
{
	blk_start_plug(&o->plug);
	o->cb.callback = flush_current_bio_list;
	list_add(&o->cb.list, &current->plug->cb_list);
}

static void dm_offload_end(struct dm_offload *o)
{
	list_del(&o->cb.list);
	blk_finish_plug(&o->plug);
}
static void __map_bio(struct dm_target_io *tio)
{
	int r;
	sector_t sector;
	struct dm_offload o;
	struct bio *clone = &tio->clone;
	struct dm_target *ti = tio->ti;

	clone->bi_end_io = clone_endio;

	/*
	 * Map the clone.  If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
	atomic_inc(&tio->io->io_count);
	sector = clone->bi_iter.bi_sector;

	dm_offload_start(&o);
	r = ti->type->map(ti, clone);
	dm_offload_end(&o);

	switch (r) {
	case DM_MAPIO_SUBMITTED:
		break;
	case DM_MAPIO_REMAPPED:
		/* the bio has been remapped so dispatch it */
		trace_block_bio_remap(bdev_get_queue(clone->bi_bdev), clone,
				      tio->io->bio->bi_bdev->bd_dev, sector);
		generic_make_request(clone);
		break;
	case DM_MAPIO_KILL:
		dec_pending(tio->io, BLK_STS_IOERR);
		free_tio(tio);
		break;
	case DM_MAPIO_REQUEUE:
		dec_pending(tio->io, BLK_STS_DM_REQUEUE);
		free_tio(tio);
		break;
	default:
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}
}
struct clone_info {
	struct mapped_device *md;
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	unsigned sector_count;
};

static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)
{
	bio->bi_iter.bi_sector = sector;
	bio->bi_iter.bi_size = to_bytes(len);
}

/*
 * Creates a bio that consists of range of complete bvecs.
 */
static int clone_bio(struct dm_target_io *tio, struct bio *bio,
		     sector_t sector, unsigned len)
{
	struct bio *clone = &tio->clone;

	__bio_clone_fast(clone, bio);

	if (unlikely(bio_integrity(bio) != NULL)) {
		int r;

		if (unlikely(!dm_target_has_integrity(tio->ti->type) &&
			     !dm_target_passes_integrity(tio->ti->type))) {
			DMWARN("%s: the target %s doesn't support integrity data.",
				dm_device_name(tio->io->md),
				tio->ti->type->name);
			return -EIO;
		}

		r = bio_integrity_clone(clone, bio, GFP_NOIO);
		if (r < 0)
			return r;
	}

	if (bio_op(bio) != REQ_OP_ZONE_REPORT)
		bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
	clone->bi_iter.bi_size = to_bytes(len);

	if (unlikely(bio_integrity(bio) != NULL))
		bio_integrity_trim(clone);

	return 0;
}
static struct dm_target_io *alloc_tio(struct clone_info *ci,
				      struct dm_target *ti,
				      unsigned target_bio_nr)
{
	struct dm_target_io *tio;
	struct bio *clone;

	clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs);
	tio = container_of(clone, struct dm_target_io, clone);

	tio->io = ci->io;
	tio->ti = ti;
	tio->target_bio_nr = target_bio_nr;

	return tio;
}

static void __clone_and_map_simple_bio(struct clone_info *ci,
				       struct dm_target *ti,
				       unsigned target_bio_nr, unsigned *len)
{
	struct dm_target_io *tio = alloc_tio(ci, ti, target_bio_nr);
	struct bio *clone = &tio->clone;

	tio->len_ptr = len;

	__bio_clone_fast(clone, ci->bio);
	if (len)
		bio_setup_sector(clone, ci->sector, *len);

	__map_bio(tio);
}

static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
				  unsigned num_bios, unsigned *len)
{
	unsigned target_bio_nr;

	for (target_bio_nr = 0; target_bio_nr < num_bios; target_bio_nr++)
		__clone_and_map_simple_bio(ci, ti, target_bio_nr, len);
}

static int __send_empty_flush(struct clone_info *ci)
{
	unsigned target_nr = 0;
	struct dm_target *ti;

	BUG_ON(bio_has_data(ci->bio));
	while ((ti = dm_table_get_target(ci->map, target_nr++)))
		__send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);

	return 0;
}
static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
				    sector_t sector, unsigned *len)
{
	struct bio *bio = ci->bio;
	struct dm_target_io *tio;
	unsigned target_bio_nr;
	unsigned num_target_bios = 1;
	int r = 0;

	/*
	 * Does the target want to receive duplicate copies of the bio?
	 */
	if (bio_data_dir(bio) == WRITE && ti->num_write_bios)
		num_target_bios = ti->num_write_bios(ti, bio);

	for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) {
		tio = alloc_tio(ci, ti, target_bio_nr);
		tio->len_ptr = len;
		r = clone_bio(tio, bio, sector, *len);
		if (r < 0) {
			free_tio(tio);
			break;
		}
		__map_bio(tio);
	}

	return r;
}

typedef unsigned (*get_num_bios_fn)(struct dm_target *ti);

static unsigned get_num_discard_bios(struct dm_target *ti)
{
	return ti->num_discard_bios;
}

static unsigned get_num_write_same_bios(struct dm_target *ti)
{
	return ti->num_write_same_bios;
}

static unsigned get_num_write_zeroes_bios(struct dm_target *ti)
{
	return ti->num_write_zeroes_bios;
}

typedef bool (*is_split_required_fn)(struct dm_target *ti);

static bool is_split_required_for_discard(struct dm_target *ti)
{
	return ti->split_discard_bios;
}

static int __send_changing_extent_only(struct clone_info *ci,
				       get_num_bios_fn get_num_bios,
				       is_split_required_fn is_split_required)
{
	struct dm_target *ti;
	unsigned len;
	unsigned num_bios;

	do {
		ti = dm_table_find_target(ci->map, ci->sector);
		if (!dm_target_is_valid(ti))
			return -EIO;

		/*
		 * Even though the device advertised support for this type of
		 * request, that does not mean every target supports it, and
		 * reconfiguration might also have changed that since the
		 * check was performed.
		 */
		num_bios = get_num_bios ? get_num_bios(ti) : 0;
		if (!num_bios)
			return -EOPNOTSUPP;

		if (is_split_required && !is_split_required(ti))
			len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
		else
			len = min((sector_t)ci->sector_count, max_io_len(ci->sector, ti));

		__send_duplicate_bios(ci, ti, num_bios, &len);

		ci->sector += len;
	} while (ci->sector_count -= len);

	return 0;
}

static int __send_discard(struct clone_info *ci)
{
	return __send_changing_extent_only(ci, get_num_discard_bios,
					   is_split_required_for_discard);
}

static int __send_write_same(struct clone_info *ci)
{
	return __send_changing_extent_only(ci, get_num_write_same_bios, NULL);
}

static int __send_write_zeroes(struct clone_info *ci)
{
	return __send_changing_extent_only(ci, get_num_write_zeroes_bios, NULL);
}
/*
 * Select the correct strategy for processing a non-flush bio.
 */
static int __split_and_process_non_flush(struct clone_info *ci)
{
	struct bio *bio = ci->bio;
	struct dm_target *ti;
	unsigned len;
	int r;

	if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
		return __send_discard(ci);
	else if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
		return __send_write_same(ci);
	else if (unlikely(bio_op(bio) == REQ_OP_WRITE_ZEROES))
		return __send_write_zeroes(ci);

	ti = dm_table_find_target(ci->map, ci->sector);
	if (!dm_target_is_valid(ti))
		return -EIO;

	if (bio_op(bio) == REQ_OP_ZONE_REPORT)
		len = ci->sector_count;
	else
		len = min_t(sector_t, max_io_len(ci->sector, ti),
			    ci->sector_count);

	r = __clone_and_map_data_bio(ci, ti, ci->sector, &len);
	if (r < 0)
		return r;

	ci->sector += len;
	ci->sector_count -= len;

	return 0;
}

/*
 * Entry point to split a bio into clones and submit them to the targets.
 */
static void __split_and_process_bio(struct mapped_device *md,
				    struct dm_table *map, struct bio *bio)
{
	struct clone_info ci;
	int error = 0;

	if (unlikely(!map)) {
		bio_io_error(bio);
		return;
	}

	ci.map = map;
	ci.md = md;
	ci.io = alloc_io(md);
	ci.io->status = 0;
	atomic_set(&ci.io->io_count, 1);
	ci.io->bio = bio;
	ci.io->md = md;
	spin_lock_init(&ci.io->endio_lock);
	ci.sector = bio->bi_iter.bi_sector;

	start_io_acct(ci.io);

	if (bio->bi_opf & REQ_PREFLUSH) {
		ci.bio = &ci.md->flush_bio;
		ci.sector_count = 0;
		error = __send_empty_flush(&ci);
		/* dec_pending submits any data associated with flush */
	} else if (bio_op(bio) == REQ_OP_ZONE_RESET) {
		ci.bio = bio;
		ci.sector_count = 0;
		error = __split_and_process_non_flush(&ci);
	} else {
		ci.bio = bio;
		ci.sector_count = bio_sectors(bio);
		while (ci.sector_count && !error)
			error = __split_and_process_non_flush(&ci);
	}

	/* drop the extra reference count */
	dec_pending(ci.io, error);
}
/*-----------------------------------------------------------------
 *---------------------------------------------------------------*/

/*
 * The request function that just remaps the bio built up by
 */
static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
{
	int rw = bio_data_dir(bio);
	struct mapped_device *md = q->queuedata;
	int srcu_idx;
	struct dm_table *map;

	map = dm_get_live_table(md, &srcu_idx);

	generic_start_io_acct(rw, bio_sectors(bio), &dm_disk(md)->part0);

	/* if we're suspended, we have to queue this io for later */
	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
		dm_put_live_table(md, srcu_idx);

		if (!(bio->bi_opf & REQ_RAHEAD))
			queue_io(md, bio);
		else
			bio_io_error(bio);
		return BLK_QC_T_NONE;
	}

	__split_and_process_bio(md, map, bio);
	dm_put_live_table(md, srcu_idx);
	return BLK_QC_T_NONE;
}

static int dm_any_congested(void *congested_data, int bdi_bits)
{
	int r = bdi_bits;
	struct mapped_device *md = congested_data;
	struct dm_table *map;

	if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
		if (dm_request_based(md)) {
			/*
			 * With request-based DM we only need to check the
			 * top-level queue for congestion.
			 */
			r = md->queue->backing_dev_info->wb.state & bdi_bits;
		} else {
			map = dm_get_live_table_fast(md);
			if (map)
				r = dm_table_any_congested(map, bdi_bits);
			dm_put_live_table_fast(md);
		}
	}

	return r;
}
/*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
static void free_minor(int minor)
{
	spin_lock(&_minor_lock);
	idr_remove(&_minor_idr, minor);
	spin_unlock(&_minor_lock);
}

/*
 * See if the device with a specific minor # is free.
 */
static int specific_minor(int minor)
{
	int r;

	if (minor >= (1 << MINORBITS))
		return -EINVAL;

	idr_preload(GFP_KERNEL);
	spin_lock(&_minor_lock);

	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT);

	spin_unlock(&_minor_lock);
	idr_preload_end();
	if (r < 0)
		return r == -ENOSPC ? -EBUSY : r;
	return 0;
}

static int next_free_minor(int *minor)
{
	int r;

	idr_preload(GFP_KERNEL);
	spin_lock(&_minor_lock);

	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT);

	spin_unlock(&_minor_lock);
	idr_preload_end();
	if (r < 0)
		return r;
	*minor = r;
	return 0;
}

static const struct block_device_operations dm_blk_dops;
static const struct dax_operations dm_dax_ops;

static void dm_wq_work(struct work_struct *work);
void dm_init_md_queue(struct mapped_device *md)
{
	/*
	 * Request-based dm devices cannot be stacked on top of bio-based dm
	 * devices.  The type of this dm device may not have been decided yet.
	 * The type is decided at the first table loading time.
	 * To prevent problematic device stacking, clear the queue flag
	 * for request stacking support until then.
	 *
	 * This queue is new, so no concurrency on the queue_flags.
	 */
	queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);

	/*
	 * Initialize data that will only be used by a non-blk-mq DM queue
	 * - must do so here (in alloc_dev callchain) before queue is used
	 */
	md->queue->queuedata = md;
	md->queue->backing_dev_info->congested_data = md;
}

void dm_init_normal_md_queue(struct mapped_device *md)
{
	md->use_blk_mq = false;
	dm_init_md_queue(md);

	/*
	 * Initialize aspects of queue that aren't relevant for blk-mq
	 */
	md->queue->backing_dev_info->congested_fn = dm_any_congested;
}

static void cleanup_mapped_device(struct mapped_device *md)
{
	if (md->wq)
		destroy_workqueue(md->wq);
	if (md->kworker_task)
		kthread_stop(md->kworker_task);
	mempool_destroy(md->io_pool);
	if (md->bs)
		bioset_free(md->bs);

	if (md->dax_dev) {
		kill_dax(md->dax_dev);
		put_dax(md->dax_dev);
		md->dax_dev = NULL;
	}

	if (md->disk) {
		spin_lock(&_minor_lock);
		md->disk->private_data = NULL;
		spin_unlock(&_minor_lock);
		del_gendisk(md->disk);
		put_disk(md->disk);
	}

	if (md->queue)
		blk_cleanup_queue(md->queue);

	cleanup_srcu_struct(&md->io_barrier);

	dm_mq_cleanup_mapped_device(md);
}
/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(int minor)
{
	int r, numa_node_id = dm_get_numa_node();
	struct dax_device *dax_dev;
	struct mapped_device *md;
	void *old_md;

	md = kzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
	if (!md) {
		DMWARN("unable to allocate device, out of memory.");
		return NULL;
	}

	if (!try_module_get(THIS_MODULE))
		goto bad_module_get;

	/* get a minor number for the dev */
	if (minor == DM_ANY_MINOR)
		r = next_free_minor(&minor);
	else
		r = specific_minor(minor);
	if (r < 0)
		goto bad_minor;

	r = init_srcu_struct(&md->io_barrier);
	if (r < 0)
		goto bad_io_barrier;

	md->numa_node_id = numa_node_id;
	md->use_blk_mq = dm_use_blk_mq_default();
	md->init_tio_pdu = false;
	md->type = DM_TYPE_NONE;
	mutex_init(&md->suspend_lock);
	mutex_init(&md->type_lock);
	mutex_init(&md->table_devices_lock);
	spin_lock_init(&md->deferred_lock);
	atomic_set(&md->holders, 1);
	atomic_set(&md->open_count, 0);
	atomic_set(&md->event_nr, 0);
	atomic_set(&md->uevent_seq, 0);
	INIT_LIST_HEAD(&md->uevent_list);
	INIT_LIST_HEAD(&md->table_devices);
	spin_lock_init(&md->uevent_lock);

	md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id);
	if (!md->queue)
		goto bad;

	dm_init_md_queue(md);

	md->disk = alloc_disk_node(1, numa_node_id);
	if (!md->disk)
		goto bad;

	atomic_set(&md->pending[0], 0);
	atomic_set(&md->pending[1], 0);
	init_waitqueue_head(&md->wait);
	INIT_WORK(&md->work, dm_wq_work);
	init_waitqueue_head(&md->eventq);
	init_completion(&md->kobj_holder.completion);
	md->kworker_task = NULL;

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);

	dax_dev = alloc_dax(md, md->disk->disk_name, &dm_dax_ops);
	if (!dax_dev)
		goto bad;
	md->dax_dev = dax_dev;

	format_dev_t(md->name, MKDEV(_major, minor));

	md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0);
	if (!md->wq)
		goto bad;

	md->bdev = bdget_disk(md->disk, 0);
	if (!md->bdev)
		goto bad;

	bio_init(&md->flush_bio, NULL, 0);
	md->flush_bio.bi_bdev = md->bdev;
	md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;

	dm_stats_init(&md->stats);

	/* Populate the mapping, nobody knows we exist yet */
	spin_lock(&_minor_lock);
	old_md = idr_replace(&_minor_idr, md, minor);
	spin_unlock(&_minor_lock);

	BUG_ON(old_md != MINOR_ALLOCED);

	return md;

bad:
	cleanup_mapped_device(md);
bad_io_barrier:
	free_minor(minor);
bad_minor:
	module_put(THIS_MODULE);
bad_module_get:
	kfree(md);
	return NULL;
}

static void unlock_fs(struct mapped_device *md);

static void free_dev(struct mapped_device *md)
{
	int minor = MINOR(disk_devt(md->disk));

	unlock_fs(md);

	cleanup_mapped_device(md);

	free_table_devices(&md->table_devices);
	dm_stats_cleanup(&md->stats);
	free_minor(minor);

	module_put(THIS_MODULE);
	kfree(md);
}
static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
{
	struct dm_md_mempools *p = dm_table_get_md_mempools(t);

	if (md->bs) {
		/* The md already has necessary mempools. */
		if (dm_table_bio_based(t)) {
			/*
			 * Reload bioset because front_pad may have changed
			 * because a different table was loaded.
			 */
			bioset_free(md->bs);
			md->bs = p->bs;
			p->bs = NULL;
		}
		/*
		 * There's no need to reload with request-based dm
		 * because the size of front_pad doesn't change.
		 * Note for future: If you are to reload bioset,
		 * prep-ed requests in the queue may refer
		 * to bio from the old bioset, so you must walk
		 * through the queue to unprep.
		 */
		goto out;
	}

	BUG_ON(!p || md->io_pool || md->bs);

	md->io_pool = p->io_pool;
	p->io_pool = NULL;
	md->bs = p->bs;
	p->bs = NULL;

out:
	/* mempool bind completed, no longer need any mempools in the table */
	dm_table_free_md_mempools(t);
}

/*
 * Bind a table to the device.
 */
static void event_callback(void *context)
{
	unsigned long flags;
	LIST_HEAD(uevents);
	struct mapped_device *md = (struct mapped_device *) context;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_splice_init(&md->uevent_list, &uevents);
	spin_unlock_irqrestore(&md->uevent_lock, flags);

	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);

	atomic_inc(&md->event_nr);
	atomic_inc(&dm_global_event_nr);
	wake_up(&md->eventq);
	wake_up(&dm_global_eventq);
}

/*
 * Protected by md->suspend_lock obtained by dm_swap_table().
 */
static void __set_size(struct mapped_device *md, sector_t size)
{
	lockdep_assert_held(&md->suspend_lock);

	set_capacity(md->disk, size);

	i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
}
/*
 * Returns old map, which caller must destroy.
 */
static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
			       struct queue_limits *limits)
{
	struct dm_table *old_map;
	struct request_queue *q = md->queue;
	sector_t size;

	lockdep_assert_held(&md->suspend_lock);

	size = dm_table_get_size(t);

	/*
	 * Wipe any geometry if the size of the table changed.
	 */
	if (size != dm_get_size(md))
		memset(&md->geometry, 0, sizeof(md->geometry));

	__set_size(md, size);

	dm_table_event_callback(t, event_callback, md);

	/*
	 * The queue hasn't been stopped yet, if the old table type wasn't
	 * for request-based during suspension.  So stop it to prevent
	 * I/O mapping before resume.
	 * This must be done before setting the queue restrictions,
	 * because request-based dm may be run just after the setting.
	 */
	if (dm_table_request_based(t)) {
		dm_stop_queue(q);
		/*
		 * Leverage the fact that request-based DM targets are
		 * immutable singletons and establish md->immutable_target
		 * - used to optimize both dm_request_fn and dm_mq_queue_rq
		 */
		md->immutable_target = dm_table_get_immutable_target(t);
	}

	__bind_mempools(md, t);

	old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
	rcu_assign_pointer(md->map, (void *)t);
	md->immutable_target_type = dm_table_get_immutable_target_type(t);

	dm_table_set_restrictions(t, q, limits);
	if (old_map)
		dm_sync_table(md);

	return old_map;
}

/*
 * Returns unbound table for the caller to free.
 */
static struct dm_table *__unbind(struct mapped_device *md)
{
	struct dm_table *map = rcu_dereference_protected(md->map, 1);

	if (!map)
		return NULL;

	dm_table_event_callback(map, NULL, NULL);
	RCU_INIT_POINTER(md->map, NULL);
	dm_sync_table(md);

	return map;
}

/*
 * Constructor for a new device.
 */
int dm_create(int minor, struct mapped_device **result)
{
	struct mapped_device *md;

	md = alloc_dev(minor);
	if (!md)
		return -ENXIO;

	*result = md;
	return 0;
}
/*
 * Functions to manage md->type.
 * All are required to hold md->type_lock.
 */
void dm_lock_md_type(struct mapped_device *md)
{
	mutex_lock(&md->type_lock);
}

void dm_unlock_md_type(struct mapped_device *md)
{
	mutex_unlock(&md->type_lock);
}

void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type)
{
	BUG_ON(!mutex_is_locked(&md->type_lock));
	md->type = type;
}

enum dm_queue_mode dm_get_md_type(struct mapped_device *md)
{
	return md->type;
}

struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
{
	return md->immutable_target_type;
}

/*
 * The queue_limits are only valid as long as you have a reference
 * count on 'md'.
 */
struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
{
	BUG_ON(!atomic_read(&md->holders));
	return &md->queue->limits;
}
EXPORT_SYMBOL_GPL(dm_get_queue_limits);

/*
 * Setup the DM device's queue based on md's type
 */
int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
{
	int r;
	enum dm_queue_mode type = dm_get_md_type(md);

	switch (type) {
	case DM_TYPE_REQUEST_BASED:
		r = dm_old_init_request_queue(md, t);
		if (r) {
			DMERR("Cannot initialize queue for request-based mapped device");
			return r;
		}
		break;
	case DM_TYPE_MQ_REQUEST_BASED:
		r = dm_mq_init_request_queue(md, t);
		if (r) {
			DMERR("Cannot initialize queue for request-based dm-mq mapped device");
			return r;
		}
		break;
	case DM_TYPE_BIO_BASED:
	case DM_TYPE_DAX_BIO_BASED:
		dm_init_normal_md_queue(md);
		blk_queue_make_request(md->queue, dm_make_request);
		/*
		 * DM handles splitting bios as needed.  Free the bio_split bioset
		 * since it won't be used (saves 1 process per bio-based DM device).
		 */
		bioset_free(md->queue->bio_split);
		md->queue->bio_split = NULL;

		if (type == DM_TYPE_DAX_BIO_BASED)
			queue_flag_set_unlocked(QUEUE_FLAG_DAX, md->queue);
		break;
	}

	return 0;
}
struct mapped_device *dm_get_md(dev_t dev)
{
	struct mapped_device *md;
	unsigned minor = MINOR(dev);

	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
		return NULL;

	spin_lock(&_minor_lock);

	md = idr_find(&_minor_idr, minor);
	if (md) {
		if ((md == MINOR_ALLOCED ||
		     (MINOR(disk_devt(dm_disk(md))) != minor) ||
		     dm_deleting_md(md) ||
		     test_bit(DMF_FREEING, &md->flags))) {
			md = NULL;
			goto out;
		}
		dm_get(md);
	}

out:
	spin_unlock(&_minor_lock);

	return md;
}
EXPORT_SYMBOL_GPL(dm_get_md);

void *dm_get_mdptr(struct mapped_device *md)
{
	return md->interface_ptr;
}

void dm_set_mdptr(struct mapped_device *md, void *ptr)
{
	md->interface_ptr = ptr;
}

void dm_get(struct mapped_device *md)
{
	atomic_inc(&md->holders);
	BUG_ON(test_bit(DMF_FREEING, &md->flags));
}

int dm_hold(struct mapped_device *md)
{
	spin_lock(&_minor_lock);
	if (test_bit(DMF_FREEING, &md->flags)) {
		spin_unlock(&_minor_lock);
		return -EBUSY;
	}
	dm_get(md);
	spin_unlock(&_minor_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(dm_hold);

const char *dm_device_name(struct mapped_device *md)
{
	return md->name;
}
EXPORT_SYMBOL_GPL(dm_device_name);
static void __dm_destroy(struct mapped_device *md, bool wait)
{
	struct request_queue *q = dm_get_md_queue(md);
	struct dm_table *map;
	int srcu_idx;

	spin_lock(&_minor_lock);
	idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
	set_bit(DMF_FREEING, &md->flags);
	spin_unlock(&_minor_lock);

	blk_set_queue_dying(q);

	if (dm_request_based(md) && md->kworker_task)
		kthread_flush_worker(&md->kworker);

	/*
	 * Take suspend_lock so that presuspend and postsuspend methods
	 * do not race with internal suspend.
	 */
	mutex_lock(&md->suspend_lock);
	map = dm_get_live_table(md, &srcu_idx);
	if (!dm_suspended_md(md)) {
		dm_table_presuspend_targets(map);
		dm_table_postsuspend_targets(map);
	}
	/* dm_put_live_table must be before msleep, otherwise deadlock is possible */
	dm_put_live_table(md, srcu_idx);
	mutex_unlock(&md->suspend_lock);

	/*
	 * Rare, but there may be I/O requests still going to complete,
	 * for example.  Wait for all references to disappear.
	 * No one should increment the reference count of the mapped_device,
	 * after the mapped_device state becomes DMF_FREEING.
	 */
	if (wait)
		while (atomic_read(&md->holders))
			msleep(1);
	else if (atomic_read(&md->holders))
		DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
		       dm_device_name(md), atomic_read(&md->holders));

	dm_table_destroy(__unbind(md));
	free_dev(md);
}

void dm_destroy(struct mapped_device *md)
{
	__dm_destroy(md, true);
}

void dm_destroy_immediate(struct mapped_device *md)
{
	__dm_destroy(md, false);
}

void dm_put(struct mapped_device *md)
{
	atomic_dec(&md->holders);
}
EXPORT_SYMBOL_GPL(dm_put);

static int dm_wait_for_completion(struct mapped_device *md, long task_state)
{
	int r = 0;
	DEFINE_WAIT(wait);

	while (1) {
		prepare_to_wait(&md->wait, &wait, task_state);

		if (!md_in_flight(md))
			break;

		if (signal_pending_state(task_state, current)) {
			r = -EINTR;
			break;
		}

		io_schedule();
	}
	finish_wait(&md->wait, &wait);

	return r;
}
/*
 * Process the deferred bios
 */
static void dm_wq_work(struct work_struct *work)
{
	struct mapped_device *md = container_of(work, struct mapped_device,
						work);
	struct bio *c;
	int srcu_idx;
	struct dm_table *map;

	map = dm_get_live_table(md, &srcu_idx);

	while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
		spin_lock_irq(&md->deferred_lock);
		c = bio_list_pop(&md->deferred);
		spin_unlock_irq(&md->deferred_lock);

		if (!c)
			break;

		if (dm_request_based(md))
			generic_make_request(c);
		else
			__split_and_process_bio(md, map, c);
	}

	dm_put_live_table(md, srcu_idx);
}

static void dm_queue_flush(struct mapped_device *md)
{
	clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	smp_mb__after_atomic();
	queue_work(md->wq, &md->work);
}

/*
 * Swap in a new table, returning the old one for the caller to destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
	struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
	struct queue_limits limits;
	int r;

	mutex_lock(&md->suspend_lock);

	/* device must be suspended */
	if (!dm_suspended_md(md))
		goto out;

	/*
	 * If the new table has no data devices, retain the existing limits.
	 * This helps multipath with queue_if_no_path if all paths disappear,
	 * then new I/O is queued based on these limits, and then some paths
	 * reappear.
	 */
	if (dm_table_has_no_data_devices(table)) {
		live_map = dm_get_live_table_fast(md);
		if (live_map)
			limits = md->queue->limits;
		dm_put_live_table_fast(md);
	}

	if (!live_map) {
		r = dm_calculate_queue_limits(table, &limits);
		if (r) {
			map = ERR_PTR(r);
			goto out;
		}
	}

	map = __bind(md, table, &limits);

out:
	mutex_unlock(&md->suspend_lock);
	return map;
}
/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
static int lock_fs(struct mapped_device *md)
{
	int r;

	WARN_ON(md->frozen_sb);

	md->frozen_sb = freeze_bdev(md->bdev);
	if (IS_ERR(md->frozen_sb)) {
		r = PTR_ERR(md->frozen_sb);
		md->frozen_sb = NULL;
		return r;
	}

	set_bit(DMF_FROZEN, &md->flags);

	return 0;
}

static void unlock_fs(struct mapped_device *md)
{
	if (!test_bit(DMF_FROZEN, &md->flags))
		return;

	thaw_bdev(md->bdev, md->frozen_sb);
	md->frozen_sb = NULL;
	clear_bit(DMF_FROZEN, &md->flags);
}
/*
 * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG
 * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE
 * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY
 *
 * If __dm_suspend returns 0, the device is completely quiescent
 * now. There is no request-processing activity. All new requests
 * are being added to md->deferred list.
 */
static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
			unsigned suspend_flags, long task_state,
			int dmf_suspended_flag)
{
	bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
	bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
	int r;

	lockdep_assert_held(&md->suspend_lock);

	/*
	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
	 * This flag is cleared before dm_suspend returns.
	 */
	if (noflush)
		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
	else
		pr_debug("%s: suspending with flush\n", dm_device_name(md));

	/*
	 * This gets reverted if there's an error later and the targets
	 * provide the .presuspend_undo hook.
	 */
	dm_table_presuspend_targets(map);

	/*
	 * Flush I/O to the device.
	 * Any I/O submitted after lock_fs() may not be flushed.
	 * noflush takes precedence over do_lockfs.
	 * (lock_fs() flushes I/Os and waits for them to complete.)
	 */
	if (!noflush && do_lockfs) {
		r = lock_fs(md);
		if (r) {
			dm_table_presuspend_undo_targets(map);
			return r;
		}
	}

	/*
	 * Here we must make sure that no processes are submitting requests
	 * to target drivers i.e. no one may be executing
	 * __split_and_process_bio. This is called from dm_request and
	 * dm_wq_work.
	 *
	 * To get all processes out of __split_and_process_bio in dm_request,
	 * we take the write lock. To prevent any process from reentering
	 * __split_and_process_bio from dm_request and quiesce the thread
	 * (dm_wq_work), we set BMF_BLOCK_IO_FOR_SUSPEND and call
	 * flush_workqueue(md->wq).
	 */
	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	if (map)
		synchronize_srcu(&md->io_barrier);

	/*
	 * Stop md->queue before flushing md->wq in case request-based
	 * dm defers requests to md->wq from md->queue.
	 */
	if (dm_request_based(md)) {
		dm_stop_queue(md->queue);
		if (md->kworker_task)
			kthread_flush_worker(&md->kworker);
	}

	flush_workqueue(md->wq);

	/*
	 * At this point no more requests are entering target request routines.
	 * We call dm_wait_for_completion to wait for all existing requests
	 * to finish.
	 */
	r = dm_wait_for_completion(md, task_state);
	if (!r)
		set_bit(dmf_suspended_flag, &md->flags);

	if (noflush)
		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
	if (map)
		synchronize_srcu(&md->io_barrier);

	/* were we interrupted ? */
	if (r < 0) {
		dm_queue_flush(md);

		if (dm_request_based(md))
			dm_start_queue(md->queue);

		unlock_fs(md);
		dm_table_presuspend_undo_targets(map);
		/* pushback list is already flushed, so skip flush */
	}

	return r;
}
/*
 * We need to be able to change a mapping table under a mounted
 * filesystem.  For example we might want to move some data in
 * the background.  Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in
 * flight bios and ensure that any further io gets deferred.
 */
/*
 * Suspend mechanism in request-based dm.
 *
 * 1. Flush all I/Os by lock_fs() if needed.
 * 2. Stop dispatching any I/O by stopping the request_queue.
 * 3. Wait for all in-flight I/Os to be completed or requeued.
 *
 * To abort suspend, start the request_queue.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;
	int r = 0;

retry:
	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);

	if (dm_suspended_md(md)) {
		r = -EINVAL;
		goto out_unlock;
	}

	if (dm_suspended_internally_md(md)) {
		/* already internally suspended, wait for internal resume */
		mutex_unlock(&md->suspend_lock);
		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
		if (r)
			return r;
		goto retry;
	}

	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));

	r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED);
	if (r)
		goto out_unlock;

	dm_table_postsuspend_targets(map);

out_unlock:
	mutex_unlock(&md->suspend_lock);
	return r;
}
*md
, struct dm_table
*map
)
2511 int r
= dm_table_resume_targets(map
);
2519 * Flushing deferred I/Os must be done after targets are resumed
2520 * so that mapping of targets can work correctly.
2521 * Request-based dm is queueing the deferred I/Os in its request_queue.
2523 if (dm_request_based(md
))
2524 dm_start_queue(md
->queue
);
2531 int dm_resume(struct mapped_device
*md
)
2534 struct dm_table
*map
= NULL
;
2538 mutex_lock_nested(&md
->suspend_lock
, SINGLE_DEPTH_NESTING
);
2540 if (!dm_suspended_md(md
))
2543 if (dm_suspended_internally_md(md
)) {
2544 /* already internally suspended, wait for internal resume */
2545 mutex_unlock(&md
->suspend_lock
);
2546 r
= wait_on_bit(&md
->flags
, DMF_SUSPENDED_INTERNALLY
, TASK_INTERRUPTIBLE
);
2552 map
= rcu_dereference_protected(md
->map
, lockdep_is_held(&md
->suspend_lock
));
2553 if (!map
|| !dm_table_get_size(map
))
2556 r
= __dm_resume(md
, map
);
2560 clear_bit(DMF_SUSPENDED
, &md
->flags
);
2562 mutex_unlock(&md
->suspend_lock
);
2568 * Internal suspend/resume works like userspace-driven suspend. It waits
2569 * until all bios finish and prevents issuing new bios to the target drivers.
2570 * It may be used only from the kernel.
static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;

	lockdep_assert_held(&md->suspend_lock);

	if (md->internal_suspend_count++)
		return; /* nested internal suspend */

	if (dm_suspended_md(md)) {
		set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
		return; /* nest suspend */
	}

	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));

	/*
	 * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is
	 * supported.  Properly supporting a TASK_INTERRUPTIBLE internal suspend
	 * would require changing .presuspend to return an error -- avoid this
	 * until there is a need for more elaborate variants of internal suspend.
	 */
	(void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE,
			    DMF_SUSPENDED_INTERNALLY);

	dm_table_postsuspend_targets(map);
}
static void __dm_internal_resume(struct mapped_device *md)
{
	BUG_ON(!md->internal_suspend_count);

	if (--md->internal_suspend_count)
		return; /* resume from nested internal suspend */

	if (dm_suspended_md(md))
		goto done; /* resume from nested suspend */

	/*
	 * NOTE: existing callers don't need to call dm_table_resume_targets
	 * (which may fail -- so best to avoid it for now by passing NULL map)
	 */
	(void) __dm_resume(md, NULL);

done:
	clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
	smp_mb__after_atomic();
	wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY);
}
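/*
 * The clear_bit/smp_mb__after_atomic/wake_up_bit sequence in
 * __dm_internal_resume() pairs with the wait_on_bit() callers in
 * dm_suspend()/dm_resume(): the barrier makes the cleared flag visible
 * before waiters are woken.
 */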
void dm_internal_suspend_noflush(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	__dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG);
	mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush);
void dm_internal_resume(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	__dm_internal_resume(md);
	mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_resume);
/*
 * Fast variants of internal suspend/resume hold md->suspend_lock,
 * which prevents interaction with userspace-driven suspend.
 */
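/*
 * Note: dm_internal_suspend_fast() returns with md->suspend_lock still held;
 * the matching dm_internal_resume_fast() is what drops it, keeping
 * userspace-driven suspend locked out for the whole fast-suspended window.
 */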
void dm_internal_suspend_fast(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
		return;

	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	synchronize_srcu(&md->io_barrier);
	flush_workqueue(md->wq);
	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL_GPL(dm_internal_suspend_fast);
void dm_internal_resume_fast(struct mapped_device *md)
{
	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
		goto done;

	dm_queue_flush(md);

done:
	mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
		       unsigned cookie)
{
	char udev_cookie[DM_COOKIE_LENGTH];
	char *envp[] = { udev_cookie, NULL };

	if (!cookie)
		return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
	else {
		snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
			 DM_COOKIE_ENV_VAR_NAME, cookie);
		return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
					  action, envp);
	}
}
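/*
 * The cookie, when present, reaches userspace as the DM_COOKIE variable in
 * the uevent environment; userspace tooling (e.g. libdevmapper/udev rules)
 * commonly uses it to synchronise with node creation, though that side is
 * outside this file.
 */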
uint32_t dm_next_uevent_seq(struct mapped_device *md)
{
	return atomic_add_return(1, &md->uevent_seq);
}
uint32_t dm_get_event_nr(struct mapped_device *md)
{
	return atomic_read(&md->event_nr);
}
int dm_wait_event(struct mapped_device *md, int event_nr)
{
	return wait_event_interruptible(md->eventq,
			(event_nr != atomic_read(&md->event_nr)));
}
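/*
 * Typical use (sketch): userspace samples the current event count via
 * dm_get_event_nr() through the ioctl interface and then blocks in
 * dm_wait_event() until md->event_nr moves past it, e.g. after a table
 * change or a target-reported event.
 */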
void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
{
	unsigned long flags;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_add(elist, &md->uevent_list);
	spin_unlock_irqrestore(&md->uevent_lock, flags);
}
/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
	return md->disk;
}
EXPORT_SYMBOL_GPL(dm_disk);
struct kobject *dm_kobject(struct mapped_device *md)
{
	return &md->kobj_holder.kobj;
}
struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
{
	struct mapped_device *md;

	md = container_of(kobj, struct mapped_device, kobj_holder.kobj);

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md))
		return NULL;

	dm_get(md);
	return md;
}
int dm_suspended_md(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED, &md->flags);
}
int dm_suspended_internally_md(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
}
int dm_test_deferred_remove_flag(struct mapped_device *md)
{
	return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
}
int dm_suspended(struct dm_target *ti)
{
	return dm_suspended_md(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_suspended);
int dm_noflush_suspending(struct dm_target *ti)
{
	return __noflush_suspending(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);
struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
					    unsigned integrity, unsigned per_io_data_size)
{
	struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
	unsigned int pool_size = 0;
	unsigned int front_pad;

	if (!pools)
		return NULL;

	switch (type) {
	case DM_TYPE_BIO_BASED:
	case DM_TYPE_DAX_BIO_BASED:
		pool_size = dm_get_reserved_bio_based_ios();
		front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
		pools->io_pool = mempool_create_slab_pool(pool_size, _io_cache);
		if (!pools->io_pool)
			goto out;
		break;
	case DM_TYPE_REQUEST_BASED:
	case DM_TYPE_MQ_REQUEST_BASED:
		pool_size = dm_get_reserved_rq_based_ios();
		front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
		/* per_io_data_size is used for blk-mq pdu at queue allocation */
		break;
	default:
		BUG();
	}

	pools->bs = bioset_create(pool_size, front_pad, BIOSET_NEED_RESCUER);
	if (!pools->bs)
		goto out;

	if (integrity && bioset_integrity_create(pools->bs, pool_size))
		goto out;

	return pools;

out:
	dm_free_md_mempools(pools);

	return NULL;
}
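/*
 * front_pad reserves space in front of each bio allocated from pools->bs so
 * that the clone bio sits embedded at the tail of the per-io structure
 * (struct dm_target_io for bio-based, struct dm_rq_clone_bio_info for
 * request-based); a single bioset allocation then covers both the clone bio
 * and the per-io data.
 */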
void dm_free_md_mempools(struct dm_md_mempools *pools)
{
	if (!pools)
		return;

	mempool_destroy(pools->io_pool);

	if (pools->bs)
		bioset_free(pools->bs);

	kfree(pools);
}
static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn,
		      void *data)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	struct dm_table *table;
	struct dm_target *ti;
	int ret = -ENOTTY, srcu_idx;

	table = dm_get_live_table(md, &srcu_idx);
	if (!table || !dm_table_get_size(table))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(table) != 1)
		goto out;
	ti = dm_table_get_target(table, 0);

	ret = -EINVAL;
	if (!ti->type->iterate_devices)
		goto out;

	ret = ti->type->iterate_devices(ti, fn, data);
out:
	dm_put_live_table(md, srcu_idx);
	return ret;
}
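/*
 * dm_call_pr() resolves the single target of the live table and invokes @fn
 * on each underlying device via .iterate_devices; dm_pr_register() below uses
 * it to fan a registration out to every path.
 */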
/*
 * For register / unregister we need to manually call out to every path.
 */
static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev,
			    sector_t start, sector_t len, void *data)
{
	struct dm_pr *pr = data;
	const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;

	if (!ops || !ops->pr_register)
		return -EOPNOTSUPP;
	return ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags);
}
static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
			  u32 flags)
{
	struct dm_pr pr = {
		.old_key = old_key, .new_key = new_key,
		.flags = flags, .fail_early = true,
	};
	int ret;

	ret = dm_call_pr(bdev, __dm_pr_register, &pr);
	if (ret && new_key) {
		/* unregister all paths if we failed to register any path */
		pr.old_key = new_key;
		pr.new_key = 0;
		pr.flags = 0;
		pr.fail_early = false;
		dm_call_pr(bdev, __dm_pr_register, &pr);
	}

	return ret;
}
static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
			 u32 flags)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	fmode_t mode;
	int r;

	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
	if (r < 0)
		return r;

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_reserve)
		r = ops->pr_reserve(bdev, key, type, flags);
	else
		r = -EOPNOTSUPP;

	bdput(bdev);
	return r;
}
static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	fmode_t mode;
	int r;

	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
	if (r < 0)
		return r;

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_release)
		r = ops->pr_release(bdev, key, type);
	else
		r = -EOPNOTSUPP;

	bdput(bdev);
	return r;
}
static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
			 enum pr_type type, bool abort)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	fmode_t mode;
	int r;

	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
	if (r < 0)
		return r;

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_preempt)
		r = ops->pr_preempt(bdev, old_key, new_key, type, abort);
	else
		r = -EOPNOTSUPP;

	bdput(bdev);
	return r;
}
static int dm_pr_clear(struct block_device *bdev, u64 key)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	fmode_t mode;
	int r;

	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
	if (r < 0)
		return r;

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_clear)
		r = ops->pr_clear(bdev, key);
	else
		r = -EOPNOTSUPP;

	bdput(bdev);
	return r;
}
static const struct pr_ops dm_pr_ops = {
	.pr_register	= dm_pr_register,
	.pr_reserve	= dm_pr_reserve,
	.pr_release	= dm_pr_release,
	.pr_preempt	= dm_pr_preempt,
	.pr_clear	= dm_pr_clear,
};
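/*
 * These ops back the generic persistent-reservation ioctls (IOC_PR_REGISTER
 * and friends) that the block layer forwards to a device's pr_ops.
 */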
static const struct block_device_operations dm_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.ioctl = dm_blk_ioctl,
	.getgeo = dm_blk_getgeo,
	.pr_ops = &dm_pr_ops,
	.owner = THIS_MODULE
};
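/*
 * dm_blk_dops is installed as the gendisk's fops when a mapped device is
 * allocated (earlier in this file), so opens, ioctls and geometry queries on
 * a dm device all funnel through the handlers above.
 */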
static const struct dax_operations dm_dax_ops = {
	.direct_access = dm_dax_direct_access,
	.copy_from_iter = dm_dax_copy_from_iter,
	.flush = dm_dax_flush,
};
module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");

module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");

module_param(dm_numa_node, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations");

MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");