/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched/signal.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/uio.h>
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/refcount.h>

#define DM_MSG_PREFIX "core"

/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24
static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_IDR(_minor_idr);

static DEFINE_SPINLOCK(_minor_lock);

static void do_deferred_remove(struct work_struct *w);

static DECLARE_WORK(deferred_remove_work, do_deferred_remove);

static struct workqueue_struct *deferred_remove_workqueue;

atomic_t dm_global_event_nr = ATOMIC_INIT(0);
DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq);

void dm_issue_global_event(void)
{
	atomic_inc(&dm_global_event_nr);
	wake_up(&dm_global_eventq);
}
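/*
 * dm_issue_global_event() is called after any device or table state change
 * so that interested listeners can be woken.  A minimal sketch of how a
 * listener might consume it (the caller below is hypothetical, not part of
 * this file): snapshot dm_global_event_nr, then sleep on dm_global_eventq
 * until the counter moves.
 *
 *	unsigned int seen = atomic_read(&dm_global_event_nr);
 *	...
 *	wait_event_interruptible(dm_global_eventq,
 *				 atomic_read(&dm_global_event_nr) != seen);
 */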
/*
 * One of these is allocated per bio.
 */
struct dm_io {
	struct mapped_device *md;
	blk_status_t status;
	atomic_t io_count;
	struct bio *bio;
	unsigned long start_time;
	spinlock_t endio_lock;
	struct dm_stats_aux stats_aux;
};

#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_DEFERRED_REMOVE 6
#define DMF_SUSPENDED_INTERNALLY 7

#define DM_NUMA_NODE NUMA_NO_NODE
static int dm_numa_node = DM_NUMA_NODE;

/*
 * For mempools pre-allocation at the table loading time.
 */
struct dm_md_mempools {
	mempool_t *io_pool;
	struct bio_set *bs;
};

struct table_device {
	struct list_head list;
	refcount_t count;
	struct dm_dev dm_dev;
};

static struct kmem_cache *_io_cache;
static struct kmem_cache *_rq_tio_cache;
static struct kmem_cache *_rq_cache;

/*
 * Bio-based DM's mempools' reserved IOs set by the user.
 */
#define RESERVED_BIO_BASED_IOS 16
static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;
static int __dm_get_module_param_int(int *module_param, int min, int max)
{
	int param = READ_ONCE(*module_param);
	int modified_param = 0;
	bool modified = true;

	if (param < min)
		modified_param = min;
	else if (param > max)
		modified_param = max;
	else
		modified = false;

	if (modified) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}

unsigned __dm_get_module_param(unsigned *module_param,
			       unsigned def, unsigned max)
{
	unsigned param = READ_ONCE(*module_param);
	unsigned modified_param = 0;

	if (!param)
		modified_param = def;
	else if (param > max)
		modified_param = max;

	if (modified_param) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}

unsigned dm_get_reserved_bio_based_ios(void)
{
	return __dm_get_module_param(&reserved_bio_based_ios,
				     RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);
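/*
 * The module-parameter getters above clamp whatever value userspace wrote
 * into the parameter at read time, racelessly writing the clamped value
 * back with cmpxchg().  A minimal usage sketch (the parameter name and
 * bounds below are hypothetical, shown only for illustration):
 *
 *	static unsigned example_ios = 16;
 *	module_param(example_ios, uint, 0644);
 *
 *	unsigned dm_get_example_ios(void)
 *	{
 *		// 0 falls back to the default, anything above 1024 is capped
 *		return __dm_get_module_param(&example_ios, 16, 1024);
 *	}
 */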
static unsigned dm_get_numa_node(void)
{
	return __dm_get_module_param_int(&dm_numa_node,
					 DM_NUMA_NODE, num_online_nodes() - 1);
}
static int __init local_init(void)
{
	int r = -ENOMEM;

	/* allocate a slab for the dm_ios */
	_io_cache = KMEM_CACHE(dm_io, 0);
	if (!_io_cache)
		return r;

	_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
	if (!_rq_tio_cache)
		goto out_free_io_cache;

	_rq_cache = kmem_cache_create("dm_old_clone_request", sizeof(struct request),
				      __alignof__(struct request), 0, NULL);
	if (!_rq_cache)
		goto out_free_rq_tio_cache;

	r = dm_uevent_init();
	if (r)
		goto out_free_rq_cache;

	deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
	if (!deferred_remove_workqueue) {
		r = -ENOMEM;
		goto out_uevent_exit;
	}

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_free_workqueue;

	if (!_major)
		_major = r;

	return 0;

out_free_workqueue:
	destroy_workqueue(deferred_remove_workqueue);
out_uevent_exit:
	dm_uevent_exit();
out_free_rq_cache:
	kmem_cache_destroy(_rq_cache);
out_free_rq_tio_cache:
	kmem_cache_destroy(_rq_tio_cache);
out_free_io_cache:
	kmem_cache_destroy(_io_cache);

	return r;
}

static void local_exit(void)
{
	flush_scheduled_work();
	destroy_workqueue(deferred_remove_workqueue);

	kmem_cache_destroy(_rq_cache);
	kmem_cache_destroy(_rq_tio_cache);
	kmem_cache_destroy(_io_cache);
	unregister_blkdev(_major, _name);

	_major = 0;

	DMINFO("cleaned up");
}
237 static int (*_inits
[])(void) __initdata
= {
248 static void (*_exits
[])(void) = {
259 static int __init
dm_init(void)
261 const int count
= ARRAY_SIZE(_inits
);
265 for (i
= 0; i
< count
; i
++) {
280 static void __exit
dm_exit(void)
282 int i
= ARRAY_SIZE(_exits
);
288 * Should be empty by this point.
290 idr_destroy(&_minor_idr
);
/*
 * Block device functions
 */
int dm_deleting_md(struct mapped_device *md)
{
	return test_bit(DMF_DELETING, &md->flags);
}

static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);
out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}

static void dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = disk->private_data;
	if (WARN_ON(!md))
		goto out;

	if (atomic_dec_and_test(&md->open_count) &&
	    (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
		queue_work(deferred_remove_workqueue, &deferred_remove_work);

	dm_put(md);
out:
	spin_unlock(&_minor_lock);
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}
350 * Guarantees nothing is using the device before it's deleted.
352 int dm_lock_for_deletion(struct mapped_device
*md
, bool mark_deferred
, bool only_deferred
)
356 spin_lock(&_minor_lock
);
358 if (dm_open_count(md
)) {
361 set_bit(DMF_DEFERRED_REMOVE
, &md
->flags
);
362 } else if (only_deferred
&& !test_bit(DMF_DEFERRED_REMOVE
, &md
->flags
))
365 set_bit(DMF_DELETING
, &md
->flags
);
367 spin_unlock(&_minor_lock
);
372 int dm_cancel_deferred_remove(struct mapped_device
*md
)
376 spin_lock(&_minor_lock
);
378 if (test_bit(DMF_DELETING
, &md
->flags
))
381 clear_bit(DMF_DEFERRED_REMOVE
, &md
->flags
);
383 spin_unlock(&_minor_lock
);
388 static void do_deferred_remove(struct work_struct
*w
)
390 dm_deferred_remove();
393 sector_t
dm_get_size(struct mapped_device
*md
)
395 return get_capacity(md
->disk
);
398 struct request_queue
*dm_get_md_queue(struct mapped_device
*md
)
403 struct dm_stats
*dm_get_stats(struct mapped_device
*md
)
408 static int dm_blk_getgeo(struct block_device
*bdev
, struct hd_geometry
*geo
)
410 struct mapped_device
*md
= bdev
->bd_disk
->private_data
;
412 return dm_get_geometry(md
, geo
);
415 static int dm_grab_bdev_for_ioctl(struct mapped_device
*md
,
416 struct block_device
**bdev
,
419 struct dm_target
*tgt
;
420 struct dm_table
*map
;
425 map
= dm_get_live_table(md
, &srcu_idx
);
426 if (!map
|| !dm_table_get_size(map
))
429 /* We only support devices that have a single target */
430 if (dm_table_get_num_targets(map
) != 1)
433 tgt
= dm_table_get_target(map
, 0);
434 if (!tgt
->type
->prepare_ioctl
)
437 if (dm_suspended_md(md
)) {
442 r
= tgt
->type
->prepare_ioctl(tgt
, bdev
, mode
);
447 dm_put_live_table(md
, srcu_idx
);
451 dm_put_live_table(md
, srcu_idx
);
452 if (r
== -ENOTCONN
&& !fatal_signal_pending(current
)) {
459 static int dm_blk_ioctl(struct block_device
*bdev
, fmode_t mode
,
460 unsigned int cmd
, unsigned long arg
)
462 struct mapped_device
*md
= bdev
->bd_disk
->private_data
;
465 r
= dm_grab_bdev_for_ioctl(md
, &bdev
, &mode
);
471 * Target determined this ioctl is being issued against a
472 * subset of the parent bdev; require extra privileges.
474 if (!capable(CAP_SYS_RAWIO
)) {
476 "%s: sending ioctl %x to DM device without required privilege.",
483 r
= __blkdev_driver_ioctl(bdev
, mode
, cmd
, arg
);
489 static struct dm_io
*alloc_io(struct mapped_device
*md
)
491 return mempool_alloc(md
->io_pool
, GFP_NOIO
);
494 static void free_io(struct mapped_device
*md
, struct dm_io
*io
)
496 mempool_free(io
, md
->io_pool
);
499 static void free_tio(struct dm_target_io
*tio
)
501 bio_put(&tio
->clone
);
504 int md_in_flight(struct mapped_device
*md
)
506 return atomic_read(&md
->pending
[READ
]) +
507 atomic_read(&md
->pending
[WRITE
]);
510 static void start_io_acct(struct dm_io
*io
)
512 struct mapped_device
*md
= io
->md
;
513 struct bio
*bio
= io
->bio
;
515 int rw
= bio_data_dir(bio
);
517 io
->start_time
= jiffies
;
519 cpu
= part_stat_lock();
520 part_round_stats(md
->queue
, cpu
, &dm_disk(md
)->part0
);
522 atomic_set(&dm_disk(md
)->part0
.in_flight
[rw
],
523 atomic_inc_return(&md
->pending
[rw
]));
525 if (unlikely(dm_stats_used(&md
->stats
)))
526 dm_stats_account_io(&md
->stats
, bio_data_dir(bio
),
527 bio
->bi_iter
.bi_sector
, bio_sectors(bio
),
528 false, 0, &io
->stats_aux
);
531 static void end_io_acct(struct dm_io
*io
)
533 struct mapped_device
*md
= io
->md
;
534 struct bio
*bio
= io
->bio
;
535 unsigned long duration
= jiffies
- io
->start_time
;
537 int rw
= bio_data_dir(bio
);
539 generic_end_io_acct(md
->queue
, rw
, &dm_disk(md
)->part0
, io
->start_time
);
541 if (unlikely(dm_stats_used(&md
->stats
)))
542 dm_stats_account_io(&md
->stats
, bio_data_dir(bio
),
543 bio
->bi_iter
.bi_sector
, bio_sectors(bio
),
544 true, duration
, &io
->stats_aux
);
547 * After this is decremented the bio must not be touched if it is
550 pending
= atomic_dec_return(&md
->pending
[rw
]);
551 atomic_set(&dm_disk(md
)->part0
.in_flight
[rw
], pending
);
552 pending
+= atomic_read(&md
->pending
[rw
^0x1]);
554 /* nudge anyone waiting on suspend queue */
560 * Add the bio to the list of deferred io.
562 static void queue_io(struct mapped_device
*md
, struct bio
*bio
)
566 spin_lock_irqsave(&md
->deferred_lock
, flags
);
567 bio_list_add(&md
->deferred
, bio
);
568 spin_unlock_irqrestore(&md
->deferred_lock
, flags
);
569 queue_work(md
->wq
, &md
->work
);
/*
 * Everyone (including functions in this file), should use this
 * function to access the md->map field, and make sure they call
 * dm_put_live_table() when finished.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md,
				   int *srcu_idx) __acquires(md->io_barrier)
{
	*srcu_idx = srcu_read_lock(&md->io_barrier);

	return srcu_dereference(md->map, &md->io_barrier);
}

void dm_put_live_table(struct mapped_device *md,
		       int srcu_idx) __releases(md->io_barrier)
{
	srcu_read_unlock(&md->io_barrier, srcu_idx);
}

void dm_sync_table(struct mapped_device *md)
{
	synchronize_srcu(&md->io_barrier);
	synchronize_rcu_expedited();
}
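/*
 * Typical usage pattern for the SRCU-protected live table (a minimal
 * sketch; the surrounding function and error handling are hypothetical):
 *
 *	int srcu_idx;
 *	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 *
 *	if (map)
 *		do_something_with(map);   // table cannot be freed until the put
 *
 *	dm_put_live_table(md, srcu_idx);  // always pair with dm_get_live_table()
 */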
596 * A fast alternative to dm_get_live_table/dm_put_live_table.
597 * The caller must not block between these two functions.
599 static struct dm_table
*dm_get_live_table_fast(struct mapped_device
*md
) __acquires(RCU
)
602 return rcu_dereference(md
->map
);
605 static void dm_put_live_table_fast(struct mapped_device
*md
) __releases(RCU
)
611 * Open a table device so we can use it as a map destination.
613 static int open_table_device(struct table_device
*td
, dev_t dev
,
614 struct mapped_device
*md
)
616 static char *_claim_ptr
= "I belong to device-mapper";
617 struct block_device
*bdev
;
621 BUG_ON(td
->dm_dev
.bdev
);
623 bdev
= blkdev_get_by_dev(dev
, td
->dm_dev
.mode
| FMODE_EXCL
, _claim_ptr
);
625 return PTR_ERR(bdev
);
627 r
= bd_link_disk_holder(bdev
, dm_disk(md
));
629 blkdev_put(bdev
, td
->dm_dev
.mode
| FMODE_EXCL
);
633 td
->dm_dev
.bdev
= bdev
;
634 td
->dm_dev
.dax_dev
= dax_get_by_host(bdev
->bd_disk
->disk_name
);
639 * Close a table device that we've been using.
641 static void close_table_device(struct table_device
*td
, struct mapped_device
*md
)
643 if (!td
->dm_dev
.bdev
)
646 bd_unlink_disk_holder(td
->dm_dev
.bdev
, dm_disk(md
));
647 blkdev_put(td
->dm_dev
.bdev
, td
->dm_dev
.mode
| FMODE_EXCL
);
648 put_dax(td
->dm_dev
.dax_dev
);
649 td
->dm_dev
.bdev
= NULL
;
650 td
->dm_dev
.dax_dev
= NULL
;
653 static struct table_device
*find_table_device(struct list_head
*l
, dev_t dev
,
655 struct table_device
*td
;
657 list_for_each_entry(td
, l
, list
)
658 if (td
->dm_dev
.bdev
->bd_dev
== dev
&& td
->dm_dev
.mode
== mode
)
664 int dm_get_table_device(struct mapped_device
*md
, dev_t dev
, fmode_t mode
,
665 struct dm_dev
**result
) {
667 struct table_device
*td
;
669 mutex_lock(&md
->table_devices_lock
);
670 td
= find_table_device(&md
->table_devices
, dev
, mode
);
672 td
= kmalloc_node(sizeof(*td
), GFP_KERNEL
, md
->numa_node_id
);
674 mutex_unlock(&md
->table_devices_lock
);
678 td
->dm_dev
.mode
= mode
;
679 td
->dm_dev
.bdev
= NULL
;
681 if ((r
= open_table_device(td
, dev
, md
))) {
682 mutex_unlock(&md
->table_devices_lock
);
687 format_dev_t(td
->dm_dev
.name
, dev
);
689 refcount_set(&td
->count
, 1);
690 list_add(&td
->list
, &md
->table_devices
);
692 refcount_inc(&td
->count
);
694 mutex_unlock(&md
->table_devices_lock
);
696 *result
= &td
->dm_dev
;
699 EXPORT_SYMBOL_GPL(dm_get_table_device
);
void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
{
	struct table_device *td = container_of(d, struct table_device, dm_dev);

	mutex_lock(&md->table_devices_lock);
	if (refcount_dec_and_test(&td->count)) {
		close_table_device(td, md);
		list_del(&td->list);
		kfree(td);
	}
	mutex_unlock(&md->table_devices_lock);
}
EXPORT_SYMBOL(dm_put_table_device);
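/*
 * dm_get_table_device()/dm_put_table_device() are the reference-counted
 * way to open an underlying device by dev_t outside of the usual
 * dm_get_device() table path.  A minimal, hypothetical sketch of the
 * pairing (error handling trimmed):
 *
 *	struct dm_dev *dd;
 *
 *	if (dm_get_table_device(md, dev, FMODE_READ | FMODE_WRITE, &dd))
 *		return -ENODEV;
 *	// ... use dd->bdev / dd->dax_dev ...
 *	dm_put_table_device(md, dd);
 */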
715 static void free_table_devices(struct list_head
*devices
)
717 struct list_head
*tmp
, *next
;
719 list_for_each_safe(tmp
, next
, devices
) {
720 struct table_device
*td
= list_entry(tmp
, struct table_device
, list
);
722 DMWARN("dm_destroy: %s still exists with %d references",
723 td
->dm_dev
.name
, refcount_read(&td
->count
));
729 * Get the geometry associated with a dm device
731 int dm_get_geometry(struct mapped_device
*md
, struct hd_geometry
*geo
)
739 * Set the geometry of a device.
741 int dm_set_geometry(struct mapped_device
*md
, struct hd_geometry
*geo
)
743 sector_t sz
= (sector_t
)geo
->cylinders
* geo
->heads
* geo
->sectors
;
745 if (geo
->start
> sz
) {
746 DMWARN("Start sector is beyond the geometry limits.");
755 /*-----------------------------------------------------------------
757 * A more elegant soln is in the works that uses the queue
758 * merge fn, unfortunately there are a couple of changes to
759 * the block layer that I want to make for this. So in the
760 * interests of getting something for people to use I give
761 * you this clearly demarcated crap.
762 *---------------------------------------------------------------*/
764 static int __noflush_suspending(struct mapped_device
*md
)
766 return test_bit(DMF_NOFLUSH_SUSPENDING
, &md
->flags
);
770 * Decrements the number of outstanding ios that a bio has been
771 * cloned into, completing the original io if necc.
773 static void dec_pending(struct dm_io
*io
, blk_status_t error
)
776 blk_status_t io_error
;
778 struct mapped_device
*md
= io
->md
;
780 /* Push-back supersedes any I/O errors */
781 if (unlikely(error
)) {
782 spin_lock_irqsave(&io
->endio_lock
, flags
);
783 if (!(io
->status
== BLK_STS_DM_REQUEUE
&&
784 __noflush_suspending(md
)))
786 spin_unlock_irqrestore(&io
->endio_lock
, flags
);
789 if (atomic_dec_and_test(&io
->io_count
)) {
790 if (io
->status
== BLK_STS_DM_REQUEUE
) {
792 * Target requested pushing back the I/O.
794 spin_lock_irqsave(&md
->deferred_lock
, flags
);
795 if (__noflush_suspending(md
))
796 bio_list_add_head(&md
->deferred
, io
->bio
);
798 /* noflush suspend was interrupted. */
799 io
->status
= BLK_STS_IOERR
;
800 spin_unlock_irqrestore(&md
->deferred_lock
, flags
);
803 io_error
= io
->status
;
808 if (io_error
== BLK_STS_DM_REQUEUE
)
811 if ((bio
->bi_opf
& REQ_PREFLUSH
) && bio
->bi_iter
.bi_size
) {
813 * Preflush done for flush with data, reissue
814 * without REQ_PREFLUSH.
816 bio
->bi_opf
&= ~REQ_PREFLUSH
;
819 /* done with normal IO or empty flush */
821 bio
->bi_status
= io_error
;
827 void disable_write_same(struct mapped_device
*md
)
829 struct queue_limits
*limits
= dm_get_queue_limits(md
);
831 /* device doesn't really support WRITE SAME, disable it */
832 limits
->max_write_same_sectors
= 0;
835 void disable_write_zeroes(struct mapped_device
*md
)
837 struct queue_limits
*limits
= dm_get_queue_limits(md
);
839 /* device doesn't really support WRITE ZEROES, disable it */
840 limits
->max_write_zeroes_sectors
= 0;
843 static void clone_endio(struct bio
*bio
)
845 blk_status_t error
= bio
->bi_status
;
846 struct dm_target_io
*tio
= container_of(bio
, struct dm_target_io
, clone
);
847 struct dm_io
*io
= tio
->io
;
848 struct mapped_device
*md
= tio
->io
->md
;
849 dm_endio_fn endio
= tio
->ti
->type
->end_io
;
851 if (unlikely(error
== BLK_STS_TARGET
)) {
852 if (bio_op(bio
) == REQ_OP_WRITE_SAME
&&
853 !bio
->bi_disk
->queue
->limits
.max_write_same_sectors
)
854 disable_write_same(md
);
855 if (bio_op(bio
) == REQ_OP_WRITE_ZEROES
&&
856 !bio
->bi_disk
->queue
->limits
.max_write_zeroes_sectors
)
857 disable_write_zeroes(md
);
861 int r
= endio(tio
->ti
, bio
, &error
);
863 case DM_ENDIO_REQUEUE
:
864 error
= BLK_STS_DM_REQUEUE
;
868 case DM_ENDIO_INCOMPLETE
:
869 /* The target will handle the io */
872 DMWARN("unimplemented target endio return value: %d", r
);
878 dec_pending(io
, error
);
/*
 * Return maximum size of I/O possible at the supplied sector up to the current
 * target boundary.
 */
static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
{
	sector_t target_offset = dm_target_offset(ti, sector);

	return ti->len - target_offset;
}

static sector_t max_io_len(sector_t sector, struct dm_target *ti)
{
	sector_t len = max_io_len_target_boundary(sector, ti);
	sector_t offset, max_len;

	/*
	 * Does the target need to split even further?
	 */
	if (ti->max_io_len) {
		offset = dm_target_offset(ti, sector);
		if (unlikely(ti->max_io_len & (ti->max_io_len - 1)))
			max_len = sector_div(offset, ti->max_io_len);
		else
			max_len = offset & (ti->max_io_len - 1);
		max_len = ti->max_io_len - max_len;

		if (len > max_len)
			len = max_len;
	}

	return len;
}

int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
{
	if (len > UINT_MAX) {
		DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
		      (unsigned long long)len, UINT_MAX);
		ti->error = "Maximum size of target IO is too large";
		return -EINVAL;
	}

	ti->max_io_len = (uint32_t) len;

	return 0;
}
EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
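/*
 * A target constructor typically derives max_io_len from its own chunking
 * and registers it so that max_io_len() splits bios accordingly.  A
 * minimal, hypothetical ctr fragment (the chunk size below is illustrative):
 *
 *	static int example_ctr(struct dm_target *ti, unsigned argc, char **argv)
 *	{
 *		sector_t chunk_size = 128;	// 64KiB in 512-byte sectors
 *		int r = dm_set_target_max_io_len(ti, chunk_size);
 *		if (r)
 *			return r;		// ti->error already set
 *		...
 *	}
 */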
930 static struct dm_target
*dm_dax_get_live_target(struct mapped_device
*md
,
931 sector_t sector
, int *srcu_idx
)
933 struct dm_table
*map
;
934 struct dm_target
*ti
;
936 map
= dm_get_live_table(md
, srcu_idx
);
940 ti
= dm_table_find_target(map
, sector
);
941 if (!dm_target_is_valid(ti
))
947 static long dm_dax_direct_access(struct dax_device
*dax_dev
, pgoff_t pgoff
,
948 long nr_pages
, void **kaddr
, pfn_t
*pfn
)
950 struct mapped_device
*md
= dax_get_private(dax_dev
);
951 sector_t sector
= pgoff
* PAGE_SECTORS
;
952 struct dm_target
*ti
;
953 long len
, ret
= -EIO
;
956 ti
= dm_dax_get_live_target(md
, sector
, &srcu_idx
);
960 if (!ti
->type
->direct_access
)
962 len
= max_io_len(sector
, ti
) / PAGE_SECTORS
;
965 nr_pages
= min(len
, nr_pages
);
966 if (ti
->type
->direct_access
)
967 ret
= ti
->type
->direct_access(ti
, pgoff
, nr_pages
, kaddr
, pfn
);
970 dm_put_live_table(md
, srcu_idx
);
975 static size_t dm_dax_copy_from_iter(struct dax_device
*dax_dev
, pgoff_t pgoff
,
976 void *addr
, size_t bytes
, struct iov_iter
*i
)
978 struct mapped_device
*md
= dax_get_private(dax_dev
);
979 sector_t sector
= pgoff
* PAGE_SECTORS
;
980 struct dm_target
*ti
;
984 ti
= dm_dax_get_live_target(md
, sector
, &srcu_idx
);
988 if (!ti
->type
->dax_copy_from_iter
) {
989 ret
= copy_from_iter(addr
, bytes
, i
);
992 ret
= ti
->type
->dax_copy_from_iter(ti
, pgoff
, addr
, bytes
, i
);
994 dm_put_live_table(md
, srcu_idx
);
/*
 * A target may call dm_accept_partial_bio only from the map routine.  It is
 * allowed for all bio types except REQ_PREFLUSH.
 *
 * dm_accept_partial_bio informs the dm that the target only wants to process
 * additional n_sectors sectors of the bio and the rest of the data should be
 * sent in a next bio.
 *
 * A diagram that explains the arithmetics:
 * +--------------------+---------------+-------+
 * |         1          |       2       |   3   |
 * +--------------------+---------------+-------+
 *
 * <-------------- *tio->len_ptr --------------->
 *                      <------- bi_size ------->
 *                      <-- n_sectors -->
 *
 * Region 1 was already iterated over with bio_advance or similar function.
 *	(it may be empty if the target doesn't use bio_advance)
 * Region 2 is the remaining bio size that the target wants to process.
 *	(it may be empty if region 1 is non-empty, although there is no reason
 *	 to make it empty)
 * The target requires that region 3 is to be sent in the next bio.
 *
 * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
 * the partially processed part (the sum of regions 1+2) must be the same for all
 * copies of the bio.
 */
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
{
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
	BUG_ON(bio->bi_opf & REQ_PREFLUSH);
	BUG_ON(bi_size > *tio->len_ptr);
	BUG_ON(n_sectors > bi_size);
	*tio->len_ptr -= bi_size - n_sectors;
	bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
}
EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
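/*
 * Sketch of how a target's map method might use dm_accept_partial_bio()
 * when it can only handle I/O up to some internal boundary (the target
 * type and the boundary helpers below are hypothetical):
 *
 *	static int example_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		unsigned remaining = example_sectors_to_boundary(ti, bio);
 *
 *		if (bio_sectors(bio) > remaining)
 *			dm_accept_partial_bio(bio, remaining);
 *			// the rest of the bio is reissued by dm core in a new bio
 *
 *		bio_set_dev(bio, example_dev(ti)->bdev);
 *		return DM_MAPIO_REMAPPED;
 *	}
 */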
1040 * The zone descriptors obtained with a zone report indicate
1041 * zone positions within the target device. The zone descriptors
1042 * must be remapped to match their position within the dm device.
1043 * A target may call dm_remap_zone_report after completion of a
1044 * REQ_OP_ZONE_REPORT bio to remap the zone descriptors obtained
1045 * from the target device mapping to the dm device.
1047 void dm_remap_zone_report(struct dm_target
*ti
, struct bio
*bio
, sector_t start
)
1049 #ifdef CONFIG_BLK_DEV_ZONED
1050 struct dm_target_io
*tio
= container_of(bio
, struct dm_target_io
, clone
);
1051 struct bio
*report_bio
= tio
->io
->bio
;
1052 struct blk_zone_report_hdr
*hdr
= NULL
;
1053 struct blk_zone
*zone
;
1054 unsigned int nr_rep
= 0;
1056 struct bio_vec bvec
;
1057 struct bvec_iter iter
;
1064 * Remap the start sector of the reported zones. For sequential zones,
1065 * also remap the write pointer position.
1067 bio_for_each_segment(bvec
, report_bio
, iter
) {
1068 addr
= kmap_atomic(bvec
.bv_page
);
1070 /* Remember the report header in the first page */
1073 ofst
= sizeof(struct blk_zone_report_hdr
);
1077 /* Set zones start sector */
1078 while (hdr
->nr_zones
&& ofst
< bvec
.bv_len
) {
1080 if (zone
->start
>= start
+ ti
->len
) {
1084 zone
->start
= zone
->start
+ ti
->begin
- start
;
1085 if (zone
->type
!= BLK_ZONE_TYPE_CONVENTIONAL
) {
1086 if (zone
->cond
== BLK_ZONE_COND_FULL
)
1087 zone
->wp
= zone
->start
+ zone
->len
;
1088 else if (zone
->cond
== BLK_ZONE_COND_EMPTY
)
1089 zone
->wp
= zone
->start
;
1091 zone
->wp
= zone
->wp
+ ti
->begin
- start
;
1093 ofst
+= sizeof(struct blk_zone
);
1099 kunmap_atomic(addr
);
1106 hdr
->nr_zones
= nr_rep
;
1110 bio_advance(report_bio
, report_bio
->bi_iter
.bi_size
);
1112 #else /* !CONFIG_BLK_DEV_ZONED */
1113 bio
->bi_status
= BLK_STS_NOTSUPP
;
1116 EXPORT_SYMBOL_GPL(dm_remap_zone_report
);
1119 * Flush current->bio_list when the target map method blocks.
1120 * This fixes deadlocks in snapshot and possibly in other targets.
1123 struct blk_plug plug
;
1124 struct blk_plug_cb cb
;
1127 static void flush_current_bio_list(struct blk_plug_cb
*cb
, bool from_schedule
)
1129 struct dm_offload
*o
= container_of(cb
, struct dm_offload
, cb
);
1130 struct bio_list list
;
1134 INIT_LIST_HEAD(&o
->cb
.list
);
1136 if (unlikely(!current
->bio_list
))
1139 for (i
= 0; i
< 2; i
++) {
1140 list
= current
->bio_list
[i
];
1141 bio_list_init(¤t
->bio_list
[i
]);
1143 while ((bio
= bio_list_pop(&list
))) {
1144 struct bio_set
*bs
= bio
->bi_pool
;
1145 if (unlikely(!bs
) || bs
== fs_bio_set
||
1146 !bs
->rescue_workqueue
) {
1147 bio_list_add(¤t
->bio_list
[i
], bio
);
1151 spin_lock(&bs
->rescue_lock
);
1152 bio_list_add(&bs
->rescue_list
, bio
);
1153 queue_work(bs
->rescue_workqueue
, &bs
->rescue_work
);
1154 spin_unlock(&bs
->rescue_lock
);
1159 static void dm_offload_start(struct dm_offload
*o
)
1161 blk_start_plug(&o
->plug
);
1162 o
->cb
.callback
= flush_current_bio_list
;
1163 list_add(&o
->cb
.list
, ¤t
->plug
->cb_list
);
1166 static void dm_offload_end(struct dm_offload
*o
)
1168 list_del(&o
->cb
.list
);
1169 blk_finish_plug(&o
->plug
);
1172 static void __map_bio(struct dm_target_io
*tio
)
1176 struct dm_offload o
;
1177 struct bio
*clone
= &tio
->clone
;
1178 struct dm_target
*ti
= tio
->ti
;
1180 clone
->bi_end_io
= clone_endio
;
1183 * Map the clone. If r == 0 we don't need to do
1184 * anything, the target has assumed ownership of
1187 atomic_inc(&tio
->io
->io_count
);
1188 sector
= clone
->bi_iter
.bi_sector
;
1190 dm_offload_start(&o
);
1191 r
= ti
->type
->map(ti
, clone
);
1195 case DM_MAPIO_SUBMITTED
:
1197 case DM_MAPIO_REMAPPED
:
1198 /* the bio has been remapped so dispatch it */
1199 trace_block_bio_remap(clone
->bi_disk
->queue
, clone
,
1200 bio_dev(tio
->io
->bio
), sector
);
1201 generic_make_request(clone
);
1204 dec_pending(tio
->io
, BLK_STS_IOERR
);
1207 case DM_MAPIO_REQUEUE
:
1208 dec_pending(tio
->io
, BLK_STS_DM_REQUEUE
);
1212 DMWARN("unimplemented target map return value: %d", r
);
1218 struct mapped_device
*md
;
1219 struct dm_table
*map
;
1223 unsigned sector_count
;
1226 static void bio_setup_sector(struct bio
*bio
, sector_t sector
, unsigned len
)
1228 bio
->bi_iter
.bi_sector
= sector
;
1229 bio
->bi_iter
.bi_size
= to_bytes(len
);
1233 * Creates a bio that consists of range of complete bvecs.
1235 static int clone_bio(struct dm_target_io
*tio
, struct bio
*bio
,
1236 sector_t sector
, unsigned len
)
1238 struct bio
*clone
= &tio
->clone
;
1240 __bio_clone_fast(clone
, bio
);
1242 if (unlikely(bio_integrity(bio
) != NULL
)) {
1245 if (unlikely(!dm_target_has_integrity(tio
->ti
->type
) &&
1246 !dm_target_passes_integrity(tio
->ti
->type
))) {
1247 DMWARN("%s: the target %s doesn't support integrity data.",
1248 dm_device_name(tio
->io
->md
),
1249 tio
->ti
->type
->name
);
1253 r
= bio_integrity_clone(clone
, bio
, GFP_NOIO
);
1258 if (bio_op(bio
) != REQ_OP_ZONE_REPORT
)
1259 bio_advance(clone
, to_bytes(sector
- clone
->bi_iter
.bi_sector
));
1260 clone
->bi_iter
.bi_size
= to_bytes(len
);
1262 if (unlikely(bio_integrity(bio
) != NULL
))
1263 bio_integrity_trim(clone
);
1268 static struct dm_target_io
*alloc_tio(struct clone_info
*ci
,
1269 struct dm_target
*ti
,
1270 unsigned target_bio_nr
)
1272 struct dm_target_io
*tio
;
1275 clone
= bio_alloc_bioset(GFP_NOIO
, 0, ci
->md
->bs
);
1276 tio
= container_of(clone
, struct dm_target_io
, clone
);
1280 tio
->target_bio_nr
= target_bio_nr
;
1285 static void __clone_and_map_simple_bio(struct clone_info
*ci
,
1286 struct dm_target
*ti
,
1287 unsigned target_bio_nr
, unsigned *len
)
1289 struct dm_target_io
*tio
= alloc_tio(ci
, ti
, target_bio_nr
);
1290 struct bio
*clone
= &tio
->clone
;
1294 __bio_clone_fast(clone
, ci
->bio
);
1296 bio_setup_sector(clone
, ci
->sector
, *len
);
1301 static void __send_duplicate_bios(struct clone_info
*ci
, struct dm_target
*ti
,
1302 unsigned num_bios
, unsigned *len
)
1304 unsigned target_bio_nr
;
1306 for (target_bio_nr
= 0; target_bio_nr
< num_bios
; target_bio_nr
++)
1307 __clone_and_map_simple_bio(ci
, ti
, target_bio_nr
, len
);
1310 static int __send_empty_flush(struct clone_info
*ci
)
1312 unsigned target_nr
= 0;
1313 struct dm_target
*ti
;
1315 BUG_ON(bio_has_data(ci
->bio
));
1316 while ((ti
= dm_table_get_target(ci
->map
, target_nr
++)))
1317 __send_duplicate_bios(ci
, ti
, ti
->num_flush_bios
, NULL
);
1322 static int __clone_and_map_data_bio(struct clone_info
*ci
, struct dm_target
*ti
,
1323 sector_t sector
, unsigned *len
)
1325 struct bio
*bio
= ci
->bio
;
1326 struct dm_target_io
*tio
;
1327 unsigned target_bio_nr
;
1328 unsigned num_target_bios
= 1;
1332 * Does the target want to receive duplicate copies of the bio?
1334 if (bio_data_dir(bio
) == WRITE
&& ti
->num_write_bios
)
1335 num_target_bios
= ti
->num_write_bios(ti
, bio
);
1337 for (target_bio_nr
= 0; target_bio_nr
< num_target_bios
; target_bio_nr
++) {
1338 tio
= alloc_tio(ci
, ti
, target_bio_nr
);
1340 r
= clone_bio(tio
, bio
, sector
, *len
);
1351 typedef unsigned (*get_num_bios_fn
)(struct dm_target
*ti
);
1353 static unsigned get_num_discard_bios(struct dm_target
*ti
)
1355 return ti
->num_discard_bios
;
1358 static unsigned get_num_write_same_bios(struct dm_target
*ti
)
1360 return ti
->num_write_same_bios
;
1363 static unsigned get_num_write_zeroes_bios(struct dm_target
*ti
)
1365 return ti
->num_write_zeroes_bios
;
1368 typedef bool (*is_split_required_fn
)(struct dm_target
*ti
);
1370 static bool is_split_required_for_discard(struct dm_target
*ti
)
1372 return ti
->split_discard_bios
;
1375 static int __send_changing_extent_only(struct clone_info
*ci
,
1376 get_num_bios_fn get_num_bios
,
1377 is_split_required_fn is_split_required
)
1379 struct dm_target
*ti
;
1384 ti
= dm_table_find_target(ci
->map
, ci
->sector
);
1385 if (!dm_target_is_valid(ti
))
1389 * Even though the device advertised support for this type of
1390 * request, that does not mean every target supports it, and
1391 * reconfiguration might also have changed that since the
1392 * check was performed.
1394 num_bios
= get_num_bios
? get_num_bios(ti
) : 0;
1398 if (is_split_required
&& !is_split_required(ti
))
1399 len
= min((sector_t
)ci
->sector_count
, max_io_len_target_boundary(ci
->sector
, ti
));
1401 len
= min((sector_t
)ci
->sector_count
, max_io_len(ci
->sector
, ti
));
1403 __send_duplicate_bios(ci
, ti
, num_bios
, &len
);
1406 } while (ci
->sector_count
-= len
);
1411 static int __send_discard(struct clone_info
*ci
)
1413 return __send_changing_extent_only(ci
, get_num_discard_bios
,
1414 is_split_required_for_discard
);
1417 static int __send_write_same(struct clone_info
*ci
)
1419 return __send_changing_extent_only(ci
, get_num_write_same_bios
, NULL
);
1422 static int __send_write_zeroes(struct clone_info
*ci
)
1424 return __send_changing_extent_only(ci
, get_num_write_zeroes_bios
, NULL
);
1428 * Select the correct strategy for processing a non-flush bio.
1430 static int __split_and_process_non_flush(struct clone_info
*ci
)
1432 struct bio
*bio
= ci
->bio
;
1433 struct dm_target
*ti
;
1437 if (unlikely(bio_op(bio
) == REQ_OP_DISCARD
))
1438 return __send_discard(ci
);
1439 else if (unlikely(bio_op(bio
) == REQ_OP_WRITE_SAME
))
1440 return __send_write_same(ci
);
1441 else if (unlikely(bio_op(bio
) == REQ_OP_WRITE_ZEROES
))
1442 return __send_write_zeroes(ci
);
1444 ti
= dm_table_find_target(ci
->map
, ci
->sector
);
1445 if (!dm_target_is_valid(ti
))
1448 if (bio_op(bio
) == REQ_OP_ZONE_REPORT
)
1449 len
= ci
->sector_count
;
1451 len
= min_t(sector_t
, max_io_len(ci
->sector
, ti
),
1454 r
= __clone_and_map_data_bio(ci
, ti
, ci
->sector
, &len
);
1459 ci
->sector_count
-= len
;
1465 * Entry point to split a bio into clones and submit them to the targets.
1467 static void __split_and_process_bio(struct mapped_device
*md
,
1468 struct dm_table
*map
, struct bio
*bio
)
1470 struct clone_info ci
;
1473 if (unlikely(!map
)) {
1480 ci
.io
= alloc_io(md
);
1482 atomic_set(&ci
.io
->io_count
, 1);
1485 spin_lock_init(&ci
.io
->endio_lock
);
1486 ci
.sector
= bio
->bi_iter
.bi_sector
;
1488 start_io_acct(ci
.io
);
1490 if (bio
->bi_opf
& REQ_PREFLUSH
) {
1491 ci
.bio
= &ci
.md
->flush_bio
;
1492 ci
.sector_count
= 0;
1493 error
= __send_empty_flush(&ci
);
1494 /* dec_pending submits any data associated with flush */
1495 } else if (bio_op(bio
) == REQ_OP_ZONE_RESET
) {
1497 ci
.sector_count
= 0;
1498 error
= __split_and_process_non_flush(&ci
);
1501 ci
.sector_count
= bio_sectors(bio
);
1502 while (ci
.sector_count
&& !error
) {
1503 error
= __split_and_process_non_flush(&ci
);
1504 if (current
->bio_list
&& ci
.sector_count
&& !error
) {
1506 * Remainder must be passed to generic_make_request()
1507 * so that it gets handled *after* bios already submitted
1508 * have been completely processed.
1509 * We take a clone of the original to store in
1510 * ci.io->bio to be used by end_io_acct() and
1511 * for dec_pending to use for completion handling.
1512 * As this path is not used for REQ_OP_ZONE_REPORT,
1513 * the usage of io->bio in dm_remap_zone_report()
1514 * won't be affected by this reassignment.
1516 struct bio
*b
= bio_clone_bioset(bio
, GFP_NOIO
,
1517 md
->queue
->bio_split
);
1519 bio_advance(bio
, (bio_sectors(bio
) - ci
.sector_count
) << 9);
1521 generic_make_request(bio
);
1527 /* drop the extra reference count */
1528 dec_pending(ci
.io
, errno_to_blk_status(error
));
1530 /*-----------------------------------------------------------------
1532 *---------------------------------------------------------------*/
1535 * The request function that remaps the bio to one target and
1536 * splits off any remainder.
1538 static blk_qc_t
dm_make_request(struct request_queue
*q
, struct bio
*bio
)
1540 int rw
= bio_data_dir(bio
);
1541 struct mapped_device
*md
= q
->queuedata
;
1543 struct dm_table
*map
;
1545 map
= dm_get_live_table(md
, &srcu_idx
);
1547 generic_start_io_acct(q
, rw
, bio_sectors(bio
), &dm_disk(md
)->part0
);
1549 /* if we're suspended, we have to queue this io for later */
1550 if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND
, &md
->flags
))) {
1551 dm_put_live_table(md
, srcu_idx
);
1553 if (!(bio
->bi_opf
& REQ_RAHEAD
))
1557 return BLK_QC_T_NONE
;
1560 __split_and_process_bio(md
, map
, bio
);
1561 dm_put_live_table(md
, srcu_idx
);
1562 return BLK_QC_T_NONE
;
1565 static int dm_any_congested(void *congested_data
, int bdi_bits
)
1568 struct mapped_device
*md
= congested_data
;
1569 struct dm_table
*map
;
1571 if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND
, &md
->flags
)) {
1572 if (dm_request_based(md
)) {
1574 * With request-based DM we only need to check the
1575 * top-level queue for congestion.
1577 r
= md
->queue
->backing_dev_info
->wb
.state
& bdi_bits
;
1579 map
= dm_get_live_table_fast(md
);
1581 r
= dm_table_any_congested(map
, bdi_bits
);
1582 dm_put_live_table_fast(md
);
1589 /*-----------------------------------------------------------------
1590 * An IDR is used to keep track of allocated minor numbers.
1591 *---------------------------------------------------------------*/
1592 static void free_minor(int minor
)
1594 spin_lock(&_minor_lock
);
1595 idr_remove(&_minor_idr
, minor
);
1596 spin_unlock(&_minor_lock
);
1600 * See if the device with a specific minor # is free.
1602 static int specific_minor(int minor
)
1606 if (minor
>= (1 << MINORBITS
))
1609 idr_preload(GFP_KERNEL
);
1610 spin_lock(&_minor_lock
);
1612 r
= idr_alloc(&_minor_idr
, MINOR_ALLOCED
, minor
, minor
+ 1, GFP_NOWAIT
);
1614 spin_unlock(&_minor_lock
);
1617 return r
== -ENOSPC
? -EBUSY
: r
;
1621 static int next_free_minor(int *minor
)
1625 idr_preload(GFP_KERNEL
);
1626 spin_lock(&_minor_lock
);
1628 r
= idr_alloc(&_minor_idr
, MINOR_ALLOCED
, 0, 1 << MINORBITS
, GFP_NOWAIT
);
1630 spin_unlock(&_minor_lock
);
1638 static const struct block_device_operations dm_blk_dops
;
1639 static const struct dax_operations dm_dax_ops
;
1641 static void dm_wq_work(struct work_struct
*work
);
1643 void dm_init_md_queue(struct mapped_device
*md
)
1646 * Initialize data that will only be used by a non-blk-mq DM queue
1647 * - must do so here (in alloc_dev callchain) before queue is used
1649 md
->queue
->queuedata
= md
;
1650 md
->queue
->backing_dev_info
->congested_data
= md
;
1653 void dm_init_normal_md_queue(struct mapped_device
*md
)
1655 md
->use_blk_mq
= false;
1656 dm_init_md_queue(md
);
1659 * Initialize aspects of queue that aren't relevant for blk-mq
1661 md
->queue
->backing_dev_info
->congested_fn
= dm_any_congested
;
1664 static void cleanup_mapped_device(struct mapped_device
*md
)
1667 destroy_workqueue(md
->wq
);
1668 if (md
->kworker_task
)
1669 kthread_stop(md
->kworker_task
);
1670 mempool_destroy(md
->io_pool
);
1672 bioset_free(md
->bs
);
1675 kill_dax(md
->dax_dev
);
1676 put_dax(md
->dax_dev
);
1681 spin_lock(&_minor_lock
);
1682 md
->disk
->private_data
= NULL
;
1683 spin_unlock(&_minor_lock
);
1684 del_gendisk(md
->disk
);
1689 blk_cleanup_queue(md
->queue
);
1691 cleanup_srcu_struct(&md
->io_barrier
);
1698 dm_mq_cleanup_mapped_device(md
);
1702 * Allocate and initialise a blank device with a given minor.
1704 static struct mapped_device
*alloc_dev(int minor
)
1706 int r
, numa_node_id
= dm_get_numa_node();
1707 struct dax_device
*dax_dev
;
1708 struct mapped_device
*md
;
1711 md
= kvzalloc_node(sizeof(*md
), GFP_KERNEL
, numa_node_id
);
1713 DMWARN("unable to allocate device, out of memory.");
1717 if (!try_module_get(THIS_MODULE
))
1718 goto bad_module_get
;
1720 /* get a minor number for the dev */
1721 if (minor
== DM_ANY_MINOR
)
1722 r
= next_free_minor(&minor
);
1724 r
= specific_minor(minor
);
1728 r
= init_srcu_struct(&md
->io_barrier
);
1730 goto bad_io_barrier
;
1732 md
->numa_node_id
= numa_node_id
;
1733 md
->use_blk_mq
= dm_use_blk_mq_default();
1734 md
->init_tio_pdu
= false;
1735 md
->type
= DM_TYPE_NONE
;
1736 mutex_init(&md
->suspend_lock
);
1737 mutex_init(&md
->type_lock
);
1738 mutex_init(&md
->table_devices_lock
);
1739 spin_lock_init(&md
->deferred_lock
);
1740 atomic_set(&md
->holders
, 1);
1741 atomic_set(&md
->open_count
, 0);
1742 atomic_set(&md
->event_nr
, 0);
1743 atomic_set(&md
->uevent_seq
, 0);
1744 INIT_LIST_HEAD(&md
->uevent_list
);
1745 INIT_LIST_HEAD(&md
->table_devices
);
1746 spin_lock_init(&md
->uevent_lock
);
1748 md
->queue
= blk_alloc_queue_node(GFP_KERNEL
, numa_node_id
);
1752 dm_init_md_queue(md
);
1754 md
->disk
= alloc_disk_node(1, numa_node_id
);
1758 atomic_set(&md
->pending
[0], 0);
1759 atomic_set(&md
->pending
[1], 0);
1760 init_waitqueue_head(&md
->wait
);
1761 INIT_WORK(&md
->work
, dm_wq_work
);
1762 init_waitqueue_head(&md
->eventq
);
1763 init_completion(&md
->kobj_holder
.completion
);
1764 md
->kworker_task
= NULL
;
1766 md
->disk
->major
= _major
;
1767 md
->disk
->first_minor
= minor
;
1768 md
->disk
->fops
= &dm_blk_dops
;
1769 md
->disk
->queue
= md
->queue
;
1770 md
->disk
->private_data
= md
;
1771 sprintf(md
->disk
->disk_name
, "dm-%d", minor
);
1773 dax_dev
= alloc_dax(md
, md
->disk
->disk_name
, &dm_dax_ops
);
1776 md
->dax_dev
= dax_dev
;
1779 format_dev_t(md
->name
, MKDEV(_major
, minor
));
1781 md
->wq
= alloc_workqueue("kdmflush", WQ_MEM_RECLAIM
, 0);
1785 md
->bdev
= bdget_disk(md
->disk
, 0);
1789 bio_init(&md
->flush_bio
, NULL
, 0);
1790 bio_set_dev(&md
->flush_bio
, md
->bdev
);
1791 md
->flush_bio
.bi_opf
= REQ_OP_WRITE
| REQ_PREFLUSH
| REQ_SYNC
;
1793 dm_stats_init(&md
->stats
);
1795 /* Populate the mapping, nobody knows we exist yet */
1796 spin_lock(&_minor_lock
);
1797 old_md
= idr_replace(&_minor_idr
, md
, minor
);
1798 spin_unlock(&_minor_lock
);
1800 BUG_ON(old_md
!= MINOR_ALLOCED
);
1805 cleanup_mapped_device(md
);
1809 module_put(THIS_MODULE
);
1815 static void unlock_fs(struct mapped_device
*md
);
1817 static void free_dev(struct mapped_device
*md
)
1819 int minor
= MINOR(disk_devt(md
->disk
));
1823 cleanup_mapped_device(md
);
1825 free_table_devices(&md
->table_devices
);
1826 dm_stats_cleanup(&md
->stats
);
1829 module_put(THIS_MODULE
);
1833 static void __bind_mempools(struct mapped_device
*md
, struct dm_table
*t
)
1835 struct dm_md_mempools
*p
= dm_table_get_md_mempools(t
);
1838 /* The md already has necessary mempools. */
1839 if (dm_table_bio_based(t
)) {
1841 * Reload bioset because front_pad may have changed
1842 * because a different table was loaded.
1844 bioset_free(md
->bs
);
1849 * There's no need to reload with request-based dm
1850 * because the size of front_pad doesn't change.
1851 * Note for future: If you are to reload bioset,
1852 * prep-ed requests in the queue may refer
1853 * to bio from the old bioset, so you must walk
1854 * through the queue to unprep.
1859 BUG_ON(!p
|| md
->io_pool
|| md
->bs
);
1861 md
->io_pool
= p
->io_pool
;
1867 /* mempool bind completed, no longer need any mempools in the table */
1868 dm_table_free_md_mempools(t
);
1872 * Bind a table to the device.
1874 static void event_callback(void *context
)
1876 unsigned long flags
;
1878 struct mapped_device
*md
= (struct mapped_device
*) context
;
1880 spin_lock_irqsave(&md
->uevent_lock
, flags
);
1881 list_splice_init(&md
->uevent_list
, &uevents
);
1882 spin_unlock_irqrestore(&md
->uevent_lock
, flags
);
1884 dm_send_uevents(&uevents
, &disk_to_dev(md
->disk
)->kobj
);
1886 atomic_inc(&md
->event_nr
);
1887 wake_up(&md
->eventq
);
1888 dm_issue_global_event();
1892 * Protected by md->suspend_lock obtained by dm_swap_table().
1894 static void __set_size(struct mapped_device
*md
, sector_t size
)
1896 lockdep_assert_held(&md
->suspend_lock
);
1898 set_capacity(md
->disk
, size
);
1900 i_size_write(md
->bdev
->bd_inode
, (loff_t
)size
<< SECTOR_SHIFT
);
1904 * Returns old map, which caller must destroy.
1906 static struct dm_table
*__bind(struct mapped_device
*md
, struct dm_table
*t
,
1907 struct queue_limits
*limits
)
1909 struct dm_table
*old_map
;
1910 struct request_queue
*q
= md
->queue
;
1913 lockdep_assert_held(&md
->suspend_lock
);
1915 size
= dm_table_get_size(t
);
1918 * Wipe any geometry if the size of the table changed.
1920 if (size
!= dm_get_size(md
))
1921 memset(&md
->geometry
, 0, sizeof(md
->geometry
));
1923 __set_size(md
, size
);
1925 dm_table_event_callback(t
, event_callback
, md
);
1928 * The queue hasn't been stopped yet, if the old table type wasn't
1929 * for request-based during suspension. So stop it to prevent
1930 * I/O mapping before resume.
1931 * This must be done before setting the queue restrictions,
1932 * because request-based dm may be run just after the setting.
1934 if (dm_table_request_based(t
)) {
1937 * Leverage the fact that request-based DM targets are
1938 * immutable singletons and establish md->immutable_target
1939 * - used to optimize both dm_request_fn and dm_mq_queue_rq
1941 md
->immutable_target
= dm_table_get_immutable_target(t
);
1944 __bind_mempools(md
, t
);
1946 old_map
= rcu_dereference_protected(md
->map
, lockdep_is_held(&md
->suspend_lock
));
1947 rcu_assign_pointer(md
->map
, (void *)t
);
1948 md
->immutable_target_type
= dm_table_get_immutable_target_type(t
);
1950 dm_table_set_restrictions(t
, q
, limits
);
1958 * Returns unbound table for the caller to free.
1960 static struct dm_table
*__unbind(struct mapped_device
*md
)
1962 struct dm_table
*map
= rcu_dereference_protected(md
->map
, 1);
1967 dm_table_event_callback(map
, NULL
, NULL
);
1968 RCU_INIT_POINTER(md
->map
, NULL
);
1975 * Constructor for a new device.
1977 int dm_create(int minor
, struct mapped_device
**result
)
1979 struct mapped_device
*md
;
1981 md
= alloc_dev(minor
);
1992 * Functions to manage md->type.
1993 * All are required to hold md->type_lock.
1995 void dm_lock_md_type(struct mapped_device
*md
)
1997 mutex_lock(&md
->type_lock
);
2000 void dm_unlock_md_type(struct mapped_device
*md
)
2002 mutex_unlock(&md
->type_lock
);
2005 void dm_set_md_type(struct mapped_device
*md
, enum dm_queue_mode type
)
2007 BUG_ON(!mutex_is_locked(&md
->type_lock
));
2011 enum dm_queue_mode
dm_get_md_type(struct mapped_device
*md
)
2016 struct target_type
*dm_get_immutable_target_type(struct mapped_device
*md
)
2018 return md
->immutable_target_type
;
2022 * The queue_limits are only valid as long as you have a reference
2025 struct queue_limits
*dm_get_queue_limits(struct mapped_device
*md
)
2027 BUG_ON(!atomic_read(&md
->holders
));
2028 return &md
->queue
->limits
;
2030 EXPORT_SYMBOL_GPL(dm_get_queue_limits
);
2033 * Setup the DM device's queue based on md's type
2035 int dm_setup_md_queue(struct mapped_device
*md
, struct dm_table
*t
)
2038 enum dm_queue_mode type
= dm_get_md_type(md
);
2041 case DM_TYPE_REQUEST_BASED
:
2042 r
= dm_old_init_request_queue(md
, t
);
2044 DMERR("Cannot initialize queue for request-based mapped device");
2048 case DM_TYPE_MQ_REQUEST_BASED
:
2049 r
= dm_mq_init_request_queue(md
, t
);
2051 DMERR("Cannot initialize queue for request-based dm-mq mapped device");
2055 case DM_TYPE_BIO_BASED
:
2056 case DM_TYPE_DAX_BIO_BASED
:
2057 dm_init_normal_md_queue(md
);
2058 blk_queue_make_request(md
->queue
, dm_make_request
);
2060 if (type
== DM_TYPE_DAX_BIO_BASED
)
2061 queue_flag_set_unlocked(QUEUE_FLAG_DAX
, md
->queue
);
struct mapped_device *dm_get_md(dev_t dev)
{
	struct mapped_device *md;
	unsigned minor = MINOR(dev);

	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
		return NULL;

	spin_lock(&_minor_lock);

	md = idr_find(&_minor_idr, minor);
	if (!md || md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) ||
	    test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}
	dm_get(md);
out:
	spin_unlock(&_minor_lock);

	return md;
}
EXPORT_SYMBOL_GPL(dm_get_md);
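/*
 * dm_get_md() returns the mapped_device with an elevated holder count (or
 * NULL), so every successful lookup must be balanced by dm_put().  A
 * minimal, hypothetical sketch:
 *
 *	struct mapped_device *md = dm_get_md(dev);
 *
 *	if (!md)
 *		return -ENXIO;
 *	// ... inspect or operate on md ...
 *	dm_put(md);
 */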
2095 void *dm_get_mdptr(struct mapped_device
*md
)
2097 return md
->interface_ptr
;
2100 void dm_set_mdptr(struct mapped_device
*md
, void *ptr
)
2102 md
->interface_ptr
= ptr
;
2105 void dm_get(struct mapped_device
*md
)
2107 atomic_inc(&md
->holders
);
2108 BUG_ON(test_bit(DMF_FREEING
, &md
->flags
));
2111 int dm_hold(struct mapped_device
*md
)
2113 spin_lock(&_minor_lock
);
2114 if (test_bit(DMF_FREEING
, &md
->flags
)) {
2115 spin_unlock(&_minor_lock
);
2119 spin_unlock(&_minor_lock
);
2122 EXPORT_SYMBOL_GPL(dm_hold
);
2124 const char *dm_device_name(struct mapped_device
*md
)
2128 EXPORT_SYMBOL_GPL(dm_device_name
);
2130 static void __dm_destroy(struct mapped_device
*md
, bool wait
)
2132 struct request_queue
*q
= dm_get_md_queue(md
);
2133 struct dm_table
*map
;
2138 spin_lock(&_minor_lock
);
2139 idr_replace(&_minor_idr
, MINOR_ALLOCED
, MINOR(disk_devt(dm_disk(md
))));
2140 set_bit(DMF_FREEING
, &md
->flags
);
2141 spin_unlock(&_minor_lock
);
2143 blk_set_queue_dying(q
);
2145 if (dm_request_based(md
) && md
->kworker_task
)
2146 kthread_flush_worker(&md
->kworker
);
2149 * Take suspend_lock so that presuspend and postsuspend methods
2150 * do not race with internal suspend.
2152 mutex_lock(&md
->suspend_lock
);
2153 map
= dm_get_live_table(md
, &srcu_idx
);
2154 if (!dm_suspended_md(md
)) {
2155 dm_table_presuspend_targets(map
);
2156 dm_table_postsuspend_targets(map
);
2158 /* dm_put_live_table must be before msleep, otherwise deadlock is possible */
2159 dm_put_live_table(md
, srcu_idx
);
2160 mutex_unlock(&md
->suspend_lock
);
2163 * Rare, but there may be I/O requests still going to complete,
2164 * for example. Wait for all references to disappear.
2165 * No one should increment the reference count of the mapped_device,
2166 * after the mapped_device state becomes DMF_FREEING.
2169 while (atomic_read(&md
->holders
))
2171 else if (atomic_read(&md
->holders
))
2172 DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
2173 dm_device_name(md
), atomic_read(&md
->holders
));
2176 dm_table_destroy(__unbind(md
));
2180 void dm_destroy(struct mapped_device
*md
)
2182 __dm_destroy(md
, true);
2185 void dm_destroy_immediate(struct mapped_device
*md
)
2187 __dm_destroy(md
, false);
2190 void dm_put(struct mapped_device
*md
)
2192 atomic_dec(&md
->holders
);
2194 EXPORT_SYMBOL_GPL(dm_put
);
2196 static int dm_wait_for_completion(struct mapped_device
*md
, long task_state
)
2202 prepare_to_wait(&md
->wait
, &wait
, task_state
);
2204 if (!md_in_flight(md
))
2207 if (signal_pending_state(task_state
, current
)) {
2214 finish_wait(&md
->wait
, &wait
);
2220 * Process the deferred bios
2222 static void dm_wq_work(struct work_struct
*work
)
2224 struct mapped_device
*md
= container_of(work
, struct mapped_device
,
2228 struct dm_table
*map
;
2230 map
= dm_get_live_table(md
, &srcu_idx
);
2232 while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND
, &md
->flags
)) {
2233 spin_lock_irq(&md
->deferred_lock
);
2234 c
= bio_list_pop(&md
->deferred
);
2235 spin_unlock_irq(&md
->deferred_lock
);
2240 if (dm_request_based(md
))
2241 generic_make_request(c
);
2243 __split_and_process_bio(md
, map
, c
);
2246 dm_put_live_table(md
, srcu_idx
);
2249 static void dm_queue_flush(struct mapped_device
*md
)
2251 clear_bit(DMF_BLOCK_IO_FOR_SUSPEND
, &md
->flags
);
2252 smp_mb__after_atomic();
2253 queue_work(md
->wq
, &md
->work
);
2257 * Swap in a new table, returning the old one for the caller to destroy.
2259 struct dm_table
*dm_swap_table(struct mapped_device
*md
, struct dm_table
*table
)
2261 struct dm_table
*live_map
= NULL
, *map
= ERR_PTR(-EINVAL
);
2262 struct queue_limits limits
;
2265 mutex_lock(&md
->suspend_lock
);
2267 /* device must be suspended */
2268 if (!dm_suspended_md(md
))
2272 * If the new table has no data devices, retain the existing limits.
2273 * This helps multipath with queue_if_no_path if all paths disappear,
2274 * then new I/O is queued based on these limits, and then some paths
2277 if (dm_table_has_no_data_devices(table
)) {
2278 live_map
= dm_get_live_table_fast(md
);
2280 limits
= md
->queue
->limits
;
2281 dm_put_live_table_fast(md
);
2285 r
= dm_calculate_queue_limits(table
, &limits
);
2292 map
= __bind(md
, table
, &limits
);
2293 dm_issue_global_event();
2296 mutex_unlock(&md
->suspend_lock
);
2301 * Functions to lock and unlock any filesystem running on the
2304 static int lock_fs(struct mapped_device
*md
)
2308 WARN_ON(md
->frozen_sb
);
2310 md
->frozen_sb
= freeze_bdev(md
->bdev
);
2311 if (IS_ERR(md
->frozen_sb
)) {
2312 r
= PTR_ERR(md
->frozen_sb
);
2313 md
->frozen_sb
= NULL
;
2317 set_bit(DMF_FROZEN
, &md
->flags
);
2322 static void unlock_fs(struct mapped_device
*md
)
2324 if (!test_bit(DMF_FROZEN
, &md
->flags
))
2327 thaw_bdev(md
->bdev
, md
->frozen_sb
);
2328 md
->frozen_sb
= NULL
;
2329 clear_bit(DMF_FROZEN
, &md
->flags
);
2333 * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG
2334 * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE
2335 * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY
2337 * If __dm_suspend returns 0, the device is completely quiescent
2338 * now. There is no request-processing activity. All new requests
2339 * are being added to md->deferred list.
2341 static int __dm_suspend(struct mapped_device
*md
, struct dm_table
*map
,
2342 unsigned suspend_flags
, long task_state
,
2343 int dmf_suspended_flag
)
2345 bool do_lockfs
= suspend_flags
& DM_SUSPEND_LOCKFS_FLAG
;
2346 bool noflush
= suspend_flags
& DM_SUSPEND_NOFLUSH_FLAG
;
2349 lockdep_assert_held(&md
->suspend_lock
);
2352 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
2353 * This flag is cleared before dm_suspend returns.
2356 set_bit(DMF_NOFLUSH_SUSPENDING
, &md
->flags
);
2358 pr_debug("%s: suspending with flush\n", dm_device_name(md
));
2361 * This gets reverted if there's an error later and the targets
2362 * provide the .presuspend_undo hook.
2364 dm_table_presuspend_targets(map
);
2367 * Flush I/O to the device.
2368 * Any I/O submitted after lock_fs() may not be flushed.
2369 * noflush takes precedence over do_lockfs.
2370 * (lock_fs() flushes I/Os and waits for them to complete.)
2372 if (!noflush
&& do_lockfs
) {
2375 dm_table_presuspend_undo_targets(map
);
2381 * Here we must make sure that no processes are submitting requests
2382 * to target drivers i.e. no one may be executing
2383 * __split_and_process_bio. This is called from dm_request and
2386 * To get all processes out of __split_and_process_bio in dm_request,
2387 * we take the write lock. To prevent any process from reentering
2388 * __split_and_process_bio from dm_request and quiesce the thread
2389 * (dm_wq_work), we set BMF_BLOCK_IO_FOR_SUSPEND and call
2390 * flush_workqueue(md->wq).
2392 set_bit(DMF_BLOCK_IO_FOR_SUSPEND
, &md
->flags
);
2394 synchronize_srcu(&md
->io_barrier
);
2397 * Stop md->queue before flushing md->wq in case request-based
2398 * dm defers requests to md->wq from md->queue.
2400 if (dm_request_based(md
)) {
2401 dm_stop_queue(md
->queue
);
2402 if (md
->kworker_task
)
2403 kthread_flush_worker(&md
->kworker
);
2406 flush_workqueue(md
->wq
);
2409 * At this point no more requests are entering target request routines.
2410 * We call dm_wait_for_completion to wait for all existing requests
2413 r
= dm_wait_for_completion(md
, task_state
);
2415 set_bit(dmf_suspended_flag
, &md
->flags
);
2418 clear_bit(DMF_NOFLUSH_SUSPENDING
, &md
->flags
);
2420 synchronize_srcu(&md
->io_barrier
);
2422 /* were we interrupted ? */
2426 if (dm_request_based(md
))
2427 dm_start_queue(md
->queue
);
2430 dm_table_presuspend_undo_targets(map
);
2431 /* pushback list is already flushed, so skip flush */
2438 * We need to be able to change a mapping table under a mounted
2439 * filesystem. For example we might want to move some data in
2440 * the background. Before the table can be swapped with
2441 * dm_bind_table, dm_suspend must be called to flush any in
2442 * flight bios and ensure that any further io gets deferred.
2445 * Suspend mechanism in request-based dm.
2447 * 1. Flush all I/Os by lock_fs() if needed.
2448 * 2. Stop dispatching any I/O by stopping the request_queue.
2449 * 3. Wait for all in-flight I/Os to be completed or requeued.
2451 * To abort suspend, start the request_queue.
2453 int dm_suspend(struct mapped_device
*md
, unsigned suspend_flags
)
2455 struct dm_table
*map
= NULL
;
2459 mutex_lock_nested(&md
->suspend_lock
, SINGLE_DEPTH_NESTING
);
2461 if (dm_suspended_md(md
)) {
2466 if (dm_suspended_internally_md(md
)) {
2467 /* already internally suspended, wait for internal resume */
2468 mutex_unlock(&md
->suspend_lock
);
2469 r
= wait_on_bit(&md
->flags
, DMF_SUSPENDED_INTERNALLY
, TASK_INTERRUPTIBLE
);
2475 map
= rcu_dereference_protected(md
->map
, lockdep_is_held(&md
->suspend_lock
));
2477 r
= __dm_suspend(md
, map
, suspend_flags
, TASK_INTERRUPTIBLE
, DMF_SUSPENDED
);
2481 dm_table_postsuspend_targets(map
);
2484 mutex_unlock(&md
->suspend_lock
);
2488 static int __dm_resume(struct mapped_device
*md
, struct dm_table
*map
)
2491 int r
= dm_table_resume_targets(map
);
2499 * Flushing deferred I/Os must be done after targets are resumed
2500 * so that mapping of targets can work correctly.
2501 * Request-based dm is queueing the deferred I/Os in its request_queue.
2503 if (dm_request_based(md
))
2504 dm_start_queue(md
->queue
);
2511 int dm_resume(struct mapped_device
*md
)
2514 struct dm_table
*map
= NULL
;
2518 mutex_lock_nested(&md
->suspend_lock
, SINGLE_DEPTH_NESTING
);
2520 if (!dm_suspended_md(md
))
2523 if (dm_suspended_internally_md(md
)) {
2524 /* already internally suspended, wait for internal resume */
2525 mutex_unlock(&md
->suspend_lock
);
2526 r
= wait_on_bit(&md
->flags
, DMF_SUSPENDED_INTERNALLY
, TASK_INTERRUPTIBLE
);
2532 map
= rcu_dereference_protected(md
->map
, lockdep_is_held(&md
->suspend_lock
));
2533 if (!map
|| !dm_table_get_size(map
))
2536 r
= __dm_resume(md
, map
);
2540 clear_bit(DMF_SUSPENDED
, &md
->flags
);
2542 mutex_unlock(&md
->suspend_lock
);
2548 * Internal suspend/resume works like userspace-driven suspend. It waits
2549 * until all bios finish and prevents issuing new bios to the target drivers.
2550 * It may be used only from the kernel.
2553 static void __dm_internal_suspend(struct mapped_device
*md
, unsigned suspend_flags
)
2555 struct dm_table
*map
= NULL
;
2557 lockdep_assert_held(&md
->suspend_lock
);
2559 if (md
->internal_suspend_count
++)
2560 return; /* nested internal suspend */
2562 if (dm_suspended_md(md
)) {
2563 set_bit(DMF_SUSPENDED_INTERNALLY
, &md
->flags
);
2564 return; /* nest suspend */
2567 map
= rcu_dereference_protected(md
->map
, lockdep_is_held(&md
->suspend_lock
));
2570 * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is
2571 * supported. Properly supporting a TASK_INTERRUPTIBLE internal suspend
2572 * would require changing .presuspend to return an error -- avoid this
2573 * until there is a need for more elaborate variants of internal suspend.
2575 (void) __dm_suspend(md
, map
, suspend_flags
, TASK_UNINTERRUPTIBLE
,
2576 DMF_SUSPENDED_INTERNALLY
);
2578 dm_table_postsuspend_targets(map
);
static void __dm_internal_resume(struct mapped_device *md)
{
	BUG_ON(!md->internal_suspend_count);

	if (--md->internal_suspend_count)
		return; /* resume from nested internal suspend */

	if (dm_suspended_md(md))
		goto done; /* resume from nested suspend */

	/*
	 * NOTE: existing callers don't need to call dm_table_resume_targets
	 * (which may fail -- so best to avoid it for now by passing NULL map)
	 */
	(void) __dm_resume(md, NULL);

done:
	clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
	smp_mb__after_atomic();
	wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY);
}
void dm_internal_suspend_noflush(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	__dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG);
	mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush);
void dm_internal_resume(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	__dm_internal_resume(md);
	mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_resume);
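/*
 * Illustrative sketch only (not code from this file): a target stacked
 * on another mapped_device is expected to bracket work on that device
 * with the noflush pair; 'inner_md' is a placeholder name:
 *
 *	dm_internal_suspend_noflush(inner_md);
 *	... operate while the inner device is quiesced ...
 *	dm_internal_resume(inner_md);
 */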
/*
 * Fast variants of internal suspend/resume hold md->suspend_lock,
 * which prevents interaction with userspace-driven suspend.
 */

void dm_internal_suspend_fast(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
		return;

	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	synchronize_srcu(&md->io_barrier);
	flush_workqueue(md->wq);
	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL_GPL(dm_internal_suspend_fast);
void dm_internal_resume_fast(struct mapped_device *md)
{
	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
		goto done;

	dm_queue_flush(md);

done:
	mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
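/*
 * Illustrative sketch only: the fast variants form a short bracketed
 * pair.  Note that dm_internal_suspend_fast() returns with
 * md->suspend_lock held and dm_internal_resume_fast() drops it:
 *
 *	dm_internal_suspend_fast(md);
 *	... e.g. copy out metadata while I/O is blocked ...
 *	dm_internal_resume_fast(md);
 */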
/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
		      unsigned cookie)
{
	char udev_cookie[DM_COOKIE_LENGTH];
	char *envp[] = { udev_cookie, NULL };

	if (!cookie)
		return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
	else {
		snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
			 DM_COOKIE_ENV_VAR_NAME, cookie);
		return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
					  action, envp);
	}
}
uint32_t dm_next_uevent_seq(struct mapped_device *md)
{
	return atomic_add_return(1, &md->uevent_seq);
}
uint32_t dm_get_event_nr(struct mapped_device *md)
{
	return atomic_read(&md->event_nr);
}
int dm_wait_event(struct mapped_device *md, int event_nr)
{
	return wait_event_interruptible(md->eventq,
			(event_nr != atomic_read(&md->event_nr)));
}
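/*
 * Illustrative sketch only: event waiters sample the current event
 * number first and then block until it changes:
 *
 *	uint32_t nr = dm_get_event_nr(md);
 *	...
 *	r = dm_wait_event(md, nr);
 *
 * dm_wait_event() returns 0 once event_nr has moved past nr, or
 * -ERESTARTSYS if the wait was interrupted by a signal.
 */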
void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
{
	unsigned long flags;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_add(elist, &md->uevent_list);
	spin_unlock_irqrestore(&md->uevent_lock, flags);
}
/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
	return md->disk;
}
EXPORT_SYMBOL_GPL(dm_disk);
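/*
 * Illustrative sketch only: users of dm_disk() are expected to pin the
 * mapped_device for as long as they use the disk:
 *
 *	dm_get(md);
 *	disk = dm_disk(md);
 *	... use disk ...
 *	dm_put(md);
 */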
struct kobject *dm_kobject(struct mapped_device *md)
{
	return &md->kobj_holder.kobj;
}
struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
{
	struct mapped_device *md;

	md = container_of(kobj, struct mapped_device, kobj_holder.kobj);

	spin_lock(&_minor_lock);
	if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}
	dm_get(md);
out:
	spin_unlock(&_minor_lock);

	return md;
}
int dm_suspended_md(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED, &md->flags);
}
int dm_suspended_internally_md(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
}
int dm_test_deferred_remove_flag(struct mapped_device *md)
{
	return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
}
int dm_suspended(struct dm_target *ti)
{
	return dm_suspended_md(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_suspended);
int dm_noflush_suspending(struct dm_target *ti)
{
	return __noflush_suspending(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);
struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
					    unsigned integrity, unsigned per_io_data_size)
{
	struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
	unsigned int pool_size = 0;
	unsigned int front_pad;

	if (!pools)
		return NULL;

	switch (type) {
	case DM_TYPE_BIO_BASED:
	case DM_TYPE_DAX_BIO_BASED:
		pool_size = dm_get_reserved_bio_based_ios();
		front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);

		pools->io_pool = mempool_create_slab_pool(pool_size, _io_cache);
		if (!pools->io_pool)
			goto out;
		break;
	case DM_TYPE_REQUEST_BASED:
	case DM_TYPE_MQ_REQUEST_BASED:
		pool_size = dm_get_reserved_rq_based_ios();
		front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
		/* per_io_data_size is used for blk-mq pdu at queue allocation */
		break;
	default:
		BUG();
	}

	pools->bs = bioset_create(pool_size, front_pad, BIOSET_NEED_RESCUER);
	if (!pools->bs)
		goto out;

	if (integrity && bioset_integrity_create(pools->bs, pool_size))
		goto out;

	return pools;

out:
	dm_free_md_mempools(pools);

	return NULL;
}
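/*
 * Illustrative note only (an assumption about the intended layout, not
 * text from the original): for bio-based pools the front_pad above lays
 * each clone bio out as the tail of its struct dm_target_io, with the
 * target's per-io data (rounded up to the alignment of dm_target_io)
 * placed immediately in front of that, i.e. roughly:
 *
 *	[ per-io data | struct dm_target_io ... clone bio ]
 *
 * which is what lets dm_per_bio_data() recover the per-io data with
 * constant pointer arithmetic from the clone bio.
 */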
void dm_free_md_mempools(struct dm_md_mempools *pools)
{
	if (!pools)
		return;

	mempool_destroy(pools->io_pool);

	if (pools->bs)
		bioset_free(pools->bs);

	kfree(pools);
}
struct dm_pr {
	u64	old_key;
	u64	new_key;
	u32	flags;
	bool	fail_early;
};

static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn,
		      void *data)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	struct dm_table *table;
	struct dm_target *ti;
	int ret = -ENOTTY, srcu_idx;

	table = dm_get_live_table(md, &srcu_idx);
	if (!table || !dm_table_get_size(table))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(table) != 1)
		goto out;
	ti = dm_table_get_target(table, 0);

	ret = -EINVAL;
	if (!ti->type->iterate_devices)
		goto out;

	ret = ti->type->iterate_devices(ti, fn, data);
out:
	dm_put_live_table(md, srcu_idx);
	return ret;
}
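/*
 * Illustrative sketch only: dm_call_pr() forwards a persistent
 * reservation callout to the single target's iterate_devices hook,
 * which invokes it once per underlying path, e.g. for registration:
 *
 *	struct dm_pr pr = { .old_key = old_key, .new_key = new_key, };
 *	ret = dm_call_pr(bdev, __dm_pr_register, &pr);
 */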
/*
 * For register / unregister we need to manually call out to every path.
 */
static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev,
			    sector_t start, sector_t len, void *data)
{
	struct dm_pr *pr = data;
	const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;

	if (!ops || !ops->pr_register)
		return -EOPNOTSUPP;
	return ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags);
}
static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
			  u32 flags)
{
	struct dm_pr pr = {
		.old_key	= old_key,
		.new_key	= new_key,
		.flags		= flags,
		.fail_early	= true,
	};
	int ret;

	ret = dm_call_pr(bdev, __dm_pr_register, &pr);
	if (ret && new_key) {
		/* unregister all paths if we failed to register any path */
		pr.old_key = new_key;
		pr.new_key = 0;
		pr.flags = 0;
		pr.fail_early = false;
		dm_call_pr(bdev, __dm_pr_register, &pr);
	}

	return ret;
}
static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
			 u32 flags)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	fmode_t mode;
	int r;

	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
	if (r < 0)
		return r;

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_reserve)
		r = ops->pr_reserve(bdev, key, type, flags);
	else
		r = -EOPNOTSUPP;

	bdput(bdev);
	return r;
}
static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	fmode_t mode;
	int r;

	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
	if (r < 0)
		return r;

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_release)
		r = ops->pr_release(bdev, key, type);
	else
		r = -EOPNOTSUPP;

	bdput(bdev);
	return r;
}
static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
			 enum pr_type type, bool abort)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	fmode_t mode;
	int r;

	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
	if (r < 0)
		return r;

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_preempt)
		r = ops->pr_preempt(bdev, old_key, new_key, type, abort);
	else
		r = -EOPNOTSUPP;

	bdput(bdev);
	return r;
}
static int dm_pr_clear(struct block_device *bdev, u64 key)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	fmode_t mode;
	int r;

	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
	if (r < 0)
		return r;

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_clear)
		r = ops->pr_clear(bdev, key);
	else
		r = -EOPNOTSUPP;

	bdput(bdev);
	return r;
}
static const struct pr_ops dm_pr_ops = {
	.pr_register	= dm_pr_register,
	.pr_reserve	= dm_pr_reserve,
	.pr_release	= dm_pr_release,
	.pr_preempt	= dm_pr_preempt,
	.pr_clear	= dm_pr_clear,
};
static const struct block_device_operations dm_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.ioctl = dm_blk_ioctl,
	.getgeo = dm_blk_getgeo,
	.pr_ops = &dm_pr_ops,
	.owner = THIS_MODULE
};
static const struct dax_operations dm_dax_ops = {
	.direct_access = dm_dax_direct_access,
	.copy_from_iter = dm_dax_copy_from_iter,
};
/*
 * module hooks
 */
module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");

module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");

module_param(dm_numa_node, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations");

MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");