/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/blk-mq.h>
#include <linux/mount.h>
#include <linux/dax.h>

#define DM_MSG_PREFIX "table"

#define MAX_DEPTH 16
#define NODE_SIZE L1_CACHE_BYTES
#define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)

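/*
 * Illustrative geometry: with a 64-byte L1 cache line and an 8-byte
 * sector_t (typical on x86-64), NODE_SIZE = 64, KEYS_PER_NODE = 64 / 8 = 8
 * and CHILDREN_PER_NODE = 9, so each btree node fills exactly one cache
 * line.
 */
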
struct dm_table {
	struct mapped_device *md;
	enum dm_queue_mode type;

	/* btree table */
	unsigned int depth;
	unsigned int counts[MAX_DEPTH];	/* in nodes */
	sector_t *index[MAX_DEPTH];

	unsigned int num_targets;
	unsigned int num_allocated;
	sector_t *highs;
	struct dm_target *targets;

	struct target_type *immutable_target_type;

	bool integrity_supported:1;
	bool singleton:1;
	unsigned integrity_added:1;

	/*
	 * Indicates the rw permissions for the new logical
	 * device.  This should be a combination of FMODE_READ
	 * and FMODE_WRITE.
	 */
	fmode_t mode;

	/* a list of devices used by this table */
	struct list_head devices;

	/* events get handed up using this callback */
	void (*event_fn)(void *);
	void *event_context;

	struct dm_md_mempools *mempools;

	struct list_head target_callbacks;
};

/*
 * Similar to ceiling(log_size(n))
 */
static unsigned int int_log(unsigned int n, unsigned int base)
{
	int result = 0;

	while (n > 1) {
		n = dm_div_up(n, base);
		result++;
	}

	return result;
}

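/*
 * Worked example (illustrative): with base == CHILDREN_PER_NODE == 9,
 * int_log(100, 9) iterates 100 -> 12 -> 2 -> 1 and returns 3, matching
 * ceil(log_9(100)).
 */
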
/*
 * Calculate the index of the child node of the n'th node k'th key.
 */
static inline unsigned int get_child(unsigned int n, unsigned int k)
{
	return (n * CHILDREN_PER_NODE) + k;
}

/*
 * Return the n'th node of level l from table t.
 */
static inline sector_t *get_node(struct dm_table *t,
				 unsigned int l, unsigned int n)
{
	return t->index[l] + (n * KEYS_PER_NODE);
}

/*
 * Return the highest key that you could lookup from the n'th
 * node on level l of the btree.
 */
static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
{
	for (; l < t->depth - 1; l++)
		n = get_child(n, CHILDREN_PER_NODE - 1);

	if (n >= t->counts[l])
		return (sector_t) - 1;

	return get_node(t, l, n)[KEYS_PER_NODE - 1];
}

/*
 * Fills in a level of the btree based on the highs of the level
 * below it.
 */
static int setup_btree_index(unsigned int l, struct dm_table *t)
{
	unsigned int n, k;
	sector_t *node;

	for (n = 0U; n < t->counts[l]; n++) {
		node = get_node(t, l, n);

		for (k = 0U; k < KEYS_PER_NODE; k++)
			node[k] = high(t, l + 1, get_child(n, k));
	}

	return 0;
}

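/*
 * After this runs, each key of an internal node caches the highest
 * sector reachable through the corresponding child subtree, so a lookup
 * can descend level by level picking the first key >= sector.
 */
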
void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size)
{
	unsigned long size;
	void *addr;

	/*
	 * Check that we're not going to overflow.
	 */
	if (nmemb > (ULONG_MAX / elem_size))
		return NULL;

	size = nmemb * elem_size;
	addr = vzalloc(size);

	return addr;
}
EXPORT_SYMBOL(dm_vcalloc);

/*
 * highs, and targets are managed as dynamic arrays during a
 * table load.
 */
static int alloc_targets(struct dm_table *t, unsigned int num)
{
	sector_t *n_highs;
	struct dm_target *n_targets;

	/*
	 * Allocate both the target array and offset array at once.
	 * Append an empty entry to catch sectors beyond the end of
	 * the device.
	 */
	n_highs = (sector_t *) dm_vcalloc(num + 1, sizeof(struct dm_target) +
					  sizeof(sector_t));
	if (!n_highs)
		return -ENOMEM;

	n_targets = (struct dm_target *) (n_highs + num);

	memset(n_highs, -1, sizeof(*n_highs) * num);
	vfree(t->highs);

	t->num_allocated = num;
	t->highs = n_highs;
	t->targets = n_targets;

	return 0;
}

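/*
 * Resulting layout of the single vzalloc'd block (illustrative):
 * 'num' sector_t "high" slots immediately followed by the dm_target
 * array. The unused "high" slots stay at (sector_t)-1, so a lookup past
 * the populated targets lands on a sentinel rather than running off the
 * end.
 */
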
int dm_table_create(struct dm_table **result, fmode_t mode,
		    unsigned num_targets, struct mapped_device *md)
{
	struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);

	if (!t)
		return -ENOMEM;

	INIT_LIST_HEAD(&t->devices);
	INIT_LIST_HEAD(&t->target_callbacks);

	if (!num_targets)
		num_targets = KEYS_PER_NODE;

	num_targets = dm_round_up(num_targets, KEYS_PER_NODE);

	if (!num_targets) {
		kfree(t);
		return -ENOMEM;
	}

	if (alloc_targets(t, num_targets)) {
		kfree(t);
		return -ENOMEM;
	}

	t->type = DM_TYPE_NONE;
	t->mode = mode;
	t->md = md;
	*result = t;
	return 0;
}

static void free_devices(struct list_head *devices, struct mapped_device *md)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, devices) {
		struct dm_dev_internal *dd =
		    list_entry(tmp, struct dm_dev_internal, list);
		DMWARN("%s: dm_table_destroy: dm_put_device call missing for %s",
		       dm_device_name(md), dd->dm_dev->name);
		dm_put_table_device(md, dd->dm_dev);
		kfree(dd);
	}
}

void dm_table_destroy(struct dm_table *t)
{
	unsigned int i;

	if (!t)
		return;

	/* free the indexes */
	if (t->depth >= 2)
		vfree(t->index[t->depth - 2]);

	/* free the targets */
	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *tgt = t->targets + i;

		if (tgt->type->dtr)
			tgt->type->dtr(tgt);

		dm_put_target_type(tgt->type);
	}

	vfree(t->highs);

	/* free the device list */
	free_devices(&t->devices, t->md);

	dm_free_md_mempools(t->mempools);

	kfree(t);
}

/*
 * See if we've already got a device in the list.
 */
static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
{
	struct dm_dev_internal *dd;

	list_for_each_entry (dd, l, list)
		if (dd->dm_dev->bdev->bd_dev == dev)
			return dd;

	return NULL;
}

/*
 * If possible, this checks whether an area of a destination device
 * is invalid.
 */
static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
				  sector_t start, sector_t len, void *data)
{
	struct request_queue *q;
	struct queue_limits *limits = data;
	struct block_device *bdev = dev->bdev;
	sector_t dev_size =
		i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
	unsigned short logical_block_size_sectors =
		limits->logical_block_size >> SECTOR_SHIFT;
	char b[BDEVNAME_SIZE];

	/*
	 * Some devices exist without request functions,
	 * such as loop devices not yet bound to backing files.
	 * Forbid the use of such devices.
	 */
	q = bdev_get_queue(bdev);
	if (!q || !q->make_request_fn) {
		DMWARN("%s: %s is not yet initialised: "
		       "start=%llu, len=%llu, dev_size=%llu",
		       dm_device_name(ti->table->md), bdevname(bdev, b),
		       (unsigned long long)start,
		       (unsigned long long)len,
		       (unsigned long long)dev_size);
		return 1;
	}

	if (!dev_size)
		return 0;

	if ((start >= dev_size) || (start + len > dev_size)) {
		DMWARN("%s: %s too small for target: "
		       "start=%llu, len=%llu, dev_size=%llu",
		       dm_device_name(ti->table->md), bdevname(bdev, b),
		       (unsigned long long)start,
		       (unsigned long long)len,
		       (unsigned long long)dev_size);
		return 1;
	}

	/*
	 * If the target is mapped to zoned block device(s), check
	 * that the zones are not partially mapped.
	 */
	if (bdev_zoned_model(bdev) != BLK_ZONED_NONE) {
		unsigned int zone_sectors = bdev_zone_sectors(bdev);

		if (start & (zone_sectors - 1)) {
			DMWARN("%s: start=%llu not aligned to h/w zone size %u of %s",
			       dm_device_name(ti->table->md),
			       (unsigned long long)start,
			       zone_sectors, bdevname(bdev, b));
			return 1;
		}

		/*
		 * Note: The last zone of a zoned block device may be smaller
		 * than other zones. So for a target mapping the end of a
		 * zoned block device with such a zone, len would not be zone
		 * aligned. We do not allow such last smaller zone to be part
		 * of the mapping here to ensure that mappings with multiple
		 * devices do not end up with a smaller zone in the middle of
		 * the sector range.
		 */
		if (len & (zone_sectors - 1)) {
			DMWARN("%s: len=%llu not aligned to h/w zone size %u of %s",
			       dm_device_name(ti->table->md),
			       (unsigned long long)len,
			       zone_sectors, bdevname(bdev, b));
			return 1;
		}
	}

	if (logical_block_size_sectors <= 1)
		return 0;

	if (start & (logical_block_size_sectors - 1)) {
		DMWARN("%s: start=%llu not aligned to h/w "
		       "logical block size %u of %s",
		       dm_device_name(ti->table->md),
		       (unsigned long long)start,
		       limits->logical_block_size, bdevname(bdev, b));
		return 1;
	}

	if (len & (logical_block_size_sectors - 1)) {
		DMWARN("%s: len=%llu not aligned to h/w "
		       "logical block size %u of %s",
		       dm_device_name(ti->table->md),
		       (unsigned long long)len,
		       limits->logical_block_size, bdevname(bdev, b));
		return 1;
	}

	return 0;
}

/*
 * This upgrades the mode on an already open dm_dev, being
 * careful to leave things as they were if we fail to reopen the
 * device and not to touch the existing bdev field in case
 * it is accessed concurrently inside dm_table_any_congested().
 */
static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
			struct mapped_device *md)
{
	int r;
	struct dm_dev *old_dev, *new_dev;

	old_dev = dd->dm_dev;

	r = dm_get_table_device(md, dd->dm_dev->bdev->bd_dev,
				dd->dm_dev->mode | new_mode, &new_dev);
	if (r)
		return r;

	dd->dm_dev = new_dev;
	dm_put_table_device(md, old_dev);

	return 0;
}

/*
 * Convert the path to a device
 */
dev_t dm_get_dev_t(const char *path)
{
	dev_t dev;
	struct block_device *bdev;

	bdev = lookup_bdev(path);
	if (IS_ERR(bdev))
		dev = name_to_dev_t(path);
	else {
		dev = bdev->bd_dev;
		bdput(bdev);
	}

	return dev;
}
EXPORT_SYMBOL_GPL(dm_get_dev_t);

/*
 * Add a device to the list, or just increment the usage count if
 * it's already present.
 */
int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
		  struct dm_dev **result)
{
	int r;
	dev_t dev;
	struct dm_dev_internal *dd;
	struct dm_table *t = ti->table;

	BUG_ON(!t);

	dev = dm_get_dev_t(path);
	if (!dev)
		return -ENODEV;

	dd = find_device(&t->devices, dev);
	if (!dd) {
		dd = kmalloc(sizeof(*dd), GFP_KERNEL);
		if (!dd)
			return -ENOMEM;

		if ((r = dm_get_table_device(t->md, dev, mode, &dd->dm_dev))) {
			kfree(dd);
			return r;
		}

		refcount_set(&dd->count, 1);
		list_add(&dd->list, &t->devices);
		goto out;

	} else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) {
		r = upgrade_mode(dd, mode, t->md);
		if (r)
			return r;
	}
	refcount_inc(&dd->count);
out:
	*result = dd->dm_dev;
	return 0;
}
EXPORT_SYMBOL(dm_get_device);

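/*
 * Typical use from a target constructor (sketch only; 'lc' is a
 * hypothetical per-target context):
 *
 *	if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
 *			  &lc->dev)) {
 *		ti->error = "Device lookup failed";
 *		return -EINVAL;
 *	}
 *
 * The matching dm_put_device() call belongs in the target's destructor.
 */
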
static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
				sector_t start, sector_t len, void *data)
{
	struct queue_limits *limits = data;
	struct block_device *bdev = dev->bdev;
	struct request_queue *q = bdev_get_queue(bdev);
	char b[BDEVNAME_SIZE];

	if (unlikely(!q)) {
		DMWARN("%s: Cannot set limits for nonexistent device %s",
		       dm_device_name(ti->table->md), bdevname(bdev, b));
		return 0;
	}

	if (bdev_stack_limits(limits, bdev, start) < 0)
		DMWARN("%s: adding target device %s caused an alignment inconsistency: "
		       "physical_block_size=%u, logical_block_size=%u, "
		       "alignment_offset=%u, start=%llu",
		       dm_device_name(ti->table->md), bdevname(bdev, b),
		       q->limits.physical_block_size,
		       q->limits.logical_block_size,
		       q->limits.alignment_offset,
		       (unsigned long long) start << SECTOR_SHIFT);

	limits->zoned = blk_queue_zoned_model(q);

	return 0;
}

/*
 * Decrement a device's use count and remove it if necessary.
 */
void dm_put_device(struct dm_target *ti, struct dm_dev *d)
{
	int found = 0;
	struct list_head *devices = &ti->table->devices;
	struct dm_dev_internal *dd;

	list_for_each_entry(dd, devices, list) {
		if (dd->dm_dev == d) {
			found = 1;
			break;
		}
	}
	if (!found) {
		DMWARN("%s: device %s not in table devices list",
		       dm_device_name(ti->table->md), d->name);
		return;
	}
	if (refcount_dec_and_test(&dd->count)) {
		dm_put_table_device(ti->table->md, d);
		list_del(&dd->list);
		kfree(dd);
	}
}
EXPORT_SYMBOL(dm_put_device);

/*
 * Checks to see if the target joins onto the end of the table.
 */
static int adjoin(struct dm_table *table, struct dm_target *ti)
{
	struct dm_target *prev;

	if (!table->num_targets)
		return !ti->begin;

	prev = &table->targets[table->num_targets - 1];
	return (ti->begin == (prev->begin + prev->len));
}

/*
 * Used to dynamically allocate the arg array.
 *
 * We do first allocation with GFP_NOIO because dm-mpath and dm-thin must
 * process messages even if some device is suspended. These messages have a
 * small fixed number of arguments.
 *
 * On the other hand, dm-switch needs to process bulk data using messages and
 * excessive use of GFP_NOIO could cause trouble.
 */
static char **realloc_argv(unsigned *size, char **old_argv)
{
	char **argv;
	unsigned new_size;
	gfp_t gfp;

	if (*size) {
		new_size = *size * 2;
		gfp = GFP_KERNEL;
	} else {
		new_size = 8;
		gfp = GFP_NOIO;
	}
	argv = kmalloc_array(new_size, sizeof(*argv), gfp);
	if (argv && old_argv) {
		memcpy(argv, old_argv, *size * sizeof(*argv));
		*size = new_size;
	}

	kfree(old_argv);
	return argv;
}

/*
 * Destructively splits up the argument list to pass to ctr.
 */
int dm_split_args(int *argc, char ***argvp, char *input)
{
	char *start, *end = input, *out, **argv = NULL;
	unsigned array_size = 0;

	*argc = 0;

	if (!input) {
		*argvp = NULL;
		return 0;
	}

	argv = realloc_argv(&array_size, argv);
	if (!argv)
		return -ENOMEM;

	while (1) {
		/* Skip whitespace */
		start = skip_spaces(end);

		if (!*start)
			break;	/* success, we hit the end */

		/* 'out' is used to remove any back-quotes */
		end = out = start;
		while (*end) {
			/* Everything apart from '\0' can be quoted */
			if (*end == '\\' && *(end + 1)) {
				*out++ = *(end + 1);
				end += 2;
				continue;
			}

			if (isspace(*end))
				break;	/* end of token */

			*out++ = *end++;
		}

		/* have we already filled the array ? */
		if ((*argc + 1) > array_size) {
			argv = realloc_argv(&array_size, argv);
			if (!argv)
				return -ENOMEM;
		}

		/* we know this is whitespace */
		if (*end)
			end++;

		/* terminate the string and put it in the array */
		*out = '\0';
		argv[*argc] = start;
		(*argc)++;
	}

	*argvp = argv;
	return 0;
}

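/*
 * Example (illustrative): the input "0 1024 linear /dev/sda\ 1 0" is
 * split into argc == 5 with argv == { "0", "1024", "linear",
 * "/dev/sda 1", "0" }; the backslash quotes the following space.
 */
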
/*
 * Impose necessary and sufficient conditions on a device's table such
 * that any incoming bio which respects its logical_block_size can be
 * processed successfully.  If it falls across the boundary between
 * two or more targets, the size of each piece it gets split into must
 * be compatible with the logical_block_size of the target processing it.
 */
static int validate_hardware_logical_block_alignment(struct dm_table *table,
						     struct queue_limits *limits)
{
	/*
	 * This function uses arithmetic modulo the logical_block_size
	 * (in units of 512-byte sectors).
	 */
	unsigned short device_logical_block_size_sects =
		limits->logical_block_size >> SECTOR_SHIFT;

	/*
	 * Offset of the start of the next table entry, mod logical_block_size.
	 */
	unsigned short next_target_start = 0;

	/*
	 * Given an aligned bio that extends beyond the end of a
	 * target, how many sectors must the next target handle?
	 */
	unsigned short remaining = 0;

	struct dm_target *uninitialized_var(ti);
	struct queue_limits ti_limits;
	unsigned i;

	/*
	 * Check each entry in the table in turn.
	 */
	for (i = 0; i < dm_table_get_num_targets(table); i++) {
		ti = dm_table_get_target(table, i);

		blk_set_stacking_limits(&ti_limits);

		/* combine all target devices' limits */
		if (ti->type->iterate_devices)
			ti->type->iterate_devices(ti, dm_set_device_limits,
						  &ti_limits);

		/*
		 * If the remaining sectors fall entirely within this
		 * table entry are they compatible with its logical_block_size?
		 */
		if (remaining < ti->len &&
		    remaining & ((ti_limits.logical_block_size >>
				  SECTOR_SHIFT) - 1))
			break;	/* alignment check failed */

		next_target_start =
		    (unsigned short) ((next_target_start + ti->len) &
				      (device_logical_block_size_sects - 1));
		remaining = next_target_start ?
		    device_logical_block_size_sects - next_target_start : 0;
	}

	if (remaining) {
		DMWARN("%s: table line %u (start sect %llu len %llu) "
		       "not aligned to h/w logical block size %u",
		       dm_device_name(table->md), i,
		       (unsigned long long) ti->begin,
		       (unsigned long long) ti->len,
		       limits->logical_block_size);
		return -EINVAL;
	}

	return 0;
}

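/*
 * Worked example (illustrative): with a 4096-byte logical_block_size
 * (8 sectors), a first target of len 12 sectors leaves
 * next_target_start == 4 and remaining == 4. If the second target's
 * logical_block_size is also 4096, then 4 & 7 != 0 and the loop breaks,
 * so the table is rejected as misaligned.
 */
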
int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params)
{
	int r = -EINVAL, argc;
	char **argv;
	struct dm_target *tgt;

	if (t->singleton) {
		DMERR("%s: target type %s must appear alone in table",
		      dm_device_name(t->md), t->targets->type->name);
		return -EINVAL;
	}

	BUG_ON(t->num_targets >= t->num_allocated);

	tgt = t->targets + t->num_targets;
	memset(tgt, 0, sizeof(*tgt));

	if (!len) {
		DMERR("%s: zero-length target", dm_device_name(t->md));
		return -EINVAL;
	}

	tgt->type = dm_get_target_type(type);
	if (!tgt->type) {
		DMERR("%s: %s: unknown target type", dm_device_name(t->md), type);
		return -EINVAL;
	}

	if (dm_target_needs_singleton(tgt->type)) {
		if (t->num_targets) {
			tgt->error = "singleton target type must appear alone in table";
			goto bad;
		}
		t->singleton = true;
	}

	if (dm_target_always_writeable(tgt->type) && !(t->mode & FMODE_WRITE)) {
		tgt->error = "target type may not be included in a read-only table";
		goto bad;
	}

	if (t->immutable_target_type) {
		if (t->immutable_target_type != tgt->type) {
			tgt->error = "immutable target type cannot be mixed with other target types";
			goto bad;
		}
	} else if (dm_target_is_immutable(tgt->type)) {
		if (t->num_targets) {
			tgt->error = "immutable target type cannot be mixed with other target types";
			goto bad;
		}
		t->immutable_target_type = tgt->type;
	}

	if (dm_target_has_integrity(tgt->type))
		t->integrity_added = 1;

	tgt->table = t;
	tgt->begin = start;
	tgt->len = len;
	tgt->error = "Unknown error";

	/*
	 * Does this target adjoin the previous one ?
	 */
	if (!adjoin(t, tgt)) {
		tgt->error = "Gap in table";
		goto bad;
	}

	r = dm_split_args(&argc, &argv, params);
	if (r) {
		tgt->error = "couldn't split parameters (insufficient memory)";
		goto bad;
	}

	r = tgt->type->ctr(tgt, argc, argv);
	kfree(argv);
	if (r)
		goto bad;

	t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;

	if (!tgt->num_discard_bios && tgt->discards_supported)
		DMWARN("%s: %s: ignoring discards_supported because num_discard_bios is zero.",
		       dm_device_name(t->md), type);

	return 0;

 bad:
	DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error);
	dm_put_target_type(tgt->type);
	return r;
}

/*
 * Target argument parsing helpers.
 */
static int validate_next_arg(const struct dm_arg *arg,
			     struct dm_arg_set *arg_set,
			     unsigned *value, char **error, unsigned grouped)
{
	const char *arg_str = dm_shift_arg(arg_set);
	char dummy;

	if (!arg_str ||
	    (sscanf(arg_str, "%u%c", value, &dummy) != 1) ||
	    (*value < arg->min) ||
	    (*value > arg->max) ||
	    (grouped && arg_set->argc < *value)) {
		*error = arg->error;
		return -EINVAL;
	}

	return 0;
}

int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
		unsigned *value, char **error)
{
	return validate_next_arg(arg, arg_set, value, error, 0);
}
EXPORT_SYMBOL(dm_read_arg);

int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
		      unsigned *value, char **error)
{
	return validate_next_arg(arg, arg_set, value, error, 1);
}
EXPORT_SYMBOL(dm_read_arg_group);

const char *dm_shift_arg(struct dm_arg_set *as)
{
	char *r;

	if (as->argc) {
		as->argc--;
		r = *as->argv;
		as->argv++;
		return r;
	}

	return NULL;
}
EXPORT_SYMBOL(dm_shift_arg);

void dm_consume_args(struct dm_arg_set *as, unsigned num_args)
{
	BUG_ON(as->argc < num_args);
	as->argc -= num_args;
	as->argv += num_args;
}
EXPORT_SYMBOL(dm_consume_args);

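/*
 * Typical use (sketch; the bounds and error message are hypothetical):
 *
 *	static const struct dm_arg _args[] = {
 *		{0, 4, "Invalid number of feature args"},
 *	};
 *	unsigned num_features;
 *
 *	r = dm_read_arg_group(_args, &as, &num_features, &ti->error);
 *
 * dm_read_arg_group() additionally requires that at least 'num_features'
 * arguments remain in the set after the count itself.
 */
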
static bool __table_type_bio_based(enum dm_queue_mode table_type)
{
	return (table_type == DM_TYPE_BIO_BASED ||
		table_type == DM_TYPE_DAX_BIO_BASED ||
		table_type == DM_TYPE_NVME_BIO_BASED);
}

static bool __table_type_request_based(enum dm_queue_mode table_type)
{
	return table_type == DM_TYPE_REQUEST_BASED;
}

void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type)
{
	t->type = type;
}
EXPORT_SYMBOL_GPL(dm_table_set_type);

static int device_supports_dax(struct dm_target *ti, struct dm_dev *dev,
			       sector_t start, sector_t len, void *data)
{
	return bdev_dax_supported(dev->bdev, PAGE_SIZE);
}

static bool dm_table_supports_dax(struct dm_table *t)
{
	struct dm_target *ti;
	unsigned i;

	/* Ensure that all targets support DAX. */
	for (i = 0; i < dm_table_get_num_targets(t); i++) {
		ti = dm_table_get_target(t, i);

		if (!ti->type->direct_access)
			return false;

		if (!ti->type->iterate_devices ||
		    !ti->type->iterate_devices(ti, device_supports_dax, NULL))
			return false;
	}

	return true;
}

static bool dm_table_does_not_support_partial_completion(struct dm_table *t);

struct verify_rq_based_data {
	unsigned sq_count;
	unsigned mq_count;
};

static int device_is_rq_based(struct dm_target *ti, struct dm_dev *dev,
			      sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);
	struct verify_rq_based_data *v = data;

	if (queue_is_mq(q))
		v->mq_count++;
	else
		v->sq_count++;

	return queue_is_mq(q);
}

static int dm_table_determine_type(struct dm_table *t)
{
	unsigned i;
	unsigned bio_based = 0, request_based = 0, hybrid = 0;
	struct verify_rq_based_data v = {.sq_count = 0, .mq_count = 0};
	struct dm_target *tgt;
	struct list_head *devices = dm_table_get_devices(t);
	enum dm_queue_mode live_md_type = dm_get_md_type(t->md);

	if (t->type != DM_TYPE_NONE) {
		/* target already set the table's type */
		if (t->type == DM_TYPE_BIO_BASED) {
			/* possibly upgrade to a variant of bio-based */
			goto verify_bio_based;
		}
		BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED);
		BUG_ON(t->type == DM_TYPE_NVME_BIO_BASED);
		goto verify_rq_based;
	}

	for (i = 0; i < t->num_targets; i++) {
		tgt = t->targets + i;
		if (dm_target_hybrid(tgt))
			hybrid = 1;
		else if (dm_target_request_based(tgt))
			request_based = 1;
		else
			bio_based = 1;

		if (bio_based && request_based) {
			DMERR("Inconsistent table: different target types"
			      " can't be mixed up");
			return -EINVAL;
		}
	}

	if (hybrid && !bio_based && !request_based) {
		/*
		 * The targets can work either way.
		 * Determine the type from the live device.
		 * Default to bio-based if device is new.
		 */
		if (__table_type_request_based(live_md_type))
			request_based = 1;
		else
			bio_based = 1;
	}

	if (bio_based) {
verify_bio_based:
		/* We must use this table as bio-based */
		t->type = DM_TYPE_BIO_BASED;
		if (dm_table_supports_dax(t) ||
		    (list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED)) {
			t->type = DM_TYPE_DAX_BIO_BASED;
		} else {
			/* Check if upgrading to NVMe bio-based is valid or required */
			tgt = dm_table_get_immutable_target(t);
			if (tgt && !tgt->max_io_len && dm_table_does_not_support_partial_completion(t)) {
				t->type = DM_TYPE_NVME_BIO_BASED;
				goto verify_rq_based; /* must be stacked directly on NVMe (blk-mq) */
			} else if (list_empty(devices) && live_md_type == DM_TYPE_NVME_BIO_BASED) {
				t->type = DM_TYPE_NVME_BIO_BASED;
			}
		}
		return 0;
	}

	BUG_ON(!request_based); /* No targets in this table */

	t->type = DM_TYPE_REQUEST_BASED;

verify_rq_based:
	/*
	 * Request-based dm supports only tables that have a single target now.
	 * To support multiple targets, request splitting support is needed,
	 * and that needs lots of changes in the block-layer.
	 * (e.g. request completion process for partial completion.)
	 */
	if (t->num_targets > 1) {
		DMERR("%s DM doesn't support multiple targets",
		      t->type == DM_TYPE_NVME_BIO_BASED ? "nvme bio-based" : "request-based");
		return -EINVAL;
	}

	if (list_empty(devices)) {
		int srcu_idx;
		struct dm_table *live_table = dm_get_live_table(t->md, &srcu_idx);

		/* inherit live table's type */
		if (live_table)
			t->type = live_table->type;
		dm_put_live_table(t->md, srcu_idx);
		return 0;
	}

	tgt = dm_table_get_immutable_target(t);
	if (!tgt) {
		DMERR("table load rejected: immutable target is required");
		return -EINVAL;
	} else if (tgt->max_io_len) {
		DMERR("table load rejected: immutable target that splits IO is not supported");
		return -EINVAL;
	}

	/* Non-request-stackable devices can't be used for request-based dm */
	if (!tgt->type->iterate_devices ||
	    !tgt->type->iterate_devices(tgt, device_is_rq_based, &v)) {
		DMERR("table load rejected: including non-request-stackable devices");
		return -EINVAL;
	}
	if (v.sq_count > 0) {
		DMERR("table load rejected: not all devices are blk-mq request-stackable");
		return -EINVAL;
	}

	return 0;
}

enum dm_queue_mode dm_table_get_type(struct dm_table *t)
{
	return t->type;
}

struct target_type *dm_table_get_immutable_target_type(struct dm_table *t)
{
	return t->immutable_target_type;
}

struct dm_target *dm_table_get_immutable_target(struct dm_table *t)
{
	/* Immutable target is implicitly a singleton */
	if (t->num_targets > 1 ||
	    !dm_target_is_immutable(t->targets[0].type))
		return NULL;

	return t->targets;
}

struct dm_target *dm_table_get_wildcard_target(struct dm_table *t)
{
	struct dm_target *ti;
	unsigned i;

	for (i = 0; i < dm_table_get_num_targets(t); i++) {
		ti = dm_table_get_target(t, i);
		if (dm_target_is_wildcard(ti->type))
			return ti;
	}

	return NULL;
}

bool dm_table_bio_based(struct dm_table *t)
{
	return __table_type_bio_based(dm_table_get_type(t));
}

bool dm_table_request_based(struct dm_table *t)
{
	return __table_type_request_based(dm_table_get_type(t));
}

static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
{
	enum dm_queue_mode type = dm_table_get_type(t);
	unsigned per_io_data_size = 0;
	unsigned min_pool_size = 0;
	struct dm_target *ti;
	unsigned i;

	if (unlikely(type == DM_TYPE_NONE)) {
		DMWARN("no table type is set, can't allocate mempools");
		return -EINVAL;
	}

	if (__table_type_bio_based(type))
		for (i = 0; i < t->num_targets; i++) {
			ti = t->targets + i;
			per_io_data_size = max(per_io_data_size, ti->per_io_data_size);
			min_pool_size = max(min_pool_size, ti->num_flush_bios);
		}

	t->mempools = dm_alloc_md_mempools(md, type, t->integrity_supported,
					   per_io_data_size, min_pool_size);
	if (!t->mempools)
		return -ENOMEM;

	return 0;
}

void dm_table_free_md_mempools(struct dm_table *t)
{
	dm_free_md_mempools(t->mempools);
	t->mempools = NULL;
}

struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t)
{
	return t->mempools;
}

static int setup_indexes(struct dm_table *t)
{
	int i;
	unsigned int total = 0;
	sector_t *indexes;

	/* allocate the space for *all* the indexes */
	for (i = t->depth - 2; i >= 0; i--) {
		t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
		total += t->counts[i];
	}

	indexes = (sector_t *) dm_vcalloc(total, (unsigned long) NODE_SIZE);
	if (!indexes)
		return -ENOMEM;

	/* set up internal nodes, bottom-up */
	for (i = t->depth - 2; i >= 0; i--) {
		t->index[i] = indexes;
		indexes += (KEYS_PER_NODE * t->counts[i]);
		setup_btree_index(i, t);
	}

	return 0;
}

/*
 * Builds the btree to index the map.
 */
static int dm_table_build_index(struct dm_table *t)
{
	int r = 0;
	unsigned int leaf_nodes;

	/* how many indexes will the btree have ? */
	leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
	t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);

	/* leaf layer has already been set up */
	t->counts[t->depth - 1] = leaf_nodes;
	t->index[t->depth - 1] = t->highs;

	if (t->depth >= 2)
		r = setup_indexes(t);

	return r;
}

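/*
 * Worked example (assuming KEYS_PER_NODE == 8, CHILDREN_PER_NODE == 9):
 * 1000 targets need dm_div_up(1000, 8) == 125 leaf nodes, so
 * depth == 1 + int_log(125, 9) == 4 and the levels hold 1, 2, 14 and
 * 125 nodes from root to leaves.
 */
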
static bool integrity_profile_exists(struct gendisk *disk)
{
	return !!blk_get_integrity(disk);
}

/*
 * Get a disk whose integrity profile reflects the table's profile.
 * Returns NULL if integrity support was inconsistent or unavailable.
 */
static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t)
{
	struct list_head *devices = dm_table_get_devices(t);
	struct dm_dev_internal *dd = NULL;
	struct gendisk *prev_disk = NULL, *template_disk = NULL;
	unsigned i;

	for (i = 0; i < dm_table_get_num_targets(t); i++) {
		struct dm_target *ti = dm_table_get_target(t, i);
		if (!dm_target_passes_integrity(ti->type))
			goto no_integrity;
	}

	list_for_each_entry(dd, devices, list) {
		template_disk = dd->dm_dev->bdev->bd_disk;
		if (!integrity_profile_exists(template_disk))
			goto no_integrity;
		else if (prev_disk &&
			 blk_integrity_compare(prev_disk, template_disk) < 0)
			goto no_integrity;
		prev_disk = template_disk;
	}

	return template_disk;

no_integrity:
	if (prev_disk)
		DMWARN("%s: integrity not set: %s and %s profile mismatch",
		       dm_device_name(t->md),
		       prev_disk->disk_name,
		       template_disk->disk_name);
	return NULL;
}

/*
 * Register the mapped device for blk_integrity support if the
 * underlying devices have an integrity profile.  But all devices may
 * not have matching profiles (checking all devices isn't reliable
 * during table load because this table may use other DM device(s) which
 * must be resumed before they will have an initialized integrity
 * profile).  Consequently, stacked DM devices force a 2 stage integrity
 * profile validation: First pass during table load, final pass during
 * resume.
 */
static int dm_table_register_integrity(struct dm_table *t)
{
	struct mapped_device *md = t->md;
	struct gendisk *template_disk = NULL;

	/* If target handles integrity itself do not register it here. */
	if (t->integrity_added)
		return 0;

	template_disk = dm_table_get_integrity_disk(t);
	if (!template_disk)
		return 0;

	if (!integrity_profile_exists(dm_disk(md))) {
		t->integrity_supported = true;
		/*
		 * Register integrity profile during table load; we can do
		 * this because the final profile must match during resume.
		 */
		blk_integrity_register(dm_disk(md),
				       blk_get_integrity(template_disk));
		return 0;
	}

	/*
	 * If DM device already has an initialized integrity
	 * profile the new profile should not conflict.
	 */
	if (blk_integrity_compare(dm_disk(md), template_disk) < 0) {
		DMWARN("%s: conflict with existing integrity profile: "
		       "%s profile mismatch",
		       dm_device_name(t->md),
		       template_disk->disk_name);
		return 1;
	}

	/* Preserve existing integrity profile */
	t->integrity_supported = true;
	return 0;
}

/*
 * Prepares the table for use by building the indices,
 * setting the type, and allocating mempools.
 */
int dm_table_complete(struct dm_table *t)
{
	int r;

	r = dm_table_determine_type(t);
	if (r) {
		DMERR("unable to determine table type");
		return r;
	}

	r = dm_table_build_index(t);
	if (r) {
		DMERR("unable to build btrees");
		return r;
	}

	r = dm_table_register_integrity(t);
	if (r) {
		DMERR("could not register integrity profile.");
		return r;
	}

	r = dm_table_alloc_md_mempools(t, t->md);
	if (r)
		DMERR("unable to allocate mempools");

	return r;
}

static DEFINE_MUTEX(_event_lock);
void dm_table_event_callback(struct dm_table *t,
			     void (*fn)(void *), void *context)
{
	mutex_lock(&_event_lock);
	t->event_fn = fn;
	t->event_context = context;
	mutex_unlock(&_event_lock);
}

void dm_table_event(struct dm_table *t)
{
	/*
	 * You can no longer call dm_table_event() from interrupt
	 * context, use a bottom half instead.
	 */
	BUG_ON(in_interrupt());

	mutex_lock(&_event_lock);
	if (t->event_fn)
		t->event_fn(t->event_context);
	mutex_unlock(&_event_lock);
}
EXPORT_SYMBOL(dm_table_event);

sector_t dm_table_get_size(struct dm_table *t)
{
	return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
}
EXPORT_SYMBOL(dm_table_get_size);

struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
{
	if (index >= t->num_targets)
		return NULL;

	return t->targets + index;
}

/*
 * Search the btree for the correct target.
 *
 * Caller should check returned pointer with dm_target_is_valid()
 * to trap I/O beyond end of device.
 */
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
{
	unsigned int l, n = 0, k = 0;
	sector_t *node;

	for (l = 0; l < t->depth; l++) {
		n = get_child(n, k);
		node = get_node(t, l, n);

		for (k = 0; k < KEYS_PER_NODE; k++)
			if (node[k] >= sector)
				break;
	}

	return &t->targets[(KEYS_PER_NODE * n) + k];
}

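/*
 * Lookup sketch: starting from the root (n = 0, k = 0), each level
 * descends via get_child(n, k) and scans the node for the first cached
 * high key >= sector; after the leaf level, n and k index the matching
 * entry in t->targets. Sectors beyond the table land on an unused
 * (sector_t)-1 slot, which callers trap via dm_target_is_valid().
 */
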
static int count_device(struct dm_target *ti, struct dm_dev *dev,
			sector_t start, sector_t len, void *data)
{
	unsigned *num_devices = data;

	(*num_devices)++;

	return 0;
}

/*
 * Check whether a table has no data devices attached using each
 * target's iterate_devices method.
 * Returns false if the result is unknown because a target doesn't
 * support iterate_devices.
 */
bool dm_table_has_no_data_devices(struct dm_table *table)
{
	struct dm_target *ti;
	unsigned i, num_devices;

	for (i = 0; i < dm_table_get_num_targets(table); i++) {
		ti = dm_table_get_target(table, i);

		if (!ti->type->iterate_devices)
			return false;

		num_devices = 0;
		ti->type->iterate_devices(ti, count_device, &num_devices);
		if (num_devices)
			return false;
	}

	return true;
}

static int device_is_zoned_model(struct dm_target *ti, struct dm_dev *dev,
				 sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);
	enum blk_zoned_model *zoned_model = data;

	return q && blk_queue_zoned_model(q) == *zoned_model;
}

static bool dm_table_supports_zoned_model(struct dm_table *t,
					  enum blk_zoned_model zoned_model)
{
	struct dm_target *ti;
	unsigned i;

	for (i = 0; i < dm_table_get_num_targets(t); i++) {
		ti = dm_table_get_target(t, i);

		if (zoned_model == BLK_ZONED_HM &&
		    !dm_target_supports_zoned_hm(ti->type))
			return false;

		if (!ti->type->iterate_devices ||
		    !ti->type->iterate_devices(ti, device_is_zoned_model, &zoned_model))
			return false;
	}

	return true;
}

static int device_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev,
				       sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);
	unsigned int *zone_sectors = data;

	return q && blk_queue_zone_sectors(q) == *zone_sectors;
}

static bool dm_table_matches_zone_sectors(struct dm_table *t,
					  unsigned int zone_sectors)
{
	struct dm_target *ti;
	unsigned i;

	for (i = 0; i < dm_table_get_num_targets(t); i++) {
		ti = dm_table_get_target(t, i);

		if (!ti->type->iterate_devices ||
		    !ti->type->iterate_devices(ti, device_matches_zone_sectors, &zone_sectors))
			return false;
	}

	return true;
}

static int validate_hardware_zoned_model(struct dm_table *table,
					 enum blk_zoned_model zoned_model,
					 unsigned int zone_sectors)
{
	if (zoned_model == BLK_ZONED_NONE)
		return 0;

	if (!dm_table_supports_zoned_model(table, zoned_model)) {
		DMERR("%s: zoned model is not consistent across all devices",
		      dm_device_name(table->md));
		return -EINVAL;
	}

	/* Check zone size validity and compatibility */
	if (!zone_sectors || !is_power_of_2(zone_sectors))
		return -EINVAL;

	if (!dm_table_matches_zone_sectors(table, zone_sectors)) {
		DMERR("%s: zone sectors is not consistent across all devices",
		      dm_device_name(table->md));
		return -EINVAL;
	}

	return 0;
}

/*
 * Establish the new table's queue_limits and validate them.
 */
int dm_calculate_queue_limits(struct dm_table *table,
			      struct queue_limits *limits)
{
	struct dm_target *ti;
	struct queue_limits ti_limits;
	unsigned i;
	enum blk_zoned_model zoned_model = BLK_ZONED_NONE;
	unsigned int zone_sectors = 0;

	blk_set_stacking_limits(limits);

	for (i = 0; i < dm_table_get_num_targets(table); i++) {
		blk_set_stacking_limits(&ti_limits);

		ti = dm_table_get_target(table, i);

		if (!ti->type->iterate_devices)
			goto combine_limits;

		/*
		 * Combine queue limits of all the devices this target uses.
		 */
		ti->type->iterate_devices(ti, dm_set_device_limits,
					  &ti_limits);

		if (zoned_model == BLK_ZONED_NONE && ti_limits.zoned != BLK_ZONED_NONE) {
			/*
			 * After stacking all limits, validate all devices
			 * in table support this zoned model and zone sectors.
			 */
			zoned_model = ti_limits.zoned;
			zone_sectors = ti_limits.chunk_sectors;
		}

		/* Set I/O hints portion of queue limits */
		if (ti->type->io_hints)
			ti->type->io_hints(ti, &ti_limits);

		/*
		 * Check each device area is consistent with the target's
		 * overall queue limits.
		 */
		if (ti->type->iterate_devices(ti, device_area_is_invalid,
					      &ti_limits))
			return -EINVAL;

combine_limits:
		/*
		 * Merge this target's queue limits into the overall limits
		 * for the table.
		 */
		if (blk_stack_limits(limits, &ti_limits, 0) < 0)
			DMWARN("%s: adding target device "
			       "(start sect %llu len %llu) "
			       "caused an alignment inconsistency",
			       dm_device_name(table->md),
			       (unsigned long long) ti->begin,
			       (unsigned long long) ti->len);

		/*
		 * FIXME: this should likely be moved to blk_stack_limits(), would
		 * also eliminate limits->zoned stacking hack in dm_set_device_limits()
		 */
		if (limits->zoned == BLK_ZONED_NONE && ti_limits.zoned != BLK_ZONED_NONE) {
			/*
			 * By default, the stacked limits zoned model is set to
			 * BLK_ZONED_NONE in blk_set_stacking_limits(). Update
			 * this model using the first target model reported
			 * that is not BLK_ZONED_NONE. This will be either the
			 * first target device zoned model or the model reported
			 * by the target .io_hints.
			 */
			limits->zoned = ti_limits.zoned;
		}
	}

	/*
	 * Verify that the zoned model and zone sectors, as determined before
	 * any .io_hints override, are the same across all devices in the table.
	 * - this is especially relevant if .io_hints is emulating a disk-managed
	 *   zoned model (aka BLK_ZONED_NONE) on host-managed zoned block devices.
	 */
	if (limits->zoned != BLK_ZONED_NONE) {
		/*
		 * ...IF the above limits stacking determined a zoned model
		 * validate that all of the table's devices conform to it.
		 */
		zoned_model = limits->zoned;
		zone_sectors = limits->chunk_sectors;
	}
	if (validate_hardware_zoned_model(table, zoned_model, zone_sectors))
		return -EINVAL;

	return validate_hardware_logical_block_alignment(table, limits);
}

/*
 * Verify that all devices have an integrity profile that matches the
 * DM device's registered integrity profile.  If the profiles don't
 * match then unregister the DM device's integrity profile.
 */
static void dm_table_verify_integrity(struct dm_table *t)
{
	struct gendisk *template_disk = NULL;

	if (t->integrity_added)
		return;

	if (t->integrity_supported) {
		/*
		 * Verify that the original integrity profile
		 * matches all the devices in this table.
		 */
		template_disk = dm_table_get_integrity_disk(t);
		if (template_disk &&
		    blk_integrity_compare(dm_disk(t->md), template_disk) >= 0)
			return;
	}

	if (integrity_profile_exists(dm_disk(t->md))) {
		DMWARN("%s: unable to establish an integrity profile",
		       dm_device_name(t->md));
		blk_integrity_unregister(dm_disk(t->md));
	}
}

static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
				sector_t start, sector_t len, void *data)
{
	unsigned long flush = (unsigned long) data;
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return q && (q->queue_flags & flush);
}

static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush)
{
	struct dm_target *ti;
	unsigned i;

	/*
	 * Require at least one underlying device to support flushes.
	 * t->devices includes internal dm devices such as mirror logs
	 * so we need to use iterate_devices here, which targets
	 * supporting flushes must provide.
	 */
	for (i = 0; i < dm_table_get_num_targets(t); i++) {
		ti = dm_table_get_target(t, i);

		if (!ti->num_flush_bios)
			continue;

		if (ti->flush_supported)
			return true;

		if (ti->type->iterate_devices &&
		    ti->type->iterate_devices(ti, device_flush_capable, (void *) flush))
			return true;
	}

	return false;
}

static int device_dax_write_cache_enabled(struct dm_target *ti,
					  struct dm_dev *dev, sector_t start,
					  sector_t len, void *data)
{
	struct dax_device *dax_dev = dev->dax_dev;

	if (!dax_dev)
		return false;

	if (dax_write_cache_enabled(dax_dev))
		return true;
	return false;
}

static int dm_table_supports_dax_write_cache(struct dm_table *t)
{
	struct dm_target *ti;
	unsigned i;

	for (i = 0; i < dm_table_get_num_targets(t); i++) {
		ti = dm_table_get_target(t, i);

		if (ti->type->iterate_devices &&
		    ti->type->iterate_devices(ti,
				device_dax_write_cache_enabled, NULL))
			return true;
	}

	return false;
}

*ti
, struct dm_dev
*dev
,
1686 sector_t start
, sector_t len
, void *data
)
1688 struct request_queue
*q
= bdev_get_queue(dev
->bdev
);
1690 return q
&& blk_queue_nonrot(q
);
1693 static int device_is_not_random(struct dm_target
*ti
, struct dm_dev
*dev
,
1694 sector_t start
, sector_t len
, void *data
)
1696 struct request_queue
*q
= bdev_get_queue(dev
->bdev
);
1698 return q
&& !blk_queue_add_random(q
);
static bool dm_table_all_devices_attribute(struct dm_table *t,
					   iterate_devices_callout_fn func)
{
	struct dm_target *ti;
	unsigned i;

	for (i = 0; i < dm_table_get_num_targets(t); i++) {
		ti = dm_table_get_target(t, i);

		if (!ti->type->iterate_devices ||
		    !ti->type->iterate_devices(ti, func, NULL))
			return false;
	}

	return true;
}

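/*
 * Note: 'func' is a positive predicate here: every target must provide
 * iterate_devices and 'func' must return nonzero for every underlying
 * device for the attribute to be treated as table-wide.
 */
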
static int device_no_partial_completion(struct dm_target *ti, struct dm_dev *dev,
					sector_t start, sector_t len, void *data)
{
	char b[BDEVNAME_SIZE];

	/* For now, NVMe devices are the only devices of this class */
	return (strncmp(bdevname(dev->bdev, b), "nvme", 4) == 0);
}

static bool dm_table_does_not_support_partial_completion(struct dm_table *t)
{
	return dm_table_all_devices_attribute(t, device_no_partial_completion);
}

static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
					 sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return q && !q->limits.max_write_same_sectors;
}

static bool dm_table_supports_write_same(struct dm_table *t)
{
	struct dm_target *ti;
	unsigned i;

	for (i = 0; i < dm_table_get_num_targets(t); i++) {
		ti = dm_table_get_target(t, i);

		if (!ti->num_write_same_bios)
			return false;

		if (!ti->type->iterate_devices ||
		    ti->type->iterate_devices(ti, device_not_write_same_capable, NULL))
			return false;
	}

	return true;
}

static int device_not_write_zeroes_capable(struct dm_target *ti, struct dm_dev *dev,
					   sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return q && !q->limits.max_write_zeroes_sectors;
}

static bool dm_table_supports_write_zeroes(struct dm_table *t)
{
	struct dm_target *ti;
	unsigned i = 0;

	while (i < dm_table_get_num_targets(t)) {
		ti = dm_table_get_target(t, i++);

		if (!ti->num_write_zeroes_bios)
			return false;

		if (!ti->type->iterate_devices ||
		    ti->type->iterate_devices(ti, device_not_write_zeroes_capable, NULL))
			return false;
	}

	return true;
}

static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev,
				      sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return q && !blk_queue_discard(q);
}

static bool dm_table_supports_discards(struct dm_table *t)
{
	struct dm_target *ti;
	unsigned i;

	for (i = 0; i < dm_table_get_num_targets(t); i++) {
		ti = dm_table_get_target(t, i);

		if (!ti->num_discard_bios)
			return false;

		/*
		 * Either the target provides discard support (as implied by setting
		 * 'discards_supported') or it relies on _all_ data devices having
		 * discard support.
		 */
		if (!ti->discards_supported &&
		    (!ti->type->iterate_devices ||
		     ti->type->iterate_devices(ti, device_not_discard_capable, NULL)))
			return false;
	}

	return true;
}

static int device_not_secure_erase_capable(struct dm_target *ti,
					   struct dm_dev *dev, sector_t start,
					   sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return q && !blk_queue_secure_erase(q);
}

static bool dm_table_supports_secure_erase(struct dm_table *t)
{
	struct dm_target *ti;
	unsigned int i;

	for (i = 0; i < dm_table_get_num_targets(t); i++) {
		ti = dm_table_get_target(t, i);

		if (!ti->num_secure_erase_bios)
			return false;

		if (!ti->type->iterate_devices ||
		    ti->type->iterate_devices(ti, device_not_secure_erase_capable, NULL))
			return false;
	}

	return true;
}

void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
			       struct queue_limits *limits)
{
	bool wc = false, fua = false;

	/*
	 * Copy table's limits to the DM device's request_queue
	 */
	q->limits = *limits;

	if (!dm_table_supports_discards(t)) {
		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
		/* Must also clear discard limits... */
		q->limits.max_discard_sectors = 0;
		q->limits.max_hw_discard_sectors = 0;
		q->limits.discard_granularity = 0;
		q->limits.discard_alignment = 0;
		q->limits.discard_misaligned = 0;
	} else
		blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);

	if (dm_table_supports_secure_erase(t))
		blk_queue_flag_set(QUEUE_FLAG_SECERASE, q);

	if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
		wc = true;
		if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_FUA)))
			fua = true;
	}
	blk_queue_write_cache(q, wc, fua);

	if (dm_table_supports_dax(t))
		blk_queue_flag_set(QUEUE_FLAG_DAX, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_DAX, q);

	if (dm_table_supports_dax_write_cache(t))
		dax_write_cache(t->md->dax_dev, true);

	/* Ensure that all underlying devices are non-rotational. */
	if (dm_table_all_devices_attribute(t, device_is_nonrot))
		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);

	if (!dm_table_supports_write_same(t))
		q->limits.max_write_same_sectors = 0;
	if (!dm_table_supports_write_zeroes(t))
		q->limits.max_write_zeroes_sectors = 0;

	dm_table_verify_integrity(t);

	/*
	 * Determine whether or not this queue's I/O timings contribute
	 * to the entropy pool.  Only request-based targets use this.
	 * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
	 * have it set.
	 */
	if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
		blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);

	/*
	 * For a zoned target, the number of zones should be updated for the
	 * correct value to be exposed in sysfs queue/nr_zones. For a BIO based
	 * target, this is all that is needed. For a request based target, the
	 * queue zone bitmaps must also be updated.
	 * Use blk_revalidate_disk_zones() to handle this.
	 */
	if (blk_queue_is_zoned(q))
		blk_revalidate_disk_zones(t->md->disk);

	/* Allow reads to exceed readahead limits */
	q->backing_dev_info->io_pages = limits->max_sectors >> (PAGE_SHIFT - 9);
}

unsigned int dm_table_get_num_targets(struct dm_table *t)
{
	return t->num_targets;
}

struct list_head *dm_table_get_devices(struct dm_table *t)
{
	return &t->devices;
}

fmode_t dm_table_get_mode(struct dm_table *t)
{
	return t->mode;
}
EXPORT_SYMBOL(dm_table_get_mode);

enum suspend_mode {
	PRESUSPEND,
	PRESUSPEND_UNDO,
	POSTSUSPEND,
};

static void suspend_targets(struct dm_table *t, enum suspend_mode mode)
{
	int i = t->num_targets;
	struct dm_target *ti = t->targets;

	lockdep_assert_held(&t->md->suspend_lock);

	while (i--) {
		switch (mode) {
		case PRESUSPEND:
			if (ti->type->presuspend)
				ti->type->presuspend(ti);
			break;
		case PRESUSPEND_UNDO:
			if (ti->type->presuspend_undo)
				ti->type->presuspend_undo(ti);
			break;
		case POSTSUSPEND:
			if (ti->type->postsuspend)
				ti->type->postsuspend(ti);
			break;
		}
		ti++;
	}
}

void dm_table_presuspend_targets(struct dm_table *t)
{
	if (!t)
		return;

	suspend_targets(t, PRESUSPEND);
}

void dm_table_presuspend_undo_targets(struct dm_table *t)
{
	if (!t)
		return;

	suspend_targets(t, PRESUSPEND_UNDO);
}

void dm_table_postsuspend_targets(struct dm_table *t)
{
	if (!t)
		return;

	suspend_targets(t, POSTSUSPEND);
}

int dm_table_resume_targets(struct dm_table *t)
{
	int i, r;

	lockdep_assert_held(&t->md->suspend_lock);

	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = t->targets + i;

		if (!ti->type->preresume)
			continue;

		r = ti->type->preresume(ti);
		if (r) {
			DMERR("%s: %s: preresume failed, error = %d",
			      dm_device_name(t->md), ti->type->name, r);
			return r;
		}
	}

	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = t->targets + i;

		if (ti->type->resume)
			ti->type->resume(ti);
	}

	return 0;
}

void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb)
{
	list_add(&cb->list, &t->target_callbacks);
}
EXPORT_SYMBOL_GPL(dm_table_add_target_callbacks);

int dm_table_any_congested(struct dm_table *t, int bdi_bits)
{
	struct dm_dev_internal *dd;
	struct list_head *devices = dm_table_get_devices(t);
	struct dm_target_callbacks *cb;
	int r = 0;

	list_for_each_entry(dd, devices, list) {
		struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev);
		char b[BDEVNAME_SIZE];

		if (likely(q))
			r |= bdi_congested(q->backing_dev_info, bdi_bits);
		else
			DMWARN_LIMIT("%s: any_congested: nonexistent device %s",
				     dm_device_name(t->md),
				     bdevname(dd->dm_dev->bdev, b));
	}

	list_for_each_entry(cb, &t->target_callbacks, list)
		if (cb->congested_fn)
			r |= cb->congested_fn(cb, bdi_bits);

	return r;
}

struct mapped_device *dm_table_get_md(struct dm_table *t)
{
	return t->md;
}
EXPORT_SYMBOL(dm_table_get_md);

const char *dm_table_device_name(struct dm_table *t)
{
	return dm_device_name(t->md);
}
EXPORT_SYMBOL_GPL(dm_table_device_name);

void dm_table_run_md_queue_async(struct dm_table *t)
{
	struct mapped_device *md;
	struct request_queue *queue;

	if (!dm_table_request_based(t))
		return;

	md = dm_table_get_md(t);
	queue = dm_get_md_queue(md);
	if (queue)
		blk_mq_run_hw_queues(queue, true);
}
EXPORT_SYMBOL(dm_table_run_md_queue_async);