/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/blk-mq.h>
#include <linux/mount.h>
#include <linux/dax.h>

#define DM_MSG_PREFIX "table"

#define NODE_SIZE L1_CACHE_BYTES
#define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)

/*
 * Similar to ceiling(log_base(n))
 */
static unsigned int int_log(unsigned int n, unsigned int base)
{
        int result = 0;

        while (n > 1) {
                n = dm_div_up(n, base);
                result++;
        }

        return result;
}
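
/*
 * Worked example (illustrative, assuming 64-byte cache lines and an
 * 8-byte sector_t): KEYS_PER_NODE is 8 and CHILDREN_PER_NODE is 9, so
 * int_log(100, 9) iterates 100 -> 12 -> 2 -> 1 and returns 3, i.e.
 * ceil(log9(100)) -- the number of btree levels needed above 100 leaf
 * nodes.
 */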

/*
 * Calculate the index of the child node of the n'th node's k'th key.
 */
static inline unsigned int get_child(unsigned int n, unsigned int k)
{
        return (n * CHILDREN_PER_NODE) + k;
}

/*
 * Return the n'th node of level l from table t.
 */
static inline sector_t *get_node(struct dm_table *t,
                                 unsigned int l, unsigned int n)
{
        return t->index[l] + (n * KEYS_PER_NODE);
}
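
/*
 * Illustrative layout, assuming KEYS_PER_NODE == 8: every level of the
 * index is one flat array of sector_t, and node n occupies slots
 * [n * 8, n * 8 + 7] of its level.  Key k of node n then points at
 * child node get_child(n, k) == n * 9 + k on the level below.
 */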

/*
 * Return the highest key that you could look up from the n'th
 * node on level l of the btree.
 */
static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
{
        for (; l < t->depth - 1; l++)
                n = get_child(n, CHILDREN_PER_NODE - 1);

        if (n >= t->counts[l])
                return (sector_t) -1;

        return get_node(t, l, n)[KEYS_PER_NODE - 1];
}

/*
 * Fills in a level of the btree based on the highs of the level
 * below it.
 */
static int setup_btree_index(unsigned int l, struct dm_table *t)
{
        unsigned int n, k;
        sector_t *node;

        for (n = 0U; n < t->counts[l]; n++) {
                node = get_node(t, l, n);

                for (k = 0U; k < KEYS_PER_NODE; k++)
                        node[k] = high(t, l + 1, get_child(n, k));
        }

        return 0;
}

/*
 * highs and targets are managed as dynamic arrays during a
 * table load.
 */
static int alloc_targets(struct dm_table *t, unsigned int num)
{
        sector_t *n_highs;
        struct dm_target *n_targets;

        /*
         * Allocate both the target array and offset array at once.
         */
        n_highs = kvcalloc(num, sizeof(struct dm_target) + sizeof(sector_t),
                           GFP_KERNEL);
        if (!n_highs)
                return -ENOMEM;

        n_targets = (struct dm_target *) (n_highs + num);

        memset(n_highs, -1, sizeof(*n_highs) * num);
        kvfree(t->highs);

        t->num_allocated = num;
        t->highs = n_highs;
        t->targets = n_targets;

        return 0;
}
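
/*
 * Layout of the single allocation made above (sketch):
 *
 *     n_highs[0] ... n_highs[num - 1]      last sector of each target
 *     n_targets[0] ... n_targets[num - 1]  the dm_target structs
 *
 * Freeing t->highs therefore releases both arrays at once.
 */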

int dm_table_create(struct dm_table **result, fmode_t mode,
                    unsigned num_targets, struct mapped_device *md)
{
        struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);

        if (!t)
                return -ENOMEM;

        INIT_LIST_HEAD(&t->devices);

        if (!num_targets)
                num_targets = KEYS_PER_NODE;

        num_targets = dm_round_up(num_targets, KEYS_PER_NODE);

        if (!num_targets) {
                kfree(t);
                return -ENOMEM;
        }

        if (alloc_targets(t, num_targets)) {
                kfree(t);
                return -ENOMEM;
        }

        t->type = DM_TYPE_NONE;
        t->mode = mode;
        t->md = md;
        *result = t;
        return 0;
}

static void free_devices(struct list_head *devices, struct mapped_device *md)
{
        struct list_head *tmp, *next;

        list_for_each_safe(tmp, next, devices) {
                struct dm_dev_internal *dd =
                    list_entry(tmp, struct dm_dev_internal, list);

                DMWARN("%s: dm_table_destroy: dm_put_device call missing for %s",
                       dm_device_name(md), dd->dm_dev->name);
                dm_put_table_device(md, dd->dm_dev);
                kfree(dd);
        }
}

static void dm_table_destroy_keyslot_manager(struct dm_table *t);

void dm_table_destroy(struct dm_table *t)
{
        unsigned int i;

        if (!t)
                return;

        /* free the indexes */
        if (t->depth >= 2)
                kvfree(t->index[t->depth - 2]);

        /* free the targets */
        for (i = 0; i < t->num_targets; i++) {
                struct dm_target *tgt = t->targets + i;

                if (tgt->type->dtr)
                        tgt->type->dtr(tgt);

                dm_put_target_type(tgt->type);
        }

        kvfree(t->highs);

        /* free the device list */
        free_devices(&t->devices, t->md);

        dm_free_md_mempools(t->mempools);

        dm_table_destroy_keyslot_manager(t);

        kfree(t);
}

/*
 * See if we've already got a device in the list.
 */
static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
{
        struct dm_dev_internal *dd;

        list_for_each_entry(dd, l, list)
                if (dd->dm_dev->bdev->bd_dev == dev)
                        return dd;

        return NULL;
}

/*
 * If possible, this checks that an area of the destination device is invalid.
 */
static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
                                  sector_t start, sector_t len, void *data)
{
        struct queue_limits *limits = data;
        struct block_device *bdev = dev->bdev;
        sector_t dev_size =
                i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
        unsigned short logical_block_size_sectors =
                limits->logical_block_size >> SECTOR_SHIFT;
        char b[BDEVNAME_SIZE];

        if (!dev_size)
                return 0;

        if ((start >= dev_size) || (start + len > dev_size)) {
                DMWARN("%s: %s too small for target: "
                       "start=%llu, len=%llu, dev_size=%llu",
                       dm_device_name(ti->table->md), bdevname(bdev, b),
                       (unsigned long long)start,
                       (unsigned long long)len,
                       (unsigned long long)dev_size);
                return 1;
        }

        /*
         * If the target is mapped to zoned block device(s), check
         * that the zones are not partially mapped.
         */
        if (bdev_is_zoned(bdev)) {
                unsigned int zone_sectors = bdev_zone_sectors(bdev);

                if (start & (zone_sectors - 1)) {
                        DMWARN("%s: start=%llu not aligned to h/w zone size %u of %s",
                               dm_device_name(ti->table->md),
                               (unsigned long long)start,
                               zone_sectors, bdevname(bdev, b));
                        return 1;
                }

                /*
                 * Note: The last zone of a zoned block device may be smaller
                 * than other zones. So for a target mapping the end of a
                 * zoned block device with such a zone, len would not be zone
                 * aligned. We do not allow such last smaller zone to be part
                 * of the mapping here to ensure that mappings with multiple
                 * devices do not end up with a smaller zone in the middle of
                 * the sector range.
                 */
                if (len & (zone_sectors - 1)) {
                        DMWARN("%s: len=%llu not aligned to h/w zone size %u of %s",
                               dm_device_name(ti->table->md),
                               (unsigned long long)len,
                               zone_sectors, bdevname(bdev, b));
                        return 1;
                }
        }

        if (logical_block_size_sectors <= 1)
                return 0;

        if (start & (logical_block_size_sectors - 1)) {
                DMWARN("%s: start=%llu not aligned to h/w "
                       "logical block size %u of %s",
                       dm_device_name(ti->table->md),
                       (unsigned long long)start,
                       limits->logical_block_size, bdevname(bdev, b));
                return 1;
        }

        if (len & (logical_block_size_sectors - 1)) {
                DMWARN("%s: len=%llu not aligned to h/w "
                       "logical block size %u of %s",
                       dm_device_name(ti->table->md),
                       (unsigned long long)len,
                       limits->logical_block_size, bdevname(bdev, b));
                return 1;
        }

        return 0;
}

/*
 * This upgrades the mode on an already open dm_dev, being
 * careful to leave things as they were if we fail to reopen the
 * device and not to touch the existing bdev field in case
 * it is accessed concurrently.
 */
static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
                        struct mapped_device *md)
{
        int r;
        struct dm_dev *old_dev, *new_dev;

        old_dev = dd->dm_dev;

        r = dm_get_table_device(md, dd->dm_dev->bdev->bd_dev,
                                dd->dm_dev->mode | new_mode, &new_dev);
        if (r)
                return r;

        dd->dm_dev = new_dev;
        dm_put_table_device(md, old_dev);

        return 0;
}

/*
 * Convert the path to a device
 */
dev_t dm_get_dev_t(const char *path)
{
        dev_t dev;

        if (lookup_bdev(path, &dev))
                dev = name_to_dev_t(path);
        return dev;
}
EXPORT_SYMBOL_GPL(dm_get_dev_t);
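
/*
 * Example (illustrative): dm_get_dev_t("/dev/sda1") normally resolves
 * through lookup_bdev(); if that fails (e.g. before the device node
 * exists), name_to_dev_t() is tried as a fallback, which can also
 * parse "major:minor" style names.
 */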

/*
 * Add a device to the list, or just increment the usage count if
 * it's already present.
 */
int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
                  struct dm_dev **result)
{
        int r;
        dev_t dev;
        unsigned int major, minor;
        char dummy;
        struct dm_dev_internal *dd;
        struct dm_table *t = ti->table;

        BUG_ON(!t);

        if (sscanf(path, "%u:%u%c", &major, &minor, &dummy) == 2) {
                /* Extract the major/minor numbers */
                dev = MKDEV(major, minor);
                if (MAJOR(dev) != major || MINOR(dev) != minor)
                        return -EOVERFLOW;
        } else {
                dev = dm_get_dev_t(path);
                if (!dev)
                        return -ENODEV;
        }

        dd = find_device(&t->devices, dev);
        if (!dd) {
                dd = kmalloc(sizeof(*dd), GFP_KERNEL);
                if (!dd)
                        return -ENOMEM;

                if ((r = dm_get_table_device(t->md, dev, mode, &dd->dm_dev))) {
                        kfree(dd);
                        return r;
                }

                refcount_set(&dd->count, 1);
                list_add(&dd->list, &t->devices);
                goto out;

        } else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) {
                r = upgrade_mode(dd, mode, t->md);
                if (r)
                        return r;
        }
        refcount_inc(&dd->count);
out:
        *result = dd->dm_dev;
        return 0;
}
EXPORT_SYMBOL(dm_get_device);
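
/*
 * Typical use from a target constructor (a sketch of the common
 * pattern, not code from this file):
 *
 *      struct dm_dev *dev;
 *
 *      if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
 *                        &dev)) {
 *              ti->error = "Device lookup failed";
 *              return -EINVAL;
 *      }
 *
 * The matching dm_put_device(ti, dev) belongs in the target's dtr.
 */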

static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
                                sector_t start, sector_t len, void *data)
{
        struct queue_limits *limits = data;
        struct block_device *bdev = dev->bdev;
        struct request_queue *q = bdev_get_queue(bdev);
        char b[BDEVNAME_SIZE];

        if (unlikely(!q)) {
                DMWARN("%s: Cannot set limits for nonexistent device %s",
                       dm_device_name(ti->table->md), bdevname(bdev, b));
                return 0;
        }

        if (blk_stack_limits(limits, &q->limits,
                             get_start_sect(bdev) + start) < 0)
                DMWARN("%s: adding target device %s caused an alignment inconsistency: "
                       "physical_block_size=%u, logical_block_size=%u, "
                       "alignment_offset=%u, start=%llu",
                       dm_device_name(ti->table->md), bdevname(bdev, b),
                       q->limits.physical_block_size,
                       q->limits.logical_block_size,
                       q->limits.alignment_offset,
                       (unsigned long long) start << SECTOR_SHIFT);
        return 0;
}

/*
 * Decrement a device's use count and remove it if necessary.
 */
void dm_put_device(struct dm_target *ti, struct dm_dev *d)
{
        int found = 0;
        struct list_head *devices = &ti->table->devices;
        struct dm_dev_internal *dd;

        list_for_each_entry(dd, devices, list) {
                if (dd->dm_dev == d) {
                        found = 1;
                        break;
                }
        }
        if (!found) {
                DMWARN("%s: device %s not in table devices list",
                       dm_device_name(ti->table->md), d->name);
                return;
        }
        if (refcount_dec_and_test(&dd->count)) {
                dm_put_table_device(ti->table->md, d);
                list_del(&dd->list);
                kfree(dd);
        }
}
EXPORT_SYMBOL(dm_put_device);

/*
 * Checks to see if the target joins onto the end of the table.
 */
static int adjoin(struct dm_table *table, struct dm_target *ti)
{
        struct dm_target *prev;

        if (!table->num_targets)
                return !ti->begin;

        prev = &table->targets[table->num_targets - 1];
        return (ti->begin == (prev->begin + prev->len));
}

/*
 * Used to dynamically allocate the arg array.
 *
 * We do first allocation with GFP_NOIO because dm-mpath and dm-thin must
 * process messages even if some device is suspended. These messages have a
 * small fixed number of arguments.
 *
 * On the other hand, dm-switch needs to process bulk data using messages and
 * excessive use of GFP_NOIO could cause trouble.
 */
static char **realloc_argv(unsigned *size, char **old_argv)
{
        char **argv;
        unsigned new_size;
        gfp_t gfp;

        if (*size) {
                new_size = *size * 2;
                gfp = GFP_KERNEL;
        } else {
                new_size = 8;
                gfp = GFP_NOIO;
        }
        argv = kmalloc_array(new_size, sizeof(*argv), gfp);
        if (argv && old_argv) {
                memcpy(argv, old_argv, *size * sizeof(*argv));
                *size = new_size;
        }

        kfree(old_argv);
        return argv;
}

/*
 * Destructively splits up the argument list to pass to ctr.
 */
int dm_split_args(int *argc, char ***argvp, char *input)
{
        char *start, *end = input, *out, **argv = NULL;
        unsigned array_size = 0;

        *argc = 0;

        if (!input) {
                *argvp = NULL;
                return 0;
        }

        argv = realloc_argv(&array_size, argv);
        if (!argv)
                return -ENOMEM;

        while (1) {
                /* Skip whitespace */
                start = skip_spaces(end);

                if (!*start)
                        break;  /* success, we hit the end */

                /* 'out' is used to strip backslash escapes */
                end = out = start;
                while (*end) {
                        /* Everything apart from '\0' can be quoted */
                        if (*end == '\\' && *(end + 1)) {
                                *out++ = *(end + 1);
                                end += 2;
                                continue;
                        }

                        if (isspace(*end))
                                break;  /* end of token */

                        *out++ = *end++;
                }

                /* have we already filled the array ? */
                if ((*argc + 1) > array_size) {
                        argv = realloc_argv(&array_size, argv);
                        if (!argv)
                                return -ENOMEM;
                }

                /* we know this is whitespace */
                if (*end)
                        end++;

                /* terminate the string and put it in the array */
                *out = '\0';
                argv[*argc] = start;
                (*argc)++;
        }

        *argvp = argv;
        return 0;
}
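
/*
 * Example (illustrative): the input "linear\ dev 0" splits into
 * argc == 2 with argv[0] == "linear dev" and argv[1] == "0"; the
 * backslash escapes the space.  The split is destructive: '\0'
 * terminators are written into the input buffer itself.
 */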

/*
 * Impose necessary and sufficient conditions on a device's table such
 * that any incoming bio which respects its logical_block_size can be
 * processed successfully.  If it falls across the boundary between
 * two or more targets, the size of each piece it gets split into must
 * be compatible with the logical_block_size of the target processing it.
 */
static int validate_hardware_logical_block_alignment(struct dm_table *table,
                                                     struct queue_limits *limits)
{
        /*
         * This function uses arithmetic modulo the logical_block_size
         * (in units of 512-byte sectors).
         */
        unsigned short device_logical_block_size_sects =
                limits->logical_block_size >> SECTOR_SHIFT;

        /*
         * Offset of the start of the next table entry, mod logical_block_size.
         */
        unsigned short next_target_start = 0;

        /*
         * Given an aligned bio that extends beyond the end of a
         * target, how many sectors must the next target handle?
         */
        unsigned short remaining = 0;

        struct dm_target *ti;
        struct queue_limits ti_limits;
        unsigned i;

        /*
         * Check each entry in the table in turn.
         */
        for (i = 0; i < dm_table_get_num_targets(table); i++) {
                ti = dm_table_get_target(table, i);

                blk_set_stacking_limits(&ti_limits);

                /* combine all target devices' limits */
                if (ti->type->iterate_devices)
                        ti->type->iterate_devices(ti, dm_set_device_limits,
                                                  &ti_limits);

                /*
                 * If the remaining sectors fall entirely within this
                 * table entry are they compatible with its logical_block_size?
                 */
                if (remaining < ti->len &&
                    remaining & ((ti_limits.logical_block_size >>
                                  SECTOR_SHIFT) - 1))
                        break;  /* Error */

                next_target_start =
                    (unsigned short) ((next_target_start + ti->len) &
                                      (device_logical_block_size_sects - 1));
                remaining = next_target_start ?
                    device_logical_block_size_sects - next_target_start : 0;
        }

        if (remaining) {
                DMWARN("%s: table line %u (start sect %llu len %llu) "
                       "not aligned to h/w logical block size %u",
                       dm_device_name(table->md), i,
                       (unsigned long long) ti->begin,
                       (unsigned long long) ti->len,
                       limits->logical_block_size);
                return -EINVAL;
        }

        return 0;
}
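
/*
 * Worked example (illustrative): with a 4096-byte logical block size,
 * device_logical_block_size_sects is 8.  A first target 24 sectors
 * long leaves next_target_start == 0 and remaining == 0 (aligned); a
 * first target 20 sectors long leaves next_target_start == 4 and
 * remaining == 4, so the next target must be able to finish those 4
 * sectors, i.e. its logical block size may be at most 2048 bytes.
 */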

int dm_table_add_target(struct dm_table *t, const char *type,
                        sector_t start, sector_t len, char *params)
{
        int r = -EINVAL, argc;
        char **argv;
        struct dm_target *tgt;

        if (t->singleton) {
                DMERR("%s: target type %s must appear alone in table",
                      dm_device_name(t->md), t->targets->type->name);
                return -EINVAL;
        }

        BUG_ON(t->num_targets >= t->num_allocated);

        tgt = t->targets + t->num_targets;
        memset(tgt, 0, sizeof(*tgt));

        if (!len) {
                DMERR("%s: zero-length target", dm_device_name(t->md));
                return -EINVAL;
        }

        tgt->type = dm_get_target_type(type);
        if (!tgt->type) {
                DMERR("%s: %s: unknown target type", dm_device_name(t->md), type);
                return -EINVAL;
        }

        if (dm_target_needs_singleton(tgt->type)) {
                if (t->num_targets) {
                        tgt->error = "singleton target type must appear alone in table";
                        goto bad;
                }
                t->singleton = true;
        }

        if (dm_target_always_writeable(tgt->type) && !(t->mode & FMODE_WRITE)) {
                tgt->error = "target type may not be included in a read-only table";
                goto bad;
        }

        if (t->immutable_target_type) {
                if (t->immutable_target_type != tgt->type) {
                        tgt->error = "immutable target type cannot be mixed with other target types";
                        goto bad;
                }
        } else if (dm_target_is_immutable(tgt->type)) {
                if (t->num_targets) {
                        tgt->error = "immutable target type cannot be mixed with other target types";
                        goto bad;
                }
                t->immutable_target_type = tgt->type;
        }

        if (dm_target_has_integrity(tgt->type))
                t->integrity_added = 1;

        tgt->table = t;
        tgt->begin = start;
        tgt->len = len;
        tgt->error = "Unknown error";

        /*
         * Does this target adjoin the previous one ?
         */
        if (!adjoin(t, tgt)) {
                tgt->error = "Gap in table";
                goto bad;
        }

        r = dm_split_args(&argc, &argv, params);
        if (r) {
                tgt->error = "couldn't split parameters (insufficient memory)";
                goto bad;
        }

        r = tgt->type->ctr(tgt, argc, argv);
        kfree(argv);
        if (r)
                goto bad;

        t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;

        if (!tgt->num_discard_bios && tgt->discards_supported)
                DMWARN("%s: %s: ignoring discards_supported because num_discard_bios is zero.",
                       dm_device_name(t->md), type);

        return 0;

 bad:
        DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error);
        dm_put_target_type(tgt->type);
        return r;
}
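
/*
 * Example (illustrative): a table line such as
 *
 *      0 409600 linear /dev/sda 8192
 *
 * arrives here via dm-ioctl roughly as
 * dm_table_add_target(t, "linear", 0, 409600, "/dev/sda 8192");
 * the params string is then split and handed to the linear target's ctr.
 */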

/*
 * Target argument parsing helpers.
 */
static int validate_next_arg(const struct dm_arg *arg,
                             struct dm_arg_set *arg_set,
                             unsigned *value, char **error, unsigned grouped)
{
        const char *arg_str = dm_shift_arg(arg_set);
        char dummy;

        if (!arg_str ||
            (sscanf(arg_str, "%u%c", value, &dummy) != 1) ||
            (*value < arg->min) ||
            (*value > arg->max) ||
            (grouped && arg_set->argc < *value)) {
                *error = arg->error;
                return -EINVAL;
        }

        return 0;
}

int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
                unsigned *value, char **error)
{
        return validate_next_arg(arg, arg_set, value, error, 0);
}
EXPORT_SYMBOL(dm_read_arg);

int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
                      unsigned *value, char **error)
{
        return validate_next_arg(arg, arg_set, value, error, 1);
}
EXPORT_SYMBOL(dm_read_arg_group);

const char *dm_shift_arg(struct dm_arg_set *as)
{
        char *r;

        if (as->argc) {
                as->argc--;
                r = *as->argv;
                as->argv++;
                return r;
        }

        return NULL;
}
EXPORT_SYMBOL(dm_shift_arg);

void dm_consume_args(struct dm_arg_set *as, unsigned num_args)
{
        BUG_ON(as->argc < num_args);
        as->argc -= num_args;
        as->argv += num_args;
}
EXPORT_SYMBOL(dm_consume_args);
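
/*
 * Sketch of how a target ctr typically consumes these helpers (the
 * feature-arg names are hypothetical):
 *
 *      static const struct dm_arg _args[] = {
 *              {0, 2, "Invalid number of feature arguments"},
 *      };
 *      unsigned argc;
 *      char *error;
 *
 *      if (dm_read_arg_group(_args, as, &argc, &error))
 *              return -EINVAL;
 *      while (argc--) {
 *              const char *arg_name = dm_shift_arg(as);
 *              // handle "writethrough", "no_discard", ...
 *      }
 */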

static bool __table_type_bio_based(enum dm_queue_mode table_type)
{
        return (table_type == DM_TYPE_BIO_BASED ||
                table_type == DM_TYPE_DAX_BIO_BASED);
}

static bool __table_type_request_based(enum dm_queue_mode table_type)
{
        return table_type == DM_TYPE_REQUEST_BASED;
}

void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type)
{
        t->type = type;
}
EXPORT_SYMBOL_GPL(dm_table_set_type);

/* validate the dax capability of the target device span */
int device_not_dax_capable(struct dm_target *ti, struct dm_dev *dev,
                           sector_t start, sector_t len, void *data)
{
        int blocksize = *(int *) data;

        return !dax_supported(dev->dax_dev, dev->bdev, blocksize, start, len);
}

/* Check devices support synchronous DAX */
static int device_not_dax_synchronous_capable(struct dm_target *ti, struct dm_dev *dev,
                                              sector_t start, sector_t len, void *data)
{
        return !dev->dax_dev || !dax_synchronous(dev->dax_dev);
}

bool dm_table_supports_dax(struct dm_table *t,
                           iterate_devices_callout_fn iterate_fn, int *blocksize)
{
        struct dm_target *ti;
        unsigned i;

        /* Ensure that all targets support DAX. */
        for (i = 0; i < dm_table_get_num_targets(t); i++) {
                ti = dm_table_get_target(t, i);

                if (!ti->type->direct_access)
                        return false;

                if (!ti->type->iterate_devices ||
                    ti->type->iterate_devices(ti, iterate_fn, blocksize))
                        return false;
        }

        return true;
}

static int device_is_rq_stackable(struct dm_target *ti, struct dm_dev *dev,
                                  sector_t start, sector_t len, void *data)
{
        struct block_device *bdev = dev->bdev;
        struct request_queue *q = bdev_get_queue(bdev);

        /* request-based cannot stack on partitions! */
        if (bdev_is_partition(bdev))
                return false;

        return queue_is_mq(q);
}

static int dm_table_determine_type(struct dm_table *t)
{
        unsigned i;
        unsigned bio_based = 0, request_based = 0, hybrid = 0;
        struct dm_target *tgt;
        struct list_head *devices = dm_table_get_devices(t);
        enum dm_queue_mode live_md_type = dm_get_md_type(t->md);
        int page_size = PAGE_SIZE;

        if (t->type != DM_TYPE_NONE) {
                /* target already set the table's type */
                if (t->type == DM_TYPE_BIO_BASED) {
                        /* possibly upgrade to a variant of bio-based */
                        goto verify_bio_based;
                }
                BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED);
                goto verify_rq_based;
        }

        for (i = 0; i < t->num_targets; i++) {
                tgt = t->targets + i;
                if (dm_target_hybrid(tgt))
                        hybrid = 1;
                else if (dm_target_request_based(tgt))
                        request_based = 1;
                else
                        bio_based = 1;

                if (bio_based && request_based) {
                        DMERR("Inconsistent table: different target types can't be mixed up");
                        return -EINVAL;
                }
        }

        if (hybrid && !bio_based && !request_based) {
                /*
                 * The targets can work either way.
                 * Determine the type from the live device.
                 * Default to bio-based if device is new.
                 */
                if (__table_type_request_based(live_md_type))
                        request_based = 1;
                else
                        bio_based = 1;
        }

        if (bio_based) {
verify_bio_based:
                /* We must use this table as bio-based */
                t->type = DM_TYPE_BIO_BASED;
                if (dm_table_supports_dax(t, device_not_dax_capable, &page_size) ||
                    (list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED)) {
                        t->type = DM_TYPE_DAX_BIO_BASED;
                }
                return 0;
        }

        BUG_ON(!request_based); /* No targets in this table */

        t->type = DM_TYPE_REQUEST_BASED;

verify_rq_based:
        /*
         * Request-based dm supports only tables that have a single target now.
         * To support multiple targets, request splitting support is needed,
         * and that needs lots of changes in the block-layer.
         * (e.g. request completion process for partial completion.)
         */
        if (t->num_targets > 1) {
                DMERR("request-based DM doesn't support multiple targets");
                return -EINVAL;
        }

        if (list_empty(devices)) {
                int srcu_idx;
                struct dm_table *live_table = dm_get_live_table(t->md, &srcu_idx);

                /* inherit live table's type */
                if (live_table)
                        t->type = live_table->type;
                dm_put_live_table(t->md, srcu_idx);
                return 0;
        }

        tgt = dm_table_get_immutable_target(t);
        if (!tgt) {
                DMERR("table load rejected: immutable target is required");
                return -EINVAL;
        } else if (tgt->max_io_len) {
                DMERR("table load rejected: immutable target that splits IO is not supported");
                return -EINVAL;
        }

        /* Non-request-stackable devices can't be used for request-based dm */
        if (!tgt->type->iterate_devices ||
            !tgt->type->iterate_devices(tgt, device_is_rq_stackable, NULL)) {
                DMERR("table load rejected: including non-request-stackable devices");
                return -EINVAL;
        }

        return 0;
}

enum dm_queue_mode dm_table_get_type(struct dm_table *t)
{
        return t->type;
}

struct target_type *dm_table_get_immutable_target_type(struct dm_table *t)
{
        return t->immutable_target_type;
}

struct dm_target *dm_table_get_immutable_target(struct dm_table *t)
{
        /* Immutable target is implicitly a singleton */
        if (t->num_targets > 1 ||
            !dm_target_is_immutable(t->targets[0].type))
                return NULL;

        return t->targets;
}

struct dm_target *dm_table_get_wildcard_target(struct dm_table *t)
{
        struct dm_target *ti;
        unsigned i;

        for (i = 0; i < dm_table_get_num_targets(t); i++) {
                ti = dm_table_get_target(t, i);
                if (dm_target_is_wildcard(ti->type))
                        return ti;
        }

        return NULL;
}

bool dm_table_bio_based(struct dm_table *t)
{
        return __table_type_bio_based(dm_table_get_type(t));
}

bool dm_table_request_based(struct dm_table *t)
{
        return __table_type_request_based(dm_table_get_type(t));
}

static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
{
        enum dm_queue_mode type = dm_table_get_type(t);
        unsigned per_io_data_size = 0;
        unsigned min_pool_size = 0;
        struct dm_target *ti;
        unsigned i;

        if (unlikely(type == DM_TYPE_NONE)) {
                DMWARN("no table type is set, can't allocate mempools");
                return -EINVAL;
        }

        if (__table_type_bio_based(type))
                for (i = 0; i < t->num_targets; i++) {
                        ti = t->targets + i;
                        per_io_data_size = max(per_io_data_size, ti->per_io_data_size);
                        min_pool_size = max(min_pool_size, ti->num_flush_bios);
                }

        t->mempools = dm_alloc_md_mempools(md, type, t->integrity_supported,
                                           per_io_data_size, min_pool_size);
        if (!t->mempools)
                return -ENOMEM;

        return 0;
}

void dm_table_free_md_mempools(struct dm_table *t)
{
        dm_free_md_mempools(t->mempools);
        t->mempools = NULL;
}

struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t)
{
        return t->mempools;
}

static int setup_indexes(struct dm_table *t)
{
        int i;
        unsigned int total = 0;
        sector_t *indexes;

        /* allocate the space for *all* the indexes */
        for (i = t->depth - 2; i >= 0; i--) {
                t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
                total += t->counts[i];
        }

        indexes = kvcalloc(total, NODE_SIZE, GFP_KERNEL);
        if (!indexes)
                return -ENOMEM;

        /* set up internal nodes, bottom-up */
        for (i = t->depth - 2; i >= 0; i--) {
                t->index[i] = indexes;
                indexes += (KEYS_PER_NODE * t->counts[i]);
                setup_btree_index(i, t);
        }

        return 0;
}

/*
 * Builds the btree to index the map.
 */
static int dm_table_build_index(struct dm_table *t)
{
        int r = 0;
        unsigned int leaf_nodes;

        /* how many indexes will the btree have ? */
        leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
        t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);

        /* leaf layer has already been set up */
        t->counts[t->depth - 1] = leaf_nodes;
        t->index[t->depth - 1] = t->highs;

        if (t->depth >= 2)
                r = setup_indexes(t);

        return r;
}
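
/*
 * Worked example (illustrative, KEYS_PER_NODE == 8): a table of 100
 * targets has leaf_nodes = dm_div_up(100, 8) = 13, so depth =
 * 1 + int_log(13, 9) = 3.  setup_indexes() then derives counts[1] =
 * dm_div_up(13, 9) = 2 and counts[0] = dm_div_up(2, 9) = 1, and fills
 * both internal levels bottom-up from the leaf highs.
 */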

static bool integrity_profile_exists(struct gendisk *disk)
{
        return !!blk_get_integrity(disk);
}

/*
 * Get a disk whose integrity profile reflects the table's profile.
 * Returns NULL if integrity support was inconsistent or unavailable.
 */
static struct gendisk *dm_table_get_integrity_disk(struct dm_table *t)
{
        struct list_head *devices = dm_table_get_devices(t);
        struct dm_dev_internal *dd = NULL;
        struct gendisk *prev_disk = NULL, *template_disk = NULL;
        unsigned i;

        for (i = 0; i < dm_table_get_num_targets(t); i++) {
                struct dm_target *ti = dm_table_get_target(t, i);

                if (!dm_target_passes_integrity(ti->type))
                        goto no_integrity;
        }

        list_for_each_entry(dd, devices, list) {
                template_disk = dd->dm_dev->bdev->bd_disk;
                if (!integrity_profile_exists(template_disk))
                        goto no_integrity;
                else if (prev_disk &&
                         blk_integrity_compare(prev_disk, template_disk) < 0)
                        goto no_integrity;
                prev_disk = template_disk;
        }

        return template_disk;

no_integrity:
        if (prev_disk)
                DMWARN("%s: integrity not set: %s and %s profile mismatch",
                       dm_device_name(t->md),
                       prev_disk->disk_name,
                       template_disk->disk_name);
        return NULL;
}

/*
 * Register the mapped device for blk_integrity support if the
 * underlying devices have an integrity profile.  But all devices may
 * not have matching profiles (checking all devices isn't reliable
 * during table load because this table may use other DM device(s) which
 * must be resumed before they will have an initialized integrity
 * profile).  Consequently, stacked DM devices force a 2 stage integrity
 * profile validation: First pass during table load, final pass during
 * resume.
 */
static int dm_table_register_integrity(struct dm_table *t)
{
        struct mapped_device *md = t->md;
        struct gendisk *template_disk = NULL;

        /* If target handles integrity itself do not register it here. */
        if (t->integrity_added)
                return 0;

        template_disk = dm_table_get_integrity_disk(t);
        if (!template_disk)
                return 0;

        if (!integrity_profile_exists(dm_disk(md))) {
                t->integrity_supported = true;
                /*
                 * Register integrity profile during table load; we can do
                 * this because the final profile must match during resume.
                 */
                blk_integrity_register(dm_disk(md),
                                       blk_get_integrity(template_disk));
                return 0;
        }

        /*
         * If DM device already has an initialized integrity
         * profile the new profile should not conflict.
         */
        if (blk_integrity_compare(dm_disk(md), template_disk) < 0) {
                DMWARN("%s: conflict with existing integrity profile: "
                       "%s profile mismatch",
                       dm_device_name(t->md),
                       template_disk->disk_name);
                return 1;
        }

        /* Preserve existing integrity profile */
        t->integrity_supported = true;
        return 0;
}

#ifdef CONFIG_BLK_INLINE_ENCRYPTION

struct dm_keyslot_manager {
        struct blk_keyslot_manager ksm;
        struct mapped_device *md;
};

struct dm_keyslot_evict_args {
        const struct blk_crypto_key *key;
        int err;
};

static int dm_keyslot_evict_callback(struct dm_target *ti, struct dm_dev *dev,
                                     sector_t start, sector_t len, void *data)
{
        struct dm_keyslot_evict_args *args = data;
        int err;

        err = blk_crypto_evict_key(bdev_get_queue(dev->bdev), args->key);
        if (!args->err)
                args->err = err;
        /* Always try to evict the key from all devices. */
        return 0;
}

/*
 * When an inline encryption key is evicted from a device-mapper device, evict
 * it from all the underlying devices.
 */
static int dm_keyslot_evict(struct blk_keyslot_manager *ksm,
                            const struct blk_crypto_key *key, unsigned int slot)
{
        struct dm_keyslot_manager *dksm = container_of(ksm,
                                                       struct dm_keyslot_manager,
                                                       ksm);
        struct mapped_device *md = dksm->md;
        struct dm_keyslot_evict_args args = { key };
        struct dm_table *t;
        int srcu_idx;
        int i;
        struct dm_target *ti;

        t = dm_get_live_table(md, &srcu_idx);
        if (!t)
                return 0;
        for (i = 0; i < dm_table_get_num_targets(t); i++) {
                ti = dm_table_get_target(t, i);
                if (!ti->type->iterate_devices)
                        continue;
                ti->type->iterate_devices(ti, dm_keyslot_evict_callback, &args);
        }
        dm_put_live_table(md, srcu_idx);
        return args.err;
}

static const struct blk_ksm_ll_ops dm_ksm_ll_ops = {
        .keyslot_evict = dm_keyslot_evict,
};

static int device_intersect_crypto_modes(struct dm_target *ti,
                                         struct dm_dev *dev, sector_t start,
                                         sector_t len, void *data)
{
        struct blk_keyslot_manager *parent = data;
        struct blk_keyslot_manager *child = bdev_get_queue(dev->bdev)->ksm;

        blk_ksm_intersect_modes(parent, child);
        return 0;
}

void dm_destroy_keyslot_manager(struct blk_keyslot_manager *ksm)
{
        struct dm_keyslot_manager *dksm = container_of(ksm,
                                                       struct dm_keyslot_manager,
                                                       ksm);

        if (!ksm)
                return;

        blk_ksm_destroy(ksm);
        kfree(dksm);
}

static void dm_table_destroy_keyslot_manager(struct dm_table *t)
{
        dm_destroy_keyslot_manager(t->ksm);
        t->ksm = NULL;
}

/*
 * Constructs and initializes t->ksm with a keyslot manager that
 * represents the common set of crypto capabilities of the devices
 * described by the dm_table. However, if the constructed keyslot
 * manager does not support a superset of the crypto capabilities
 * supported by the current keyslot manager of the mapped_device,
 * it returns an error instead, since we don't support restricting
 * crypto capabilities on table changes. Finally, if the constructed
 * keyslot manager doesn't actually support any crypto modes at all,
 * it just returns NULL.
 */
static int dm_table_construct_keyslot_manager(struct dm_table *t)
{
        struct dm_keyslot_manager *dksm;
        struct blk_keyslot_manager *ksm;
        struct dm_target *ti;
        unsigned int i;
        bool ksm_is_empty = true;

        dksm = kmalloc(sizeof(*dksm), GFP_KERNEL);
        if (!dksm)
                return -ENOMEM;
        dksm->md = t->md;

        ksm = &dksm->ksm;
        blk_ksm_init_passthrough(ksm);
        ksm->ksm_ll_ops = dm_ksm_ll_ops;
        ksm->max_dun_bytes_supported = UINT_MAX;
        memset(ksm->crypto_modes_supported, 0xFF,
               sizeof(ksm->crypto_modes_supported));

        for (i = 0; i < dm_table_get_num_targets(t); i++) {
                ti = dm_table_get_target(t, i);

                if (!dm_target_passes_crypto(ti->type)) {
                        blk_ksm_intersect_modes(ksm, NULL);
                        break;
                }
                if (!ti->type->iterate_devices)
                        continue;
                ti->type->iterate_devices(ti, device_intersect_crypto_modes,
                                          ksm);
        }

        if (t->md->queue && !blk_ksm_is_superset(ksm, t->md->queue->ksm)) {
                DMWARN("Inline encryption capabilities of new DM table were more restrictive than the old table's. This is not supported!");
                dm_destroy_keyslot_manager(ksm);
                return -EINVAL;
        }

        /*
         * If the new KSM doesn't actually support any crypto modes, we may as
         * well represent it with a NULL ksm.
         */
        ksm_is_empty = true;
        for (i = 0; i < ARRAY_SIZE(ksm->crypto_modes_supported); i++) {
                if (ksm->crypto_modes_supported[i]) {
                        ksm_is_empty = false;
                        break;
                }
        }

        if (ksm_is_empty) {
                dm_destroy_keyslot_manager(ksm);
                ksm = NULL;
        }

        /*
         * t->ksm is only set temporarily while the table is being set
         * up, and it gets set to NULL after the capabilities have
         * been transferred to the request_queue.
         */
        t->ksm = ksm;

        return 0;
}

static void dm_update_keyslot_manager(struct request_queue *q,
                                      struct dm_table *t)
{
        if (!t->ksm)
                return;

        /* Make the ksm less restrictive */
        if (!q->ksm) {
                blk_ksm_register(t->ksm, q);
        } else {
                blk_ksm_update_capabilities(q->ksm, t->ksm);
                dm_destroy_keyslot_manager(t->ksm);
        }
        t->ksm = NULL;
}

#else /* CONFIG_BLK_INLINE_ENCRYPTION */

static int dm_table_construct_keyslot_manager(struct dm_table *t)
{
        return 0;
}

void dm_destroy_keyslot_manager(struct blk_keyslot_manager *ksm)
{
}

static void dm_table_destroy_keyslot_manager(struct dm_table *t)
{
}

static void dm_update_keyslot_manager(struct request_queue *q,
                                      struct dm_table *t)
{
}

#endif /* !CONFIG_BLK_INLINE_ENCRYPTION */

/*
 * Prepares the table for use by building the indices,
 * setting the type, and allocating mempools.
 */
int dm_table_complete(struct dm_table *t)
{
        int r;

        r = dm_table_determine_type(t);
        if (r) {
                DMERR("unable to determine table type");
                return r;
        }

        r = dm_table_build_index(t);
        if (r) {
                DMERR("unable to build btrees");
                return r;
        }

        r = dm_table_register_integrity(t);
        if (r) {
                DMERR("could not register integrity profile.");
                return r;
        }

        r = dm_table_construct_keyslot_manager(t);
        if (r) {
                DMERR("could not construct keyslot manager.");
                return r;
        }

        r = dm_table_alloc_md_mempools(t, t->md);
        if (r)
                DMERR("unable to allocate mempools");

        return r;
}
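
/*
 * Sketch of the overall load sequence as driven from dm-ioctl
 * (simplified):
 *
 *      dm_table_create(&t, mode, num_targets, md);
 *      for each table line:
 *              dm_table_add_target(t, type, start, len, params);
 *      dm_table_complete(t);   -- table is now ready to be swapped in
 */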

static DEFINE_MUTEX(_event_lock);
void dm_table_event_callback(struct dm_table *t,
                             void (*fn)(void *), void *context)
{
        mutex_lock(&_event_lock);
        t->event_fn = fn;
        t->event_context = context;
        mutex_unlock(&_event_lock);
}

void dm_table_event(struct dm_table *t)
{
        mutex_lock(&_event_lock);
        if (t->event_fn)
                t->event_fn(t->event_context);
        mutex_unlock(&_event_lock);
}
EXPORT_SYMBOL(dm_table_event);

inline sector_t dm_table_get_size(struct dm_table *t)
{
        return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
}
EXPORT_SYMBOL(dm_table_get_size);

struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
{
        if (index >= t->num_targets)
                return NULL;

        return t->targets + index;
}

/*
 * Search the btree for the correct target.
 *
 * Caller should check returned pointer for NULL
 * to trap I/O beyond end of device.
 */
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
{
        unsigned int l, n = 0, k = 0;
        sector_t *node;

        if (unlikely(sector >= dm_table_get_size(t)))
                return NULL;

        for (l = 0; l < t->depth; l++) {
                n = get_child(n, k);
                node = get_node(t, l, n);

                for (k = 0; k < KEYS_PER_NODE; k++)
                        if (node[k] >= sector)
                                break;
        }

        return &t->targets[(KEYS_PER_NODE * n) + k];
}
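
/*
 * Worked example (illustrative): with depth == 1 all highs live in the
 * leaf level.  For two targets covering sectors [0, 99] and [100, 199],
 * the leaf keys begin {99, 199, (sector_t) -1, ...}; looking up sector
 * 150 stops at k == 1 and returns &t->targets[1].
 */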

/*
 * type->iterate_devices() should be called when the sanity check needs to
 * iterate and check all underlying data devices. iterate_devices() will
 * iterate all underlying data devices until it encounters a non-zero return
 * code, whether returned by the input iterate_devices_callout_fn or by
 * iterate_devices() itself internally.
 *
 * For some target types (e.g. dm-stripe), one call of iterate_devices() may
 * iterate multiple underlying devices internally, in which case a non-zero
 * return code returned by iterate_devices_callout_fn will stop the iteration
 * in advance.
 *
 * Cases requiring _any_ underlying device to support some kind of attribute
 * should use an iteration structure like dm_table_any_dev_attr(), or call
 * it directly. @func should handle semantics of positive examples, e.g.
 * capable of something.
 *
 * Cases requiring _all_ underlying devices to support some kind of attribute
 * should use an iteration structure like dm_table_supports_nowait() or
 * dm_table_supports_discards(). Or introduce dm_table_all_devs_attr() that
 * uses an @anti_func that handles semantics of counter examples, e.g. not
 * capable of something. So: return !dm_table_any_dev_attr(t, anti_func, data);
 */
static bool dm_table_any_dev_attr(struct dm_table *t,
                                  iterate_devices_callout_fn func, void *data)
{
        struct dm_target *ti;
        unsigned int i;

        for (i = 0; i < dm_table_get_num_targets(t); i++) {
                ti = dm_table_get_target(t, i);

                if (ti->type->iterate_devices &&
                    ti->type->iterate_devices(ti, func, data))
                        return true;
        }

        return false;
}

static int count_device(struct dm_target *ti, struct dm_dev *dev,
                        sector_t start, sector_t len, void *data)
{
        unsigned *num_devices = data;

        (*num_devices)++;

        return 0;
}

/*
 * Check whether a table has no data devices attached using each
 * target's iterate_devices method.
 * Returns false if the result is unknown because a target doesn't
 * support iterate_devices.
 */
bool dm_table_has_no_data_devices(struct dm_table *table)
{
        struct dm_target *ti;
        unsigned i, num_devices;

        for (i = 0; i < dm_table_get_num_targets(table); i++) {
                ti = dm_table_get_target(table, i);

                if (!ti->type->iterate_devices)
                        return false;

                num_devices = 0;
                ti->type->iterate_devices(ti, count_device, &num_devices);
                if (num_devices)
                        return false;
        }

        return true;
}
1564 | ||
24f6b603 JX |
1565 | static int device_not_zoned_model(struct dm_target *ti, struct dm_dev *dev, |
1566 | sector_t start, sector_t len, void *data) | |
dd88d313 DLM |
1567 | { |
1568 | struct request_queue *q = bdev_get_queue(dev->bdev); | |
1569 | enum blk_zoned_model *zoned_model = data; | |
1570 | ||
cccb493c | 1571 | return blk_queue_zoned_model(q) != *zoned_model; |
dd88d313 DLM |
1572 | } |
1573 | ||
2d669ceb SK |
1574 | /* |
1575 | * Check the device zoned model based on the target feature flag. If the target | |
1576 | * has the DM_TARGET_ZONED_HM feature flag set, host-managed zoned devices are | |
1577 | * also accepted but all devices must have the same zoned model. If the target | |
1578 | * has the DM_TARGET_MIXED_ZONED_MODEL feature set, the devices can have any | |
1579 | * zoned model with all zoned devices having the same zone size. | |
1580 | */ | |
dd88d313 DLM |
1581 | static bool dm_table_supports_zoned_model(struct dm_table *t, |
1582 | enum blk_zoned_model zoned_model) | |
1583 | { | |
1584 | struct dm_target *ti; | |
1585 | unsigned i; | |
1586 | ||
1587 | for (i = 0; i < dm_table_get_num_targets(t); i++) { | |
1588 | ti = dm_table_get_target(t, i); | |
1589 | ||
2d669ceb SK |
1590 | if (dm_target_supports_zoned_hm(ti->type)) { |
1591 | if (!ti->type->iterate_devices || | |
1592 | ti->type->iterate_devices(ti, device_not_zoned_model, | |
1593 | &zoned_model)) | |
1594 | return false; | |
1595 | } else if (!dm_target_supports_mixed_zoned_model(ti->type)) { | |
1596 | if (zoned_model == BLK_ZONED_HM) | |
1597 | return false; | |
1598 | } | |
dd88d313 DLM |
1599 | } |
1600 | ||
1601 | return true; | |
1602 | } | |
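/*
 * Illustrative only: a target that can sit on host-managed zoned devices
 * advertises that in its target_type features (the name below is
 * hypothetical):
 *
 *	static struct target_type zoned_pt_target = {
 *		.name     = "zoned-pt",
 *		.features = DM_TARGET_ZONED_HM,
 *		...
 *	};
 *
 * dm_target_supports_zoned_hm() above keys off exactly this flag.
 */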
1603 | ||
24f6b603 JX |
1604 | static int device_not_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev, |
1605 | sector_t start, sector_t len, void *data) | |
dd88d313 DLM |
1606 | { |
1607 | struct request_queue *q = bdev_get_queue(dev->bdev); | |
1608 | unsigned int *zone_sectors = data; | |
1609 | ||
2d669ceb SK |
1610 | if (!blk_queue_is_zoned(q)) |
1611 | return 0; | |
1612 | ||
cccb493c | 1613 | return blk_queue_zone_sectors(q) != *zone_sectors; |
dd88d313 DLM |
1614 | } |
1615 | ||
2d669ceb SK |
1616 | /* |
1617 | * Check consistency of zoned model and zone sectors across all targets. For | |
1618 | * zone sectors, if the destination device is a zoned block device, it shall | |
1619 | * have the specified zone_sectors. | |
1620 | */ | |
dd88d313 DLM |
1621 | static int validate_hardware_zoned_model(struct dm_table *table, |
1622 | enum blk_zoned_model zoned_model, | |
1623 | unsigned int zone_sectors) | |
1624 | { | |
1625 | if (zoned_model == BLK_ZONED_NONE) | |
1626 | return 0; | |
1627 | ||
1628 | if (!dm_table_supports_zoned_model(table, zoned_model)) { | |
1629 | DMERR("%s: zoned model is not consistent across all devices", | |
1630 | dm_device_name(table->md)); | |
1631 | return -EINVAL; | |
1632 | } | |
1633 | ||
1634 | /* Check zone size validity and compatibility */ | |
1635 | if (!zone_sectors || !is_power_of_2(zone_sectors)) | |
1636 | return -EINVAL; | |
1637 | ||
24f6b603 | 1638 | if (dm_table_any_dev_attr(table, device_not_matches_zone_sectors, &zone_sectors)) { |
2d669ceb | 1639 | DMERR("%s: zone sectors is not consistent across all zoned devices", |
dd88d313 DLM |
1640 | dm_device_name(table->md)); |
1641 | return -EINVAL; | |
1642 | } | |
1643 | ||
1644 | return 0; | |
1645 | } | |
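/*
 * Worked example (illustrative): a zoned device with 256 MiB zones reports
 * zone_sectors == 524288 (2^19), which is a power of two and passes the
 * check above; zone_sectors == 0 or any non-power-of-two value makes the
 * table load fail with -EINVAL.
 */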
1646 | ||
754c5fc7 MS |
1647 | /* |
1648 | * Establish the new table's queue_limits and validate them. | |
1649 | */ | |
1650 | int dm_calculate_queue_limits(struct dm_table *table, | |
1651 | struct queue_limits *limits) | |
1652 | { | |
3c120169 | 1653 | struct dm_target *ti; |
754c5fc7 | 1654 | struct queue_limits ti_limits; |
3c120169 | 1655 | unsigned i; |
dd88d313 DLM |
1656 | enum blk_zoned_model zoned_model = BLK_ZONED_NONE; |
1657 | unsigned int zone_sectors = 0; | |
754c5fc7 | 1658 | |
b1bd055d | 1659 | blk_set_stacking_limits(limits); |
754c5fc7 | 1660 | |
3c120169 | 1661 | for (i = 0; i < dm_table_get_num_targets(table); i++) { |
b1bd055d | 1662 | blk_set_stacking_limits(&ti_limits); |
754c5fc7 | 1663 | |
3c120169 | 1664 | ti = dm_table_get_target(table, i); |
754c5fc7 MS |
1665 | |
1666 | if (!ti->type->iterate_devices) | |
1667 | goto combine_limits; | |
1668 | ||
1669 | /* | |
1670 | * Combine queue limits of all the devices this target uses. | |
1671 | */ | |
1672 | ti->type->iterate_devices(ti, dm_set_device_limits, | |
1673 | &ti_limits); | |
1674 | ||
dd88d313 DLM |
1675 | if (zoned_model == BLK_ZONED_NONE && ti_limits.zoned != BLK_ZONED_NONE) { |
1676 | /* | |
1677 | * After stacking all limits, validate that all devices | |
1678 | * in the table support this zoned model and zone sectors. | |
1679 | */ | |
1680 | zoned_model = ti_limits.zoned; | |
1681 | zone_sectors = ti_limits.chunk_sectors; | |
1682 | } | |
1683 | ||
40bea431 MS |
1684 | /* Set I/O hints portion of queue limits */ |
1685 | if (ti->type->io_hints) | |
1686 | ti->type->io_hints(ti, &ti_limits); | |
1687 | ||
754c5fc7 MS |
1688 | /* |
1689 | * Check each device area is consistent with the target's | |
1690 | * overall queue limits. | |
1691 | */ | |
f6a1ed10 MP |
1692 | if (ti->type->iterate_devices(ti, device_area_is_invalid, |
1693 | &ti_limits)) | |
754c5fc7 MS |
1694 | return -EINVAL; |
1695 | ||
1696 | combine_limits: | |
1697 | /* | |
1698 | * Merge this target's queue limits into the overall limits | |
1699 | * for the table. | |
1700 | */ | |
1701 | if (blk_stack_limits(limits, &ti_limits, 0) < 0) | |
b27d7f16 | 1702 | DMWARN("%s: adding target device " |
754c5fc7 | 1703 | "(start sect %llu len %llu) " |
b27d7f16 | 1704 | "caused an alignment inconsistency", |
754c5fc7 MS |
1705 | dm_device_name(table->md), |
1706 | (unsigned long long) ti->begin, | |
1707 | (unsigned long long) ti->len); | |
1708 | } | |
1709 | ||
dd88d313 DLM |
1710 | /* |
1711 | * Verify that the zoned model and zone sectors, as determined before | |
1712 | * any .io_hints override, are the same across all devices in the table. | |
1713 | * - this is especially relevant if .io_hints is emulating a disk-managed | |
1714 | * zoned model (aka BLK_ZONED_NONE) on host-managed zoned block devices. | |
1715 | * BUT... | |
1716 | */ | |
1717 | if (limits->zoned != BLK_ZONED_NONE) { | |
1718 | /* | |
1719 | * ...IF the above limits stacking determined a zoned model, | |
1720 | * validate that all of the table's devices conform to it. | |
1721 | */ | |
1722 | zoned_model = limits->zoned; | |
1723 | zone_sectors = limits->chunk_sectors; | |
1724 | } | |
1725 | if (validate_hardware_zoned_model(table, zoned_model, zone_sectors)) | |
1726 | return -EINVAL; | |
1727 | ||
754c5fc7 MS |
1728 | return validate_hardware_logical_block_alignment(table, limits); |
1729 | } | |
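/*
 * Worked example (illustrative) of the blk_stack_limits() call above:
 * stacking a 512 B logical-block-size device with a 4096 B one leaves
 * limits->logical_block_size == 4096. If a target's start sector is then
 * misaligned with respect to the stacked limits, blk_stack_limits() returns
 * a negative value and the DMWARN fires, but the table load itself still
 * proceeds.
 */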
1730 | ||
9c47008d | 1731 | /* |
25520d55 MP |
1732 | * Verify that all devices have an integrity profile that matches the |
1733 | * DM device's registered integrity profile. If the profiles don't | |
1734 | * match then unregister the DM device's integrity profile. | |
9c47008d | 1735 | */ |
25520d55 | 1736 | static void dm_table_verify_integrity(struct dm_table *t) |
9c47008d | 1737 | { |
a63a5cf8 | 1738 | struct gendisk *template_disk = NULL; |
9c47008d | 1739 | |
9b4b5a79 MB |
1740 | if (t->integrity_added) |
1741 | return; | |
1742 | ||
25520d55 MP |
1743 | if (t->integrity_supported) { |
1744 | /* | |
1745 | * Verify that the original integrity profile | |
1746 | * matches all the devices in this table. | |
1747 | */ | |
1748 | template_disk = dm_table_get_integrity_disk(t); | |
1749 | if (template_disk && | |
1750 | blk_integrity_compare(dm_disk(t->md), template_disk) >= 0) | |
1751 | return; | |
1752 | } | |
9c47008d | 1753 | |
25520d55 | 1754 | if (integrity_profile_exists(dm_disk(t->md))) { |
876fbba1 MS |
1755 | DMWARN("%s: unable to establish an integrity profile", |
1756 | dm_device_name(t->md)); | |
25520d55 MP |
1757 | blk_integrity_unregister(dm_disk(t->md)); |
1758 | } | |
9c47008d MP |
1759 | } |
1760 | ||
ed8b752b MS |
1761 | static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev, |
1762 | sector_t start, sector_t len, void *data) | |
1763 | { | |
c888a8f9 | 1764 | unsigned long flush = (unsigned long) data; |
ed8b752b MS |
1765 | struct request_queue *q = bdev_get_queue(dev->bdev); |
1766 | ||
cccb493c | 1767 | return (q->queue_flags & flush); |
ed8b752b MS |
1768 | } |
1769 | ||
c888a8f9 | 1770 | static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush) |
ed8b752b MS |
1771 | { |
1772 | struct dm_target *ti; | |
3c120169 | 1773 | unsigned i; |
ed8b752b MS |
1774 | |
1775 | /* | |
1776 | * Require at least one underlying device to support flushes. | |
1777 | * t->devices includes internal dm devices such as mirror logs | |
1778 | * so we need to use iterate_devices here, which targets | |
1779 | * supporting flushes must provide. | |
1780 | */ | |
3c120169 MP |
1781 | for (i = 0; i < dm_table_get_num_targets(t); i++) { |
1782 | ti = dm_table_get_target(t, i); | |
ed8b752b | 1783 | |
55a62eef | 1784 | if (!ti->num_flush_bios) |
ed8b752b MS |
1785 | continue; |
1786 | ||
0e9c24ed | 1787 | if (ti->flush_supported) |
7f61f5a0 | 1788 | return true; |
0e9c24ed | 1789 | |
ed8b752b | 1790 | if (ti->type->iterate_devices && |
c888a8f9 | 1791 | ti->type->iterate_devices(ti, device_flush_capable, (void *) flush)) |
7f61f5a0 | 1792 | return true; |
ed8b752b MS |
1793 | } |
1794 | ||
7f61f5a0 | 1795 | return false; |
ed8b752b MS |
1796 | } |
1797 | ||
273752c9 VG |
1798 | static int device_dax_write_cache_enabled(struct dm_target *ti, |
1799 | struct dm_dev *dev, sector_t start, | |
1800 | sector_t len, void *data) | |
1801 | { | |
1802 | struct dax_device *dax_dev = dev->dax_dev; | |
1803 | ||
1804 | return dax_dev && dax_write_cache_enabled(dax_dev); | |
1810 | } | |
1811 | ||
a4c8dd9c JX |
1812 | static int device_is_rotational(struct dm_target *ti, struct dm_dev *dev, |
1813 | sector_t start, sector_t len, void *data) | |
4693c966 MSB |
1814 | { |
1815 | struct request_queue *q = bdev_get_queue(dev->bdev); | |
1816 | ||
cccb493c | 1817 | return !blk_queue_nonrot(q); |
4693c966 MSB |
1818 | } |
1819 | ||
c3c4555e MB |
1820 | static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev, |
1821 | sector_t start, sector_t len, void *data) | |
1822 | { | |
1823 | struct request_queue *q = bdev_get_queue(dev->bdev); | |
1824 | ||
cccb493c | 1825 | return !blk_queue_add_random(q); |
c3c4555e MB |
1826 | } |
1827 | ||
d54eaa5a MS |
1828 | static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev, |
1829 | sector_t start, sector_t len, void *data) | |
1830 | { | |
1831 | struct request_queue *q = bdev_get_queue(dev->bdev); | |
1832 | ||
cccb493c | 1833 | return !q->limits.max_write_same_sectors; |
d54eaa5a MS |
1834 | } |
1835 | ||
1836 | static bool dm_table_supports_write_same(struct dm_table *t) | |
1837 | { | |
1838 | struct dm_target *ti; | |
3c120169 | 1839 | unsigned i; |
d54eaa5a | 1840 | |
3c120169 MP |
1841 | for (i = 0; i < dm_table_get_num_targets(t); i++) { |
1842 | ti = dm_table_get_target(t, i); | |
d54eaa5a | 1843 | |
55a62eef | 1844 | if (!ti->num_write_same_bios) |
d54eaa5a MS |
1845 | return false; |
1846 | ||
1847 | if (!ti->type->iterate_devices || | |
dc019b21 | 1848 | ti->type->iterate_devices(ti, device_not_write_same_capable, NULL)) |
d54eaa5a MS |
1849 | return false; |
1850 | } | |
1851 | ||
1852 | return true; | |
1853 | } | |
1854 | ||
ac62d620 CH |
1855 | static int device_not_write_zeroes_capable(struct dm_target *ti, struct dm_dev *dev, |
1856 | sector_t start, sector_t len, void *data) | |
1857 | { | |
1858 | struct request_queue *q = bdev_get_queue(dev->bdev); | |
1859 | ||
cccb493c | 1860 | return !q->limits.max_write_zeroes_sectors; |
ac62d620 CH |
1861 | } |
1862 | ||
1863 | static bool dm_table_supports_write_zeroes(struct dm_table *t) | |
1864 | { | |
1865 | struct dm_target *ti; | |
1866 | unsigned i; | |
1867 | ||
1868 | for (i = 0; i < dm_table_get_num_targets(t); i++) { | |
1869 | ti = dm_table_get_target(t, i); | |
1870 | ||
1871 | if (!ti->num_write_zeroes_bios) | |
1872 | return false; | |
1873 | ||
1874 | if (!ti->type->iterate_devices || | |
1875 | ti->type->iterate_devices(ti, device_not_write_zeroes_capable, NULL)) | |
1876 | return false; | |
1877 | } | |
1878 | ||
1879 | return true; | |
1880 | } | |
1881 | ||
6abc4946 KK |
1882 | static int device_not_nowait_capable(struct dm_target *ti, struct dm_dev *dev, |
1883 | sector_t start, sector_t len, void *data) | |
1884 | { | |
1885 | struct request_queue *q = bdev_get_queue(dev->bdev); | |
1886 | ||
cccb493c | 1887 | return !blk_queue_nowait(q); |
6abc4946 KK |
1888 | } |
1889 | ||
1890 | static bool dm_table_supports_nowait(struct dm_table *t) | |
1891 | { | |
1892 | struct dm_target *ti; | |
1893 | unsigned i; | |
1894 | ||
1895 | for (i = 0; i < dm_table_get_num_targets(t); i++) { | |
1896 | ti = dm_table_get_target(t, i); | |
1897 | ||
1898 | if (!dm_target_supports_nowait(ti->type)) | |
1899 | return false; | |
1900 | ||
1901 | if (!ti->type->iterate_devices || | |
1902 | ti->type->iterate_devices(ti, device_not_nowait_capable, NULL)) | |
1903 | return false; | |
1904 | } | |
1905 | ||
1906 | return true; | |
1907 | } | |
1908 | ||
8a74d29d MS |
1909 | static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev, |
1910 | sector_t start, sector_t len, void *data) | |
a7ffb6a5 MP |
1911 | { |
1912 | struct request_queue *q = bdev_get_queue(dev->bdev); | |
1913 | ||
cccb493c | 1914 | return !blk_queue_discard(q); |
a7ffb6a5 MP |
1915 | } |
1916 | ||
1917 | static bool dm_table_supports_discards(struct dm_table *t) | |
1918 | { | |
1919 | struct dm_target *ti; | |
3c120169 | 1920 | unsigned i; |
a7ffb6a5 | 1921 | |
3c120169 MP |
1922 | for (i = 0; i < dm_table_get_num_targets(t); i++) { |
1923 | ti = dm_table_get_target(t, i); | |
a7ffb6a5 MP |
1924 | |
1925 | if (!ti->num_discard_bios) | |
8a74d29d | 1926 | return false; |
a7ffb6a5 | 1927 | |
8a74d29d MS |
1928 | /* |
1929 | * Either the target provides discard support (as implied by setting | |
1930 | * 'discards_supported') or it relies on _all_ data devices having | |
1931 | * discard support. | |
1932 | */ | |
1933 | if (!ti->discards_supported && | |
1934 | (!ti->type->iterate_devices || | |
1935 | ti->type->iterate_devices(ti, device_not_discard_capable, NULL))) | |
1936 | return false; | |
a7ffb6a5 MP |
1937 | } |
1938 | ||
8a74d29d | 1939 | return true; |
a7ffb6a5 MP |
1940 | } |
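/*
 * Illustrative: a target's ctr typically opts in to discards with
 *
 *	ti->num_discard_bios = 1;
 *
 * and, if the target handles discards itself rather than relying on its
 * data devices, it also sets
 *
 *	ti->discards_supported = true;
 *
 * which short-circuits the per-device capability check above.
 */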
1941 | ||
00716545 DS |
1942 | static int device_not_secure_erase_capable(struct dm_target *ti, |
1943 | struct dm_dev *dev, sector_t start, | |
1944 | sector_t len, void *data) | |
1945 | { | |
1946 | struct request_queue *q = bdev_get_queue(dev->bdev); | |
1947 | ||
cccb493c | 1948 | return !blk_queue_secure_erase(q); |
00716545 DS |
1949 | } |
1950 | ||
1951 | static bool dm_table_supports_secure_erase(struct dm_table *t) | |
1952 | { | |
1953 | struct dm_target *ti; | |
1954 | unsigned int i; | |
1955 | ||
1956 | for (i = 0; i < dm_table_get_num_targets(t); i++) { | |
1957 | ti = dm_table_get_target(t, i); | |
1958 | ||
1959 | if (!ti->num_secure_erase_bios) | |
1960 | return false; | |
1961 | ||
1962 | if (!ti->type->iterate_devices || | |
1963 | ti->type->iterate_devices(ti, device_not_secure_erase_capable, NULL)) | |
1964 | return false; | |
1965 | } | |
1966 | ||
1967 | return true; | |
1968 | } | |
1969 | ||
eb40c0ac ID |
1970 | static int device_requires_stable_pages(struct dm_target *ti, |
1971 | struct dm_dev *dev, sector_t start, | |
1972 | sector_t len, void *data) | |
1973 | { | |
1974 | struct request_queue *q = bdev_get_queue(dev->bdev); | |
1975 | ||
cccb493c | 1976 | return blk_queue_stable_writes(q); |
eb40c0ac ID |
1977 | } |
1978 | ||
bb37d772 DLM |
1979 | int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, |
1980 | struct queue_limits *limits) | |
1da177e4 | 1981 | { |
519a7e16 | 1982 | bool wc = false, fua = false; |
2e9ee095 | 1983 | int page_size = PAGE_SIZE; |
bb37d772 | 1984 | int r; |
ed8b752b | 1985 | |
1da177e4 | 1986 | /* |
1197764e | 1987 | * Copy table's limits to the DM device's request_queue |
1da177e4 | 1988 | */ |
754c5fc7 | 1989 | q->limits = *limits; |
c9a3f6d6 | 1990 | |
6abc4946 KK |
1991 | if (dm_table_supports_nowait(t)) |
1992 | blk_queue_flag_set(QUEUE_FLAG_NOWAIT, q); | |
1993 | else | |
1994 | blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, q); | |
1995 | ||
5d47c89f | 1996 | if (!dm_table_supports_discards(t)) { |
8b904b5b | 1997 | blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q); |
5d47c89f MS |
1998 | /* Must also clear discard limits... */ |
1999 | q->limits.max_discard_sectors = 0; | |
2000 | q->limits.max_hw_discard_sectors = 0; | |
2001 | q->limits.discard_granularity = 0; | |
2002 | q->limits.discard_alignment = 0; | |
2003 | q->limits.discard_misaligned = 0; | |
2004 | } else | |
8b904b5b | 2005 | blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); |
5ae89a87 | 2006 | |
00716545 | 2007 | if (dm_table_supports_secure_erase(t)) |
83c7c18b | 2008 | blk_queue_flag_set(QUEUE_FLAG_SECERASE, q); |
00716545 | 2009 | |
c888a8f9 | 2010 | if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) { |
519a7e16 | 2011 | wc = true; |
c888a8f9 | 2012 | if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_FUA))) |
519a7e16 | 2013 | fua = true; |
ed8b752b | 2014 | } |
519a7e16 | 2015 | blk_queue_write_cache(q, wc, fua); |
ed8b752b | 2016 | |
5b0fab50 | 2017 | if (dm_table_supports_dax(t, device_not_dax_capable, &page_size)) { |
8b904b5b | 2018 | blk_queue_flag_set(QUEUE_FLAG_DAX, q); |
5b0fab50 | 2019 | if (dm_table_supports_dax(t, device_not_dax_synchronous_capable, NULL)) |
2e9ee095 PG |
2020 | set_dax_synchronous(t->md->dax_dev); |
2021 | } | |
dbc62659 RZ |
2022 | else |
2023 | blk_queue_flag_clear(QUEUE_FLAG_DAX, q); | |
2024 | ||
24f6b603 | 2025 | if (dm_table_any_dev_attr(t, device_dax_write_cache_enabled, NULL)) |
273752c9 VG |
2026 | dax_write_cache(t->md->dax_dev, true); |
2027 | ||
c3c4555e | 2028 | /* Mark the queue non-rotational only if all underlying devices are. */ |
24f6b603 | 2029 | if (dm_table_any_dev_attr(t, device_is_rotational, NULL)) |
8b904b5b | 2030 | blk_queue_flag_clear(QUEUE_FLAG_NONROT, q); |
a4c8dd9c JX |
2031 | else |
2032 | blk_queue_flag_set(QUEUE_FLAG_NONROT, q); | |
4693c966 | 2033 | |
d54eaa5a MS |
2034 | if (!dm_table_supports_write_same(t)) |
2035 | q->limits.max_write_same_sectors = 0; | |
ac62d620 CH |
2036 | if (!dm_table_supports_write_zeroes(t)) |
2037 | q->limits.max_write_zeroes_sectors = 0; | |
c1a94672 | 2038 | |
25520d55 | 2039 | dm_table_verify_integrity(t); |
e6ee8c0b | 2040 | |
eb40c0ac ID |
2041 | /* |
2042 | * Some devices don't use blk_integrity but still want stable pages | |
2043 | * because they do their own checksumming. | |
a4c8dd9c JX |
2044 | * If any underlying device requires stable pages, a table must require |
2045 | * them as well. Only targets that support iterate_devices are considered: | |
2046 | * we don't want error, zero, etc. to require stable pages. | |
eb40c0ac | 2047 | */ |
24f6b603 | 2048 | if (dm_table_any_dev_attr(t, device_requires_stable_pages, NULL)) |
1cb039f3 | 2049 | blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q); |
eb40c0ac | 2050 | else |
1cb039f3 | 2051 | blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, q); |
eb40c0ac | 2052 | |
c3c4555e MB |
2053 | /* |
2054 | * Determine whether or not this queue's I/O timings contribute | |
2055 | * to the entropy pool. Only request-based targets use this. | |
2056 | * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not | |
2057 | * have it set. | |
2058 | */ | |
24f6b603 JX |
2059 | if (blk_queue_add_random(q) && |
2060 | dm_table_any_dev_attr(t, device_is_not_random, NULL)) | |
8b904b5b | 2061 | blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); |
bf505456 | 2062 | |
bb37d772 DLM |
2063 | /* |
2064 | * For a zoned target, set up the zone-related queue attributes and, | |
2065 | * if needed, the resources for zone append emulation. | |
2066 | */ | |
2067 | if (blk_queue_is_zoned(q)) { | |
2068 | r = dm_set_zones_restrictions(t, q); | |
2069 | if (r) | |
2070 | return r; | |
2071 | } | |
c6d6e9b0 | 2072 | |
aa6ce87a | 2073 | dm_update_keyslot_manager(q, t); |
471aa704 | 2074 | disk_update_readahead(t->md->disk); |
bb37d772 DLM |
2075 | |
2076 | return 0; | |
1da177e4 LT |
2077 | } |
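/*
 * Sketch of how dm core is expected to drive the two functions above when
 * binding a new table (simplified from dm.c; error handling elided):
 *
 *	struct queue_limits limits;
 *	int r;
 *
 *	r = dm_calculate_queue_limits(t, &limits);
 *	if (!r)
 *		r = dm_table_set_restrictions(t, md->queue, &limits);
 */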
2078 | ||
2079 | unsigned int dm_table_get_num_targets(struct dm_table *t) | |
2080 | { | |
2081 | return t->num_targets; | |
2082 | } | |
2083 | ||
2084 | struct list_head *dm_table_get_devices(struct dm_table *t) | |
2085 | { | |
2086 | return &t->devices; | |
2087 | } | |
2088 | ||
aeb5d727 | 2089 | fmode_t dm_table_get_mode(struct dm_table *t) |
1da177e4 LT |
2090 | { |
2091 | return t->mode; | |
2092 | } | |
08649012 | 2093 | EXPORT_SYMBOL(dm_table_get_mode); |
1da177e4 | 2094 | |
d67ee213 MS |
2095 | enum suspend_mode { |
2096 | PRESUSPEND, | |
2097 | PRESUSPEND_UNDO, | |
2098 | POSTSUSPEND, | |
2099 | }; | |
2100 | ||
2101 | static void suspend_targets(struct dm_table *t, enum suspend_mode mode) | |
1da177e4 LT |
2102 | { |
2103 | int i = t->num_targets; | |
2104 | struct dm_target *ti = t->targets; | |
2105 | ||
1ea0654e BVA |
2106 | lockdep_assert_held(&t->md->suspend_lock); |
2107 | ||
1da177e4 | 2108 | while (i--) { |
d67ee213 MS |
2109 | switch (mode) { |
2110 | case PRESUSPEND: | |
2111 | if (ti->type->presuspend) | |
2112 | ti->type->presuspend(ti); | |
2113 | break; | |
2114 | case PRESUSPEND_UNDO: | |
2115 | if (ti->type->presuspend_undo) | |
2116 | ti->type->presuspend_undo(ti); | |
2117 | break; | |
2118 | case POSTSUSPEND: | |
1da177e4 LT |
2119 | if (ti->type->postsuspend) |
2120 | ti->type->postsuspend(ti); | |
d67ee213 MS |
2121 | break; |
2122 | } | |
1da177e4 LT |
2123 | ti++; |
2124 | } | |
2125 | } | |
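/*
 * Typical calling sequence from dm core (sketch): dm_table_presuspend_targets()
 * runs first; if the suspend subsequently fails, dm_table_presuspend_undo_targets()
 * rolls the targets back, otherwise dm_table_postsuspend_targets() completes
 * the suspend once I/O has been quiesced.
 */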
2126 | ||
2127 | void dm_table_presuspend_targets(struct dm_table *t) | |
2128 | { | |
cf222b37 AK |
2129 | if (!t) |
2130 | return; | |
2131 | ||
d67ee213 MS |
2132 | suspend_targets(t, PRESUSPEND); |
2133 | } | |
2134 | ||
2135 | void dm_table_presuspend_undo_targets(struct dm_table *t) | |
2136 | { | |
2137 | if (!t) | |
2138 | return; | |
2139 | ||
2140 | suspend_targets(t, PRESUSPEND_UNDO); | |
1da177e4 LT |
2141 | } |
2142 | ||
2143 | void dm_table_postsuspend_targets(struct dm_table *t) | |
2144 | { | |
cf222b37 AK |
2145 | if (!t) |
2146 | return; | |
2147 | ||
d67ee213 | 2148 | suspend_targets(t, POSTSUSPEND); |
1da177e4 LT |
2149 | } |
2150 | ||
8757b776 | 2151 | int dm_table_resume_targets(struct dm_table *t) |
1da177e4 | 2152 | { |
8757b776 MB |
2153 | int i, r = 0; |
2154 | ||
1ea0654e BVA |
2155 | lockdep_assert_held(&t->md->suspend_lock); |
2156 | ||
8757b776 MB |
2157 | for (i = 0; i < t->num_targets; i++) { |
2158 | struct dm_target *ti = t->targets + i; | |
2159 | ||
2160 | if (!ti->type->preresume) | |
2161 | continue; | |
2162 | ||
2163 | r = ti->type->preresume(ti); | |
7833b08e MS |
2164 | if (r) { |
2165 | DMERR("%s: %s: preresume failed, error = %d", | |
2166 | dm_device_name(t->md), ti->type->name, r); | |
8757b776 | 2167 | return r; |
7833b08e | 2168 | } |
8757b776 | 2169 | } |
1da177e4 LT |
2170 | |
2171 | for (i = 0; i < t->num_targets; i++) { | |
2172 | struct dm_target *ti = t->targets + i; | |
2173 | ||
2174 | if (ti->type->resume) | |
2175 | ti->type->resume(ti); | |
2176 | } | |
8757b776 MB |
2177 | |
2178 | return 0; | |
1da177e4 LT |
2179 | } |
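/*
 * Note the two passes above: every target's ->preresume must succeed before
 * any target's ->resume is called, so a preresume failure can never leave
 * the table with some targets resumed and others not.
 */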
2180 | ||
1134e5ae MA |
2181 | struct mapped_device *dm_table_get_md(struct dm_table *t) |
2182 | { | |
1134e5ae MA |
2183 | return t->md; |
2184 | } | |
08649012 | 2185 | EXPORT_SYMBOL(dm_table_get_md); |
1134e5ae | 2186 | |
f349b0a3 MM |
2187 | const char *dm_table_device_name(struct dm_table *t) |
2188 | { | |
2189 | return dm_device_name(t->md); | |
2190 | } | |
2191 | EXPORT_SYMBOL_GPL(dm_table_device_name); | |
2192 | ||
9974fa2c MS |
2193 | void dm_table_run_md_queue_async(struct dm_table *t) |
2194 | { | |
9974fa2c MS |
2195 | if (!dm_table_request_based(t)) |
2196 | return; | |
2197 | ||
33bd6f06 MS |
2198 | if (t->md->queue) |
2199 | blk_mq_run_hw_queues(t->md->queue, true); | |
9974fa2c MS |
2200 | } |
2201 | EXPORT_SYMBOL(dm_table_run_md_queue_async); | |
2202 |