drivers/md/dm.c (Linux 5.0-rc5)
1da177e4
LT
1/*
2 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
784aae73 3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
1da177e4
LT
4 *
5 * This file is released under the GPL.
6 */
7
4cc96131
MS
8#include "dm-core.h"
9#include "dm-rq.h"
51e5b2bd 10#include "dm-uevent.h"
1da177e4
LT
11
12#include <linux/init.h>
13#include <linux/module.h>
48c9c27b 14#include <linux/mutex.h>
174cd4b1 15#include <linux/sched/signal.h>
1da177e4
LT
16#include <linux/blkpg.h>
17#include <linux/bio.h>
1da177e4 18#include <linux/mempool.h>
f26c5719 19#include <linux/dax.h>
1da177e4
LT
20#include <linux/slab.h>
21#include <linux/idr.h>
7e026c8c 22#include <linux/uio.h>
3ac51e74 23#include <linux/hdreg.h>
3f77316d 24#include <linux/delay.h>
ffcc3936 25#include <linux/wait.h>
71cdb697 26#include <linux/pr.h>
b0b4d7c6 27#include <linux/refcount.h>
55782138 28
72d94861
AK
29#define DM_MSG_PREFIX "core"
30
60935eb2
MB
31/*
32 * Cookies are numeric values sent with CHANGE and REMOVE
33 * uevents while resuming, removing or renaming the device.
34 */
35#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
36#define DM_COOKIE_LENGTH 24
37
1da177e4
LT
38static const char *_name = DM_NAME;
39
40static unsigned int major = 0;
41static unsigned int _major = 0;
42
d15b774c
AK
43static DEFINE_IDR(_minor_idr);
44
f32c10b0 45static DEFINE_SPINLOCK(_minor_lock);
2c140a24
MP
46
47static void do_deferred_remove(struct work_struct *w);
48
49static DECLARE_WORK(deferred_remove_work, do_deferred_remove);
50
acfe0ad7
MP
51static struct workqueue_struct *deferred_remove_workqueue;
52
93e6442c
MP
53atomic_t dm_global_event_nr = ATOMIC_INIT(0);
54DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq);
55
62e08243
MP
56void dm_issue_global_event(void)
57{
58 atomic_inc(&dm_global_event_nr);
59 wake_up(&dm_global_eventq);
60}
61
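/*
 * Illustrative sketch (not part of this file): a waiter pairs the event
 * counter with the waitqueue above, conceptually along the lines of:
 *
 *	unsigned seen = atomic_read(&dm_global_event_nr);
 *	...
 *	wait_event_interruptible(dm_global_eventq,
 *				 atomic_read(&dm_global_event_nr) != seen);
 *
 * dm_issue_global_event() is what completes such a wait.
 */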
1da177e4 62/*
64f52b0e 63 * One of these is allocated (on-stack) per original bio.
1da177e4 64 */
64f52b0e 65struct clone_info {
64f52b0e
MS
66 struct dm_table *map;
67 struct bio *bio;
68 struct dm_io *io;
69 sector_t sector;
70 unsigned sector_count;
71};
72
73/*
74 * One of these is allocated per clone bio.
75 */
76#define DM_TIO_MAGIC 7282014
77struct dm_target_io {
78 unsigned magic;
79 struct dm_io *io;
80 struct dm_target *ti;
81 unsigned target_bio_nr;
82 unsigned *len_ptr;
83 bool inside_dm_io;
84 struct bio clone;
85};
86
1da177e4 87/*
745dc570 88 * One of these is allocated per original bio.
64f52b0e 89 * It contains the first clone used for that original.
1da177e4 90 */
64f52b0e 91#define DM_IO_MAGIC 5191977
1da177e4 92struct dm_io {
64f52b0e 93 unsigned magic;
1da177e4 94 struct mapped_device *md;
4e4cbee9 95 blk_status_t status;
1da177e4 96 atomic_t io_count;
745dc570 97 struct bio *orig_bio;
3eaf840e 98 unsigned long start_time;
f88fb981 99 spinlock_t endio_lock;
fd2ed4d2 100 struct dm_stats_aux stats_aux;
64f52b0e
MS
101 /* last member of dm_target_io is 'struct bio' */
102 struct dm_target_io tio;
1da177e4
LT
103};
104
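/*
 * Rough memory layout behind the structs above (a sketch that matches the
 * offset arithmetic in dm_per_bio_data() below):
 *
 *   first clone (from io_bs): [per-bio data][struct dm_io ... [struct dm_target_io ... [struct bio]]]
 *   extra clones (from bs):   [per-bio data][struct dm_target_io ... [struct bio]]
 *
 * The bio is always the last member, so container_of() on a clone bio
 * recovers its dm_target_io, and stepping back over the front pad recovers
 * the target's per-bio data.
 */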
64f52b0e
MS
105void *dm_per_bio_data(struct bio *bio, size_t data_size)
106{
107 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
108 if (!tio->inside_dm_io)
109 return (char *)bio - offsetof(struct dm_target_io, clone) - data_size;
110 return (char *)bio - offsetof(struct dm_target_io, clone) - offsetof(struct dm_io, tio) - data_size;
111}
112EXPORT_SYMBOL_GPL(dm_per_bio_data);
113
114struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
115{
116 struct dm_io *io = (struct dm_io *)((char *)data + data_size);
117 if (io->magic == DM_IO_MAGIC)
118 return (struct bio *)((char *)io + offsetof(struct dm_io, tio) + offsetof(struct dm_target_io, clone));
119 BUG_ON(io->magic != DM_TIO_MAGIC);
120 return (struct bio *)((char *)io + offsetof(struct dm_target_io, clone));
121}
122EXPORT_SYMBOL_GPL(dm_bio_from_per_bio_data);
123
124unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
125{
126 return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
127}
128EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr);
129
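/*
 * Usage sketch (hypothetical target, not part of this file): a target that
 * set ti->per_io_data_size in its .ctr can stash per-bio state like this:
 *
 *	struct my_io {				// assumed example structure
 *		sector_t orig_sector;
 *	};
 *
 *	static int my_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		struct my_io *mio = dm_per_bio_data(bio, sizeof(struct my_io));
 *
 *		mio->orig_sector = bio->bi_iter.bi_sector;
 *		...
 *		return DM_MAPIO_REMAPPED;
 *	}
 *
 * dm_bio_from_per_bio_data() performs the reverse lookup when only the
 * per-bio data pointer is at hand.
 */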
ba61fdd1
JM
130#define MINOR_ALLOCED ((void *)-1)
131
1da177e4
LT
132/*
133 * Bits for the md->flags field.
134 */
1eb787ec 135#define DMF_BLOCK_IO_FOR_SUSPEND 0
1da177e4 136#define DMF_SUSPENDED 1
aa8d7c2f 137#define DMF_FROZEN 2
fba9f90e 138#define DMF_FREEING 3
5c6bd75d 139#define DMF_DELETING 4
2e93ccc1 140#define DMF_NOFLUSH_SUSPENDING 5
8ae12666
KO
141#define DMF_DEFERRED_REMOVE 6
142#define DMF_SUSPENDED_INTERNALLY 7
1da177e4 143
115485e8 144#define DM_NUMA_NODE NUMA_NO_NODE
115485e8 145static int dm_numa_node = DM_NUMA_NODE;
faad87df 146
e6ee8c0b
KU
147/*
148 * For mempool pre-allocation at table load time.
149 */
150struct dm_md_mempools {
6f1c819c
KO
151 struct bio_set bs;
152 struct bio_set io_bs;
e6ee8c0b
KU
153};
154
86f1152b
BM
155struct table_device {
156 struct list_head list;
b0b4d7c6 157 refcount_t count;
86f1152b
BM
158 struct dm_dev dm_dev;
159};
160
8fbf26ad 161static struct kmem_cache *_rq_tio_cache;
1ae49ea2 162static struct kmem_cache *_rq_cache;
94818742 163
e8603136
MS
164/*
165 * Bio-based DM's mempools' reserved IOs set by the user.
166 */
4cc96131 167#define RESERVED_BIO_BASED_IOS 16
e8603136
MS
168static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;
169
115485e8
MS
170static int __dm_get_module_param_int(int *module_param, int min, int max)
171{
6aa7de05 172 int param = READ_ONCE(*module_param);
115485e8
MS
173 int modified_param = 0;
174 bool modified = true;
175
176 if (param < min)
177 modified_param = min;
178 else if (param > max)
179 modified_param = max;
180 else
181 modified = false;
182
183 if (modified) {
184 (void)cmpxchg(module_param, param, modified_param);
185 param = modified_param;
186 }
187
188 return param;
189}
190
4cc96131
MS
191unsigned __dm_get_module_param(unsigned *module_param,
192 unsigned def, unsigned max)
f4790826 193{
6aa7de05 194 unsigned param = READ_ONCE(*module_param);
09c2d531 195 unsigned modified_param = 0;
f4790826 196
09c2d531
MS
197 if (!param)
198 modified_param = def;
199 else if (param > max)
200 modified_param = max;
f4790826 201
09c2d531
MS
202 if (modified_param) {
203 (void)cmpxchg(module_param, param, modified_param);
204 param = modified_param;
f4790826
MS
205 }
206
09c2d531 207 return param;
f4790826
MS
208}
209
e8603136
MS
210unsigned dm_get_reserved_bio_based_ios(void)
211{
09c2d531 212 return __dm_get_module_param(&reserved_bio_based_ios,
4cc96131 213 RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS);
e8603136
MS
214}
215EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);
216
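/*
 * Example of the clamping above: writing 0 to the reserved_bio_based_ios
 * module parameter makes dm_get_reserved_bio_based_ios() fall back to
 * RESERVED_BIO_BASED_IOS (16), while values above DM_RESERVED_MAX_IOS are
 * silently capped to that maximum.
 */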
115485e8
MS
217static unsigned dm_get_numa_node(void)
218{
219 return __dm_get_module_param_int(&dm_numa_node,
220 DM_NUMA_NODE, num_online_nodes() - 1);
221}
222
1da177e4
LT
223static int __init local_init(void)
224{
51157b4a 225 int r = -ENOMEM;
1da177e4 226
8fbf26ad
KU
227 _rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
228 if (!_rq_tio_cache)
dde1e1ec 229 return r;
8fbf26ad 230
eca7ee6d 231 _rq_cache = kmem_cache_create("dm_old_clone_request", sizeof(struct request),
1ae49ea2
MS
232 __alignof__(struct request), 0, NULL);
233 if (!_rq_cache)
234 goto out_free_rq_tio_cache;
235
51e5b2bd 236 r = dm_uevent_init();
51157b4a 237 if (r)
1ae49ea2 238 goto out_free_rq_cache;
51e5b2bd 239
acfe0ad7
MP
240 deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
241 if (!deferred_remove_workqueue) {
242 r = -ENOMEM;
243 goto out_uevent_exit;
244 }
245
1da177e4
LT
246 _major = major;
247 r = register_blkdev(_major, _name);
51157b4a 248 if (r < 0)
acfe0ad7 249 goto out_free_workqueue;
1da177e4
LT
250
251 if (!_major)
252 _major = r;
253
254 return 0;
51157b4a 255
acfe0ad7
MP
256out_free_workqueue:
257 destroy_workqueue(deferred_remove_workqueue);
51157b4a
KU
258out_uevent_exit:
259 dm_uevent_exit();
1ae49ea2
MS
260out_free_rq_cache:
261 kmem_cache_destroy(_rq_cache);
8fbf26ad
KU
262out_free_rq_tio_cache:
263 kmem_cache_destroy(_rq_tio_cache);
51157b4a
KU
264
265 return r;
1da177e4
LT
266}
267
268static void local_exit(void)
269{
2c140a24 270 flush_scheduled_work();
acfe0ad7 271 destroy_workqueue(deferred_remove_workqueue);
2c140a24 272
1ae49ea2 273 kmem_cache_destroy(_rq_cache);
8fbf26ad 274 kmem_cache_destroy(_rq_tio_cache);
00d59405 275 unregister_blkdev(_major, _name);
51e5b2bd 276 dm_uevent_exit();
1da177e4
LT
277
278 _major = 0;
279
280 DMINFO("cleaned up");
281}
282
b9249e55 283static int (*_inits[])(void) __initdata = {
1da177e4
LT
284 local_init,
285 dm_target_init,
286 dm_linear_init,
287 dm_stripe_init,
952b3557 288 dm_io_init,
945fa4d2 289 dm_kcopyd_init,
1da177e4 290 dm_interface_init,
fd2ed4d2 291 dm_statistics_init,
1da177e4
LT
292};
293
b9249e55 294static void (*_exits[])(void) = {
1da177e4
LT
295 local_exit,
296 dm_target_exit,
297 dm_linear_exit,
298 dm_stripe_exit,
952b3557 299 dm_io_exit,
945fa4d2 300 dm_kcopyd_exit,
1da177e4 301 dm_interface_exit,
fd2ed4d2 302 dm_statistics_exit,
1da177e4
LT
303};
304
305static int __init dm_init(void)
306{
307 const int count = ARRAY_SIZE(_inits);
308
309 int r, i;
310
311 for (i = 0; i < count; i++) {
312 r = _inits[i]();
313 if (r)
314 goto bad;
315 }
316
317 return 0;
318
319 bad:
320 while (i--)
321 _exits[i]();
322
323 return r;
324}
325
326static void __exit dm_exit(void)
327{
328 int i = ARRAY_SIZE(_exits);
329
330 while (i--)
331 _exits[i]();
d15b774c
AK
332
333 /*
334 * Should be empty by this point.
335 */
d15b774c 336 idr_destroy(&_minor_idr);
1da177e4
LT
337}
338
339/*
340 * Block device functions
341 */
432a212c
MA
342int dm_deleting_md(struct mapped_device *md)
343{
344 return test_bit(DMF_DELETING, &md->flags);
345}
346
fe5f9f2c 347static int dm_blk_open(struct block_device *bdev, fmode_t mode)
1da177e4
LT
348{
349 struct mapped_device *md;
350
fba9f90e
JM
351 spin_lock(&_minor_lock);
352
fe5f9f2c 353 md = bdev->bd_disk->private_data;
fba9f90e
JM
354 if (!md)
355 goto out;
356
5c6bd75d 357 if (test_bit(DMF_FREEING, &md->flags) ||
432a212c 358 dm_deleting_md(md)) {
fba9f90e
JM
359 md = NULL;
360 goto out;
361 }
362
1da177e4 363 dm_get(md);
5c6bd75d 364 atomic_inc(&md->open_count);
fba9f90e
JM
365out:
366 spin_unlock(&_minor_lock);
367
368 return md ? 0 : -ENXIO;
1da177e4
LT
369}
370
db2a144b 371static void dm_blk_close(struct gendisk *disk, fmode_t mode)
1da177e4 372{
63a4f065 373 struct mapped_device *md;
6e9624b8 374
4a1aeb98
MB
375 spin_lock(&_minor_lock);
376
63a4f065
MS
377 md = disk->private_data;
378 if (WARN_ON(!md))
379 goto out;
380
2c140a24
MP
381 if (atomic_dec_and_test(&md->open_count) &&
382 (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
acfe0ad7 383 queue_work(deferred_remove_workqueue, &deferred_remove_work);
2c140a24 384
1da177e4 385 dm_put(md);
63a4f065 386out:
4a1aeb98 387 spin_unlock(&_minor_lock);
1da177e4
LT
388}
389
5c6bd75d
AK
390int dm_open_count(struct mapped_device *md)
391{
392 return atomic_read(&md->open_count);
393}
394
395/*
396 * Guarantees nothing is using the device before it's deleted.
397 */
2c140a24 398int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
5c6bd75d
AK
399{
400 int r = 0;
401
402 spin_lock(&_minor_lock);
403
2c140a24 404 if (dm_open_count(md)) {
5c6bd75d 405 r = -EBUSY;
2c140a24
MP
406 if (mark_deferred)
407 set_bit(DMF_DEFERRED_REMOVE, &md->flags);
408 } else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
409 r = -EEXIST;
5c6bd75d
AK
410 else
411 set_bit(DMF_DELETING, &md->flags);
412
413 spin_unlock(&_minor_lock);
414
415 return r;
416}
417
2c140a24
MP
418int dm_cancel_deferred_remove(struct mapped_device *md)
419{
420 int r = 0;
421
422 spin_lock(&_minor_lock);
423
424 if (test_bit(DMF_DELETING, &md->flags))
425 r = -EBUSY;
426 else
427 clear_bit(DMF_DEFERRED_REMOVE, &md->flags);
428
429 spin_unlock(&_minor_lock);
430
431 return r;
432}
433
434static void do_deferred_remove(struct work_struct *w)
435{
436 dm_deferred_remove();
437}
438
fd2ed4d2
MP
439sector_t dm_get_size(struct mapped_device *md)
440{
441 return get_capacity(md->disk);
442}
443
9974fa2c
MS
444struct request_queue *dm_get_md_queue(struct mapped_device *md)
445{
446 return md->queue;
447}
448
fd2ed4d2
MP
449struct dm_stats *dm_get_stats(struct mapped_device *md)
450{
451 return &md->stats;
452}
453
3ac51e74
DW
454static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
455{
456 struct mapped_device *md = bdev->bd_disk->private_data;
457
458 return dm_get_geometry(md, geo);
459}
460
e76239a3
CH
461static int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
462 struct blk_zone *zones, unsigned int *nr_zones,
463 gfp_t gfp_mask)
464{
465#ifdef CONFIG_BLK_DEV_ZONED
466 struct mapped_device *md = disk->private_data;
467 struct dm_target *tgt;
468 struct dm_table *map;
469 int srcu_idx, ret;
470
471 if (dm_suspended_md(md))
472 return -EAGAIN;
473
474 map = dm_get_live_table(md, &srcu_idx);
475 if (!map)
476 return -EIO;
477
478 tgt = dm_table_find_target(map, sector);
479 if (!dm_target_is_valid(tgt)) {
480 ret = -EIO;
481 goto out;
482 }
483
484 /*
485 * If we are executing this, we already know that the block device
486 * is a zoned device and so each target should have support for that
487 * type of drive. A missing report_zones method means that the target
488 * driver has a problem.
489 */
490 if (WARN_ON(!tgt->type->report_zones)) {
491 ret = -EIO;
492 goto out;
493 }
494
495 /*
496 * blkdev_report_zones() will loop and call this again to cover all the
497 * zones of the target, eventually moving on to the next target.
498 * So there is no need to loop here trying to fill the entire array
499 * of zones.
500 */
501 ret = tgt->type->report_zones(tgt, sector, zones,
502 nr_zones, gfp_mask);
503
504out:
505 dm_put_live_table(md, srcu_idx);
506 return ret;
507#else
508 return -ENOTSUPP;
509#endif
510}
511
971888c4 512static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
5bd5e8d8 513 struct block_device **bdev)
971888c4 514 __acquires(md->io_barrier)
aa129a22 515{
66482026 516 struct dm_target *tgt;
6c182cd8 517 struct dm_table *map;
971888c4 518 int r;
aa129a22 519
6c182cd8 520retry:
e56f81e0 521 r = -ENOTTY;
971888c4 522 map = dm_get_live_table(md, srcu_idx);
aa129a22 523 if (!map || !dm_table_get_size(map))
971888c4 524 return r;
aa129a22
MB
525
526 /* We only support devices that have a single target */
527 if (dm_table_get_num_targets(map) != 1)
971888c4 528 return r;
aa129a22 529
66482026
MS
530 tgt = dm_table_get_target(map, 0);
531 if (!tgt->type->prepare_ioctl)
971888c4 532 return r;
519049af 533
971888c4
MS
534 if (dm_suspended_md(md))
535 return -EAGAIN;
aa129a22 536
5bd5e8d8 537 r = tgt->type->prepare_ioctl(tgt, bdev);
5bbbfdf6 538 if (r == -ENOTCONN && !fatal_signal_pending(current)) {
971888c4 539 dm_put_live_table(md, *srcu_idx);
6c182cd8
HR
540 msleep(10);
541 goto retry;
542 }
971888c4 543
e56f81e0
CH
544 return r;
545}
546
971888c4
MS
547static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx)
548 __releases(md->io_barrier)
549{
550 dm_put_live_table(md, srcu_idx);
551}
552
e56f81e0
CH
553static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
554 unsigned int cmd, unsigned long arg)
555{
556 struct mapped_device *md = bdev->bd_disk->private_data;
971888c4 557 int r, srcu_idx;
e56f81e0 558
5bd5e8d8 559 r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
e56f81e0 560 if (r < 0)
971888c4 561 goto out;
6c182cd8 562
e56f81e0
CH
563 if (r > 0) {
564 /*
e980f623
CH
565 * Target determined this ioctl is being issued against a
566 * subset of the parent bdev; require extra privileges.
e56f81e0 567 */
e980f623
CH
568 if (!capable(CAP_SYS_RAWIO)) {
569 DMWARN_LIMIT(
570 "%s: sending ioctl %x to DM device without required privilege.",
571 current->comm, cmd);
572 r = -ENOIOCTLCMD;
e56f81e0 573 goto out;
e980f623 574 }
e56f81e0 575 }
6c182cd8 576
66482026 577 r = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
e56f81e0 578out:
971888c4 579 dm_unprepare_ioctl(md, srcu_idx);
aa129a22
MB
580 return r;
581}
582
978e51ba
MS
583static void start_io_acct(struct dm_io *io);
584
585static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
1da177e4 586{
64f52b0e
MS
587 struct dm_io *io;
588 struct dm_target_io *tio;
589 struct bio *clone;
590
6f1c819c 591 clone = bio_alloc_bioset(GFP_NOIO, 0, &md->io_bs);
64f52b0e
MS
592 if (!clone)
593 return NULL;
594
595 tio = container_of(clone, struct dm_target_io, clone);
596 tio->inside_dm_io = true;
597 tio->io = NULL;
598
599 io = container_of(tio, struct dm_io, tio);
600 io->magic = DM_IO_MAGIC;
978e51ba
MS
601 io->status = 0;
602 atomic_set(&io->io_count, 1);
603 io->orig_bio = bio;
604 io->md = md;
605 spin_lock_init(&io->endio_lock);
606
607 start_io_acct(io);
64f52b0e
MS
608
609 return io;
1da177e4
LT
610}
611
028867ac 612static void free_io(struct mapped_device *md, struct dm_io *io)
1da177e4 613{
64f52b0e
MS
614 bio_put(&io->tio.clone);
615}
616
617static struct dm_target_io *alloc_tio(struct clone_info *ci, struct dm_target *ti,
618 unsigned target_bio_nr, gfp_t gfp_mask)
619{
620 struct dm_target_io *tio;
621
622 if (!ci->io->tio.io) {
623 /* the dm_target_io embedded in ci->io is available */
624 tio = &ci->io->tio;
625 } else {
6f1c819c 626 struct bio *clone = bio_alloc_bioset(gfp_mask, 0, &ci->io->md->bs);
64f52b0e
MS
627 if (!clone)
628 return NULL;
629
630 tio = container_of(clone, struct dm_target_io, clone);
631 tio->inside_dm_io = false;
632 }
633
634 tio->magic = DM_TIO_MAGIC;
635 tio->io = ci->io;
636 tio->ti = ti;
637 tio->target_bio_nr = target_bio_nr;
638
639 return tio;
1da177e4
LT
640}
641
cfae7529 642static void free_tio(struct dm_target_io *tio)
1da177e4 643{
64f52b0e
MS
644 if (tio->inside_dm_io)
645 return;
dba14160 646 bio_put(&tio->clone);
1da177e4
LT
647}
648
c4576aed 649static bool md_in_flight_bios(struct mapped_device *md)
90abb8c4 650{
6f757231
MP
651 int cpu;
652 struct hd_struct *part = &dm_disk(md)->part0;
b7934ba4 653 long sum = 0;
6f757231
MP
654
655 for_each_possible_cpu(cpu) {
b7934ba4
JA
656 sum += part_stat_local_read_cpu(part, in_flight[0], cpu);
657 sum += part_stat_local_read_cpu(part, in_flight[1], cpu);
6f757231
MP
658 }
659
b7934ba4 660 return sum != 0;
90abb8c4
KU
661}
662
c4576aed
MS
663static bool md_in_flight(struct mapped_device *md)
664{
665 if (queue_is_mq(md->queue))
3c94d83c 666 return blk_mq_queue_inflight(md->queue);
c4576aed
MS
667 else
668 return md_in_flight_bios(md);
90abb8c4
KU
669}
670
3eaf840e
JNN
671static void start_io_acct(struct dm_io *io)
672{
673 struct mapped_device *md = io->md;
745dc570 674 struct bio *bio = io->orig_bio;
3eaf840e
JNN
675
676 io->start_time = jiffies;
677
ddcf35d3
MC
678 generic_start_io_acct(md->queue, bio_op(bio), bio_sectors(bio),
679 &dm_disk(md)->part0);
f3986374 680
fd2ed4d2 681 if (unlikely(dm_stats_used(&md->stats)))
528ec5ab
MC
682 dm_stats_account_io(&md->stats, bio_data_dir(bio),
683 bio->bi_iter.bi_sector, bio_sectors(bio),
684 false, 0, &io->stats_aux);
3eaf840e
JNN
685}
686
d221d2e7 687static void end_io_acct(struct dm_io *io)
3eaf840e
JNN
688{
689 struct mapped_device *md = io->md;
745dc570 690 struct bio *bio = io->orig_bio;
3eaf840e 691 unsigned long duration = jiffies - io->start_time;
3eaf840e 692
ddcf35d3
MC
693 generic_end_io_acct(md->queue, bio_op(bio), &dm_disk(md)->part0,
694 io->start_time);
3eaf840e 695
fd2ed4d2 696 if (unlikely(dm_stats_used(&md->stats)))
528ec5ab
MC
697 dm_stats_account_io(&md->stats, bio_data_dir(bio),
698 bio->bi_iter.bi_sector, bio_sectors(bio),
699 true, duration, &io->stats_aux);
fd2ed4d2 700
d221d2e7 701 /* nudge anyone waiting on suspend queue */
b7934ba4 702 if (unlikely(waitqueue_active(&md->wait)))
d221d2e7 703 wake_up(&md->wait);
3eaf840e
JNN
704}
705
1da177e4
LT
706/*
707 * Add the bio to the list of deferred io.
708 */
92c63902 709static void queue_io(struct mapped_device *md, struct bio *bio)
1da177e4 710{
05447420 711 unsigned long flags;
1da177e4 712
05447420 713 spin_lock_irqsave(&md->deferred_lock, flags);
1da177e4 714 bio_list_add(&md->deferred, bio);
05447420 715 spin_unlock_irqrestore(&md->deferred_lock, flags);
6a8736d1 716 queue_work(md->wq, &md->work);
1da177e4
LT
717}
718
719/*
720 * Everyone (including functions in this file) should use this
721 * function to access the md->map field, and make sure they call
83d5e5b0 722 * dm_put_live_table() when finished.
1da177e4 723 */
83d5e5b0 724struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier)
1da177e4 725{
83d5e5b0
MP
726 *srcu_idx = srcu_read_lock(&md->io_barrier);
727
728 return srcu_dereference(md->map, &md->io_barrier);
729}
1da177e4 730
83d5e5b0
MP
731void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier)
732{
733 srcu_read_unlock(&md->io_barrier, srcu_idx);
734}
735
736void dm_sync_table(struct mapped_device *md)
737{
738 synchronize_srcu(&md->io_barrier);
739 synchronize_rcu_expedited();
740}
741
742/*
743 * A fast alternative to dm_get_live_table/dm_put_live_table.
744 * The caller must not block between these two functions.
745 */
746static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
747{
748 rcu_read_lock();
749 return rcu_dereference(md->map);
750}
1da177e4 751
83d5e5b0
MP
752static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
753{
754 rcu_read_unlock();
1da177e4
LT
755}
756
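/*
 * Typical access pattern for the helpers above (a sketch):
 *
 *	int srcu_idx;
 *	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 *
 *	if (map) {
 *		... use the table; sleeping is allowed ...
 *	}
 *	dm_put_live_table(md, srcu_idx);
 *
 * The _fast variants trade SRCU for plain RCU and so must not sleep
 * between get and put.
 */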
971888c4
MS
757static char *_dm_claim_ptr = "I belong to device-mapper";
758
86f1152b
BM
759/*
760 * Open a table device so we can use it as a map destination.
761 */
762static int open_table_device(struct table_device *td, dev_t dev,
763 struct mapped_device *md)
764{
86f1152b
BM
765 struct block_device *bdev;
766
767 int r;
768
769 BUG_ON(td->dm_dev.bdev);
770
519049af 771 bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _dm_claim_ptr);
86f1152b
BM
772 if (IS_ERR(bdev))
773 return PTR_ERR(bdev);
774
775 r = bd_link_disk_holder(bdev, dm_disk(md));
776 if (r) {
777 blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL);
778 return r;
779 }
780
781 td->dm_dev.bdev = bdev;
817bf402 782 td->dm_dev.dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
86f1152b
BM
783 return 0;
784}
785
786/*
787 * Close a table device that we've been using.
788 */
789static void close_table_device(struct table_device *td, struct mapped_device *md)
790{
791 if (!td->dm_dev.bdev)
792 return;
793
794 bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md));
795 blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
817bf402 796 put_dax(td->dm_dev.dax_dev);
86f1152b 797 td->dm_dev.bdev = NULL;
817bf402 798 td->dm_dev.dax_dev = NULL;
86f1152b
BM
799}
800
801static struct table_device *find_table_device(struct list_head *l, dev_t dev,
802 fmode_t mode) {
803 struct table_device *td;
804
805 list_for_each_entry(td, l, list)
806 if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
807 return td;
808
809 return NULL;
810}
811
812int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
813 struct dm_dev **result) {
814 int r;
815 struct table_device *td;
816
817 mutex_lock(&md->table_devices_lock);
818 td = find_table_device(&md->table_devices, dev, mode);
819 if (!td) {
115485e8 820 td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
86f1152b
BM
821 if (!td) {
822 mutex_unlock(&md->table_devices_lock);
823 return -ENOMEM;
824 }
825
826 td->dm_dev.mode = mode;
827 td->dm_dev.bdev = NULL;
828
829 if ((r = open_table_device(td, dev, md))) {
830 mutex_unlock(&md->table_devices_lock);
831 kfree(td);
832 return r;
833 }
834
835 format_dev_t(td->dm_dev.name, dev);
836
b0b4d7c6 837 refcount_set(&td->count, 1);
86f1152b 838 list_add(&td->list, &md->table_devices);
b0b4d7c6
ER
839 } else {
840 refcount_inc(&td->count);
86f1152b 841 }
86f1152b
BM
842 mutex_unlock(&md->table_devices_lock);
843
844 *result = &td->dm_dev;
845 return 0;
846}
847EXPORT_SYMBOL_GPL(dm_get_table_device);
848
849void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
850{
851 struct table_device *td = container_of(d, struct table_device, dm_dev);
852
853 mutex_lock(&md->table_devices_lock);
b0b4d7c6 854 if (refcount_dec_and_test(&td->count)) {
86f1152b
BM
855 close_table_device(td, md);
856 list_del(&td->list);
857 kfree(td);
858 }
859 mutex_unlock(&md->table_devices_lock);
860}
861EXPORT_SYMBOL(dm_put_table_device);
862
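/*
 * Illustrative pairing of the two exports above (a sketch, error handling
 * trimmed):
 *
 *	struct dm_dev *dd;
 *
 *	if (!dm_get_table_device(md, dev, FMODE_READ | FMODE_WRITE, &dd)) {
 *		... use dd->bdev or dd->dax_dev ...
 *		dm_put_table_device(md, dd);
 *	}
 *
 * References are counted per (dev, mode) pair, so repeated gets of the same
 * device reuse a single open of the underlying block device.
 */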
863static void free_table_devices(struct list_head *devices)
864{
865 struct list_head *tmp, *next;
866
867 list_for_each_safe(tmp, next, devices) {
868 struct table_device *td = list_entry(tmp, struct table_device, list);
869
870 DMWARN("dm_destroy: %s still exists with %d references",
b0b4d7c6 871 td->dm_dev.name, refcount_read(&td->count));
86f1152b
BM
872 kfree(td);
873 }
874}
875
3ac51e74
DW
876/*
877 * Get the geometry associated with a dm device
878 */
879int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
880{
881 *geo = md->geometry;
882
883 return 0;
884}
885
886/*
887 * Set the geometry of a device.
888 */
889int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
890{
891 sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;
892
893 if (geo->start > sz) {
894 DMWARN("Start sector is beyond the geometry limits.");
895 return -EINVAL;
896 }
897
898 md->geometry = *geo;
899
900 return 0;
901}
902
2e93ccc1
KU
903static int __noflush_suspending(struct mapped_device *md)
904{
905 return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
906}
907
1da177e4
LT
908/*
909 * Decrements the number of outstanding ios that a bio has been
910 * cloned into, completing the original io if necessary.
911 */
4e4cbee9 912static void dec_pending(struct dm_io *io, blk_status_t error)
1da177e4 913{
2e93ccc1 914 unsigned long flags;
4e4cbee9 915 blk_status_t io_error;
b35f8caa
MB
916 struct bio *bio;
917 struct mapped_device *md = io->md;
2e93ccc1
KU
918
919 /* Push-back supersedes any I/O errors */
f88fb981
KU
920 if (unlikely(error)) {
921 spin_lock_irqsave(&io->endio_lock, flags);
745dc570 922 if (!(io->status == BLK_STS_DM_REQUEUE && __noflush_suspending(md)))
4e4cbee9 923 io->status = error;
f88fb981
KU
924 spin_unlock_irqrestore(&io->endio_lock, flags);
925 }
1da177e4
LT
926
927 if (atomic_dec_and_test(&io->io_count)) {
4e4cbee9 928 if (io->status == BLK_STS_DM_REQUEUE) {
2e93ccc1
KU
929 /*
930 * Target requested pushing back the I/O.
2e93ccc1 931 */
022c2611 932 spin_lock_irqsave(&md->deferred_lock, flags);
6a8736d1 933 if (__noflush_suspending(md))
745dc570
MS
934 /* NOTE early return due to BLK_STS_DM_REQUEUE below */
935 bio_list_add_head(&md->deferred, io->orig_bio);
6a8736d1 936 else
2e93ccc1 937 /* noflush suspend was interrupted. */
4e4cbee9 938 io->status = BLK_STS_IOERR;
022c2611 939 spin_unlock_irqrestore(&md->deferred_lock, flags);
2e93ccc1
KU
940 }
941
4e4cbee9 942 io_error = io->status;
745dc570 943 bio = io->orig_bio;
6a8736d1
TH
944 end_io_acct(io);
945 free_io(md, io);
946
4e4cbee9 947 if (io_error == BLK_STS_DM_REQUEUE)
6a8736d1 948 return;
2e93ccc1 949
1eff9d32 950 if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
af7e466a 951 /*
6a8736d1 952 * Preflush done for flush with data, reissue
28a8f0d3 953 * without REQ_PREFLUSH.
af7e466a 954 */
1eff9d32 955 bio->bi_opf &= ~REQ_PREFLUSH;
6a8736d1 956 queue_io(md, bio);
af7e466a 957 } else {
b372d360 958 /* done with normal IO or empty flush */
8dd601fa
N
959 if (io_error)
960 bio->bi_status = io_error;
4246a0b6 961 bio_endio(bio);
b35f8caa 962 }
1da177e4
LT
963 }
964}
965
4cc96131 966void disable_write_same(struct mapped_device *md)
7eee4ae2
MS
967{
968 struct queue_limits *limits = dm_get_queue_limits(md);
969
970 /* device doesn't really support WRITE SAME, disable it */
971 limits->max_write_same_sectors = 0;
972}
973
ac62d620
CH
974void disable_write_zeroes(struct mapped_device *md)
975{
976 struct queue_limits *limits = dm_get_queue_limits(md);
977
978 /* device doesn't really support WRITE ZEROES, disable it */
979 limits->max_write_zeroes_sectors = 0;
980}
981
4246a0b6 982static void clone_endio(struct bio *bio)
1da177e4 983{
4e4cbee9 984 blk_status_t error = bio->bi_status;
bfc6d41c 985 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
b35f8caa 986 struct dm_io *io = tio->io;
9faf400f 987 struct mapped_device *md = tio->io->md;
1da177e4
LT
988 dm_endio_fn endio = tio->ti->type->end_io;
989
978e51ba 990 if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) {
ac62d620 991 if (bio_op(bio) == REQ_OP_WRITE_SAME &&
74d46992 992 !bio->bi_disk->queue->limits.max_write_same_sectors)
ac62d620
CH
993 disable_write_same(md);
994 if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
74d46992 995 !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
ac62d620
CH
996 disable_write_zeroes(md);
997 }
7eee4ae2 998
1be56909 999 if (endio) {
4e4cbee9 1000 int r = endio(tio->ti, bio, &error);
1be56909
CH
1001 switch (r) {
1002 case DM_ENDIO_REQUEUE:
4e4cbee9 1003 error = BLK_STS_DM_REQUEUE;
1be56909
CH
1004 /*FALLTHRU*/
1005 case DM_ENDIO_DONE:
1006 break;
1007 case DM_ENDIO_INCOMPLETE:
1008 /* The target will handle the io */
1009 return;
1010 default:
1011 DMWARN("unimplemented target endio return value: %d", r);
1012 BUG();
1013 }
1014 }
1015
cfae7529 1016 free_tio(tio);
b35f8caa 1017 dec_pending(io, error);
1da177e4
LT
1018}
1019
56a67df7
MS
1020/*
1021 * Return maximum size of I/O possible at the supplied sector up to the current
1022 * target boundary.
1023 */
1024static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
1025{
1026 sector_t target_offset = dm_target_offset(ti, sector);
1027
1028 return ti->len - target_offset;
1029}
1030
1031static sector_t max_io_len(sector_t sector, struct dm_target *ti)
1da177e4 1032{
56a67df7 1033 sector_t len = max_io_len_target_boundary(sector, ti);
542f9038 1034 sector_t offset, max_len;
1da177e4
LT
1035
1036 /*
542f9038 1037 * Does the target need to split even further?
1da177e4 1038 */
542f9038
MS
1039 if (ti->max_io_len) {
1040 offset = dm_target_offset(ti, sector);
1041 if (unlikely(ti->max_io_len & (ti->max_io_len - 1)))
1042 max_len = sector_div(offset, ti->max_io_len);
1043 else
1044 max_len = offset & (ti->max_io_len - 1);
1045 max_len = ti->max_io_len - max_len;
1046
1047 if (len > max_len)
1048 len = max_len;
1da177e4
LT
1049 }
1050
1051 return len;
1052}
1053
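/*
 * Worked example for max_io_len() (illustrative numbers): a target with
 * ti->begin = 0, ti->len = 1000 and ti->max_io_len = 128, asked about
 * sector 200, has 800 sectors left before its boundary, but the
 * power-of-2 path caps the I/O at the next 128-sector multiple, so only
 * 56 sectors (200 -> 256) are allowed.
 */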
542f9038
MS
1054int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
1055{
1056 if (len > UINT_MAX) {
1057 DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
1058 (unsigned long long)len, UINT_MAX);
1059 ti->error = "Maximum size of target IO is too large";
1060 return -EINVAL;
1061 }
1062
8f50e358
ML
1063 /*
1064 * BIO based queue uses its own splitting. When multipage bvecs
1065 * are enabled, the size of the incoming bio may be too big to
1066 * be handled by some targets, such as crypt.
1067 *
1068 * When these targets are ready for the big bio, we can remove
1069 * the limit.
1070 */
1071 ti->max_io_len = min_t(uint32_t, len, BIO_MAX_PAGES * PAGE_SIZE);
542f9038
MS
1072
1073 return 0;
1074}
1075EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
1076
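/*
 * Usage sketch (hypothetical target constructor, not part of this file):
 *
 *	static int my_ctr(struct dm_target *ti, unsigned argc, char **argv)
 *	{
 *		...
 *		r = dm_set_target_max_io_len(ti, my_chunk_sectors);
 *		if (r)
 *			return r;
 *		...
 *	}
 *
 * Targets such as dm-stripe pass their chunk size here so that incoming
 * bios get split on chunk boundaries.
 */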
f26c5719 1077static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
3d97c829
MS
1078 sector_t sector, int *srcu_idx)
1079 __acquires(md->io_barrier)
545ed20e 1080{
545ed20e
TK
1081 struct dm_table *map;
1082 struct dm_target *ti;
545ed20e 1083
f26c5719 1084 map = dm_get_live_table(md, srcu_idx);
545ed20e 1085 if (!map)
f26c5719 1086 return NULL;
545ed20e
TK
1087
1088 ti = dm_table_find_target(map, sector);
1089 if (!dm_target_is_valid(ti))
f26c5719 1090 return NULL;
545ed20e 1091
f26c5719
DW
1092 return ti;
1093}
545ed20e 1094
f26c5719 1095static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
3d97c829 1096 long nr_pages, void **kaddr, pfn_t *pfn)
f26c5719
DW
1097{
1098 struct mapped_device *md = dax_get_private(dax_dev);
1099 sector_t sector = pgoff * PAGE_SECTORS;
1100 struct dm_target *ti;
1101 long len, ret = -EIO;
1102 int srcu_idx;
545ed20e 1103
f26c5719 1104 ti = dm_dax_get_live_target(md, sector, &srcu_idx);
545ed20e 1105
f26c5719
DW
1106 if (!ti)
1107 goto out;
1108 if (!ti->type->direct_access)
1109 goto out;
1110 len = max_io_len(sector, ti) / PAGE_SECTORS;
1111 if (len < 1)
1112 goto out;
1113 nr_pages = min(len, nr_pages);
dbc62659 1114 ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn);
817bf402 1115
f26c5719 1116 out:
545ed20e 1117 dm_put_live_table(md, srcu_idx);
f26c5719
DW
1118
1119 return ret;
545ed20e
TK
1120}
1121
7e026c8c 1122static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
3d97c829 1123 void *addr, size_t bytes, struct iov_iter *i)
7e026c8c
DW
1124{
1125 struct mapped_device *md = dax_get_private(dax_dev);
1126 sector_t sector = pgoff * PAGE_SECTORS;
1127 struct dm_target *ti;
1128 long ret = 0;
1129 int srcu_idx;
1130
1131 ti = dm_dax_get_live_target(md, sector, &srcu_idx);
1132
1133 if (!ti)
1134 goto out;
1135 if (!ti->type->dax_copy_from_iter) {
1136 ret = copy_from_iter(addr, bytes, i);
1137 goto out;
1138 }
1139 ret = ti->type->dax_copy_from_iter(ti, pgoff, addr, bytes, i);
1140 out:
1141 dm_put_live_table(md, srcu_idx);
1142
1143 return ret;
1144}
1145
b3a9a0c3
DW
1146static size_t dm_dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
1147 void *addr, size_t bytes, struct iov_iter *i)
1148{
1149 struct mapped_device *md = dax_get_private(dax_dev);
1150 sector_t sector = pgoff * PAGE_SECTORS;
1151 struct dm_target *ti;
1152 long ret = 0;
1153 int srcu_idx;
1154
1155 ti = dm_dax_get_live_target(md, sector, &srcu_idx);
1156
1157 if (!ti)
1158 goto out;
1159 if (!ti->type->dax_copy_to_iter) {
1160 ret = copy_to_iter(addr, bytes, i);
1161 goto out;
1162 }
1163 ret = ti->type->dax_copy_to_iter(ti, pgoff, addr, bytes, i);
1164 out:
1165 dm_put_live_table(md, srcu_idx);
1166
1167 return ret;
1168}
1169
1dd40c3e
MP
1170/*
1171 * A target may call dm_accept_partial_bio only from the map routine. It is
c06b3e58 1172 * allowed for all bio types except REQ_PREFLUSH and REQ_OP_ZONE_RESET.
1dd40c3e
MP
1173 *
1174 * dm_accept_partial_bio informs the dm that the target only wants to process
1175 * additional n_sectors sectors of the bio and the rest of the data should be
1176 * sent in a subsequent bio.
1177 *
1178 * A diagram that explains the arithmetic:
1179 * +--------------------+---------------+-------+
1180 * | 1 | 2 | 3 |
1181 * +--------------------+---------------+-------+
1182 *
1183 * <-------------- *tio->len_ptr --------------->
1184 * <------- bi_size ------->
1185 * <-- n_sectors -->
1186 *
1187 * Region 1 was already iterated over with bio_advance or similar function.
1188 * (it may be empty if the target doesn't use bio_advance)
1189 * Region 2 is the remaining bio size that the target wants to process.
1190 * (it may be empty if region 1 is non-empty, although there is no reason
1191 * to make it empty)
1192 * The target requires that region 3 is to be sent in the next bio.
1193 *
1194 * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
1195 * the partially processed part (the sum of regions 1+2) must be the same for all
1196 * copies of the bio.
1197 */
1198void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
1199{
1200 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
1201 unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
1eff9d32 1202 BUG_ON(bio->bi_opf & REQ_PREFLUSH);
1dd40c3e
MP
1203 BUG_ON(bi_size > *tio->len_ptr);
1204 BUG_ON(n_sectors > bi_size);
1205 *tio->len_ptr -= bi_size - n_sectors;
1206 bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
1207}
1208EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
1209
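/*
 * Usage sketch (hypothetical .map method, not part of this file): a target
 * that can only handle a bio up to some internal boundary can do:
 *
 *	static int my_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		unsigned remaining = my_sectors_to_boundary(ti, bio);	// assumed helper
 *
 *		if (bio_sectors(bio) > remaining)
 *			dm_accept_partial_bio(bio, remaining);
 *		... remap and return DM_MAPIO_REMAPPED ...
 *	}
 *
 * DM core then resubmits the untouched tail (region 3 in the diagram above)
 * as a new bio.
 */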
10999307 1210/*
e76239a3
CH
1211 * The zone descriptors obtained with a zone report indicate
1212 * zone positions within the underlying device of the target. The zone
1213 * descriptors must be remapped to match their position within the dm device.
1214 * The calling target should obtain the zone information using
1215 * blkdev_report_zones() to ensure that remapping for partition offset is
1216 * already handled.
10999307 1217 */
e76239a3
CH
1218void dm_remap_zone_report(struct dm_target *ti, sector_t start,
1219 struct blk_zone *zones, unsigned int *nr_zones)
10999307
DLM
1220{
1221#ifdef CONFIG_BLK_DEV_ZONED
10999307 1222 struct blk_zone *zone;
e76239a3
CH
1223 unsigned int nrz = *nr_zones;
1224 int i;
9864cd5d 1225
10999307 1226 /*
e76239a3
CH
1227 * Remap the start sector and write pointer position of the zones in
1228 * the array. Since we may have obtained from the target's underlying
1229 * device more zones than the target size, also adjust the number
1230 * of zones.
10999307 1231 */
e76239a3
CH
1232 for (i = 0; i < nrz; i++) {
1233 zone = zones + i;
1234 if (zone->start >= start + ti->len) {
1235 memset(zone, 0, sizeof(struct blk_zone) * (nrz - i));
1236 break;
10999307
DLM
1237 }
1238
e76239a3
CH
1239 zone->start = zone->start + ti->begin - start;
1240 if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
1241 continue;
10999307 1242
e76239a3
CH
1243 if (zone->cond == BLK_ZONE_COND_FULL)
1244 zone->wp = zone->start + zone->len;
1245 else if (zone->cond == BLK_ZONE_COND_EMPTY)
1246 zone->wp = zone->start;
1247 else
1248 zone->wp = zone->wp + ti->begin - start;
10999307
DLM
1249 }
1250
e76239a3 1251 *nr_zones = i;
10999307 1252#else /* !CONFIG_BLK_DEV_ZONED */
e76239a3 1253 *nr_zones = 0;
10999307
DLM
1254#endif
1255}
1256EXPORT_SYMBOL_GPL(dm_remap_zone_report);
1257
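/*
 * Usage sketch (hypothetical zoned target, not part of this file; 'my' and
 * my_map_sector() are assumed target state/helpers): a report_zones method
 * typically queries its backing device and then lets the helper above
 * rewrite the zone positions:
 *
 *	static int my_report_zones(struct dm_target *ti, sector_t sector,
 *				   struct blk_zone *zones, unsigned int *nr_zones,
 *				   gfp_t gfp_mask)
 *	{
 *		int ret = blkdev_report_zones(my->dev->bdev,
 *					      my_map_sector(ti, sector),
 *					      zones, nr_zones, gfp_mask);
 *		if (!ret && *nr_zones)
 *			dm_remap_zone_report(ti, my->start, zones, nr_zones);
 *		return ret;
 *	}
 */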
978e51ba 1258static blk_qc_t __map_bio(struct dm_target_io *tio)
1da177e4
LT
1259{
1260 int r;
2056a782 1261 sector_t sector;
dba14160 1262 struct bio *clone = &tio->clone;
64f52b0e 1263 struct dm_io *io = tio->io;
978e51ba 1264 struct mapped_device *md = io->md;
bd2a49b8 1265 struct dm_target *ti = tio->ti;
978e51ba 1266 blk_qc_t ret = BLK_QC_T_NONE;
1da177e4 1267
1da177e4 1268 clone->bi_end_io = clone_endio;
1da177e4
LT
1269
1270 /*
1271 * Map the clone. If r == 0 we don't need to do
1272 * anything, the target has assumed ownership of
1273 * this io.
1274 */
64f52b0e 1275 atomic_inc(&io->io_count);
4f024f37 1276 sector = clone->bi_iter.bi_sector;
d67a5f4b 1277
7de3ee57 1278 r = ti->type->map(ti, clone);
846785e6
CH
1279 switch (r) {
1280 case DM_MAPIO_SUBMITTED:
1281 break;
1282 case DM_MAPIO_REMAPPED:
1da177e4 1283 /* the bio has been remapped so dispatch it */
74d46992 1284 trace_block_bio_remap(clone->bi_disk->queue, clone,
64f52b0e 1285 bio_dev(io->orig_bio), sector);
978e51ba
MS
1286 if (md->type == DM_TYPE_NVME_BIO_BASED)
1287 ret = direct_make_request(clone);
1288 else
1289 ret = generic_make_request(clone);
846785e6
CH
1290 break;
1291 case DM_MAPIO_KILL:
4e4cbee9 1292 free_tio(tio);
64f52b0e 1293 dec_pending(io, BLK_STS_IOERR);
4e4cbee9 1294 break;
846785e6 1295 case DM_MAPIO_REQUEUE:
cfae7529 1296 free_tio(tio);
64f52b0e 1297 dec_pending(io, BLK_STS_DM_REQUEUE);
846785e6
CH
1298 break;
1299 default:
45cbcd79
KU
1300 DMWARN("unimplemented target map return value: %d", r);
1301 BUG();
1da177e4 1302 }
1da177e4 1303
978e51ba 1304 return ret;
1da177e4 1305}
1da177e4 1306
e0d6609a 1307static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)
bd2a49b8 1308{
4f024f37
KO
1309 bio->bi_iter.bi_sector = sector;
1310 bio->bi_iter.bi_size = to_bytes(len);
1da177e4
LT
1311}
1312
1313/*
1314 * Creates a bio that consists of a range of complete bvecs.
1315 */
c80914e8
MS
1316static int clone_bio(struct dm_target_io *tio, struct bio *bio,
1317 sector_t sector, unsigned len)
1da177e4 1318{
dba14160 1319 struct bio *clone = &tio->clone;
1da177e4 1320
1c3b13e6
KO
1321 __bio_clone_fast(clone, bio);
1322
57c36519 1323 if (bio_integrity(bio)) {
e2460f2a
MP
1324 int r;
1325
1326 if (unlikely(!dm_target_has_integrity(tio->ti->type) &&
1327 !dm_target_passes_integrity(tio->ti->type))) {
1328 DMWARN("%s: the target %s doesn't support integrity data.",
1329 dm_device_name(tio->io->md),
1330 tio->ti->type->name);
1331 return -EIO;
1332 }
1333
1334 r = bio_integrity_clone(clone, bio, GFP_NOIO);
c80914e8
MS
1335 if (r < 0)
1336 return r;
1337 }
bd2a49b8 1338
57c36519 1339 bio_trim(clone, sector - clone->bi_iter.bi_sector, len);
c80914e8
MS
1340
1341 return 0;
1da177e4
LT
1342}
1343
318716dd
MS
1344static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
1345 struct dm_target *ti, unsigned num_bios)
f9ab94ce 1346{
dba14160 1347 struct dm_target_io *tio;
318716dd 1348 int try;
dba14160 1349
318716dd
MS
1350 if (!num_bios)
1351 return;
f9ab94ce 1352
318716dd
MS
1353 if (num_bios == 1) {
1354 tio = alloc_tio(ci, ti, 0, GFP_NOIO);
1355 bio_list_add(blist, &tio->clone);
1356 return;
1357 }
9015df24 1358
318716dd
MS
1359 for (try = 0; try < 2; try++) {
1360 int bio_nr;
1361 struct bio *bio;
1362
1363 if (try)
bc02cdbe 1364 mutex_lock(&ci->io->md->table_devices_lock);
318716dd
MS
1365 for (bio_nr = 0; bio_nr < num_bios; bio_nr++) {
1366 tio = alloc_tio(ci, ti, bio_nr, try ? GFP_NOIO : GFP_NOWAIT);
1367 if (!tio)
1368 break;
1369
1370 bio_list_add(blist, &tio->clone);
1371 }
1372 if (try)
bc02cdbe 1373 mutex_unlock(&ci->io->md->table_devices_lock);
318716dd
MS
1374 if (bio_nr == num_bios)
1375 return;
1376
1377 while ((bio = bio_list_pop(blist))) {
1378 tio = container_of(bio, struct dm_target_io, clone);
1379 free_tio(tio);
1380 }
1381 }
9015df24
AK
1382}
1383
978e51ba
MS
1384static blk_qc_t __clone_and_map_simple_bio(struct clone_info *ci,
1385 struct dm_target_io *tio, unsigned *len)
9015df24 1386{
dba14160 1387 struct bio *clone = &tio->clone;
9015df24 1388
1dd40c3e
MP
1389 tio->len_ptr = len;
1390
99778273 1391 __bio_clone_fast(clone, ci->bio);
bd2a49b8 1392 if (len)
1dd40c3e 1393 bio_setup_sector(clone, ci->sector, *len);
f9ab94ce 1394
978e51ba 1395 return __map_bio(tio);
f9ab94ce
MP
1396}
1397
14fe594d 1398static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
1dd40c3e 1399 unsigned num_bios, unsigned *len)
06a426ce 1400{
318716dd
MS
1401 struct bio_list blist = BIO_EMPTY_LIST;
1402 struct bio *bio;
1403 struct dm_target_io *tio;
1404
1405 alloc_multiple_bios(&blist, ci, ti, num_bios);
06a426ce 1406
318716dd
MS
1407 while ((bio = bio_list_pop(&blist))) {
1408 tio = container_of(bio, struct dm_target_io, clone);
978e51ba 1409 (void) __clone_and_map_simple_bio(ci, tio, len);
318716dd 1410 }
06a426ce
MS
1411}
1412
14fe594d 1413static int __send_empty_flush(struct clone_info *ci)
f9ab94ce 1414{
06a426ce 1415 unsigned target_nr = 0;
f9ab94ce
MP
1416 struct dm_target *ti;
1417
892ad71f 1418 /*
dbe3ece1
JA
1419 * Empty flush uses a statically initialized bio, as the base for
1420 * cloning. However, blkg association requires that a bdev is
1421 * associated with a gendisk, which doesn't happen until the bdev is
1422 * opened. So, blkg association is done at issue time of the flush
1423 * rather than when the device is created in alloc_dev().
892ad71f
DZ
1424 */
1425 bio_set_dev(ci->bio, ci->io->md->bdev);
1426
b372d360 1427 BUG_ON(bio_has_data(ci->bio));
f9ab94ce 1428 while ((ti = dm_table_get_target(ci->map, target_nr++)))
1dd40c3e 1429 __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
f9ab94ce 1430
892ad71f
DZ
1431 bio_disassociate_blkg(ci->bio);
1432
f9ab94ce
MP
1433 return 0;
1434}
1435
c80914e8 1436static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
f31c21e4 1437 sector_t sector, unsigned *len)
5ae89a87 1438{
dba14160 1439 struct bio *bio = ci->bio;
5ae89a87 1440 struct dm_target_io *tio;
f31c21e4 1441 int r;
5ae89a87 1442
318716dd 1443 tio = alloc_tio(ci, ti, 0, GFP_NOIO);
f31c21e4
N
1444 tio->len_ptr = len;
1445 r = clone_bio(tio, bio, sector, *len);
1446 if (r < 0) {
1447 free_tio(tio);
1448 return r;
b0d8ed4d 1449 }
978e51ba 1450 (void) __map_bio(tio);
c80914e8 1451
f31c21e4 1452 return 0;
5ae89a87
MS
1453}
1454
55a62eef 1455typedef unsigned (*get_num_bios_fn)(struct dm_target *ti);
23508a96 1456
55a62eef 1457static unsigned get_num_discard_bios(struct dm_target *ti)
23508a96 1458{
55a62eef 1459 return ti->num_discard_bios;
23508a96
MS
1460}
1461
00716545
DS
1462static unsigned get_num_secure_erase_bios(struct dm_target *ti)
1463{
1464 return ti->num_secure_erase_bios;
1465}
1466
55a62eef 1467static unsigned get_num_write_same_bios(struct dm_target *ti)
23508a96 1468{
55a62eef 1469 return ti->num_write_same_bios;
23508a96
MS
1470}
1471
ac62d620
CH
1472static unsigned get_num_write_zeroes_bios(struct dm_target *ti)
1473{
1474 return ti->num_write_zeroes_bios;
1475}
1476
23508a96 1477typedef bool (*is_split_required_fn)(struct dm_target *ti);
9eef87da 1478
23508a96
MS
1479static bool is_split_required_for_discard(struct dm_target *ti)
1480{
55a62eef 1481 return ti->split_discard_bios;
cec47e3d
KU
1482}
1483
3d7f4562 1484static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti,
53b47168 1485 unsigned num_bios, bool is_split_required)
ba1cbad9 1486{
e0d6609a 1487 unsigned len;
ba1cbad9 1488
3d7f4562
MS
1489 /*
1490 * Even though the device advertised support for this type of
1491 * request, that does not mean every target supports it, and
1492 * reconfiguration might also have changed that since the
1493 * check was performed.
1494 */
3d7f4562
MS
1495 if (!num_bios)
1496 return -EOPNOTSUPP;
ba1cbad9 1497
53b47168 1498 if (!is_split_required)
3d7f4562
MS
1499 len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
1500 else
1501 len = min((sector_t)ci->sector_count, max_io_len(ci->sector, ti));
de3ec86d 1502
3d7f4562 1503 __send_duplicate_bios(ci, ti, num_bios, &len);
e262f347 1504
3d7f4562
MS
1505 ci->sector += len;
1506 ci->sector_count -= len;
5ae89a87
MS
1507
1508 return 0;
ba1cbad9
MS
1509}
1510
3d7f4562 1511static int __send_discard(struct clone_info *ci, struct dm_target *ti)
23508a96 1512{
53b47168
MS
1513 return __send_changing_extent_only(ci, ti, get_num_discard_bios(ti),
1514 is_split_required_for_discard(ti));
23508a96 1515}
0ce65797 1516
00716545
DS
1517static int __send_secure_erase(struct clone_info *ci, struct dm_target *ti)
1518{
53b47168 1519 return __send_changing_extent_only(ci, ti, get_num_secure_erase_bios(ti), false);
00716545
DS
1520}
1521
3d7f4562 1522static int __send_write_same(struct clone_info *ci, struct dm_target *ti)
0ce65797 1523{
53b47168 1524 return __send_changing_extent_only(ci, ti, get_num_write_same_bios(ti), false);
0ce65797
MS
1525}
1526
3d7f4562 1527static int __send_write_zeroes(struct clone_info *ci, struct dm_target *ti)
ac62d620 1528{
53b47168 1529 return __send_changing_extent_only(ci, ti, get_num_write_zeroes_bios(ti), false);
ac62d620
CH
1530}
1531
0519c71e
MS
1532static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti,
1533 int *result)
1534{
1535 struct bio *bio = ci->bio;
1536
1537 if (bio_op(bio) == REQ_OP_DISCARD)
1538 *result = __send_discard(ci, ti);
00716545
DS
1539 else if (bio_op(bio) == REQ_OP_SECURE_ERASE)
1540 *result = __send_secure_erase(ci, ti);
0519c71e
MS
1541 else if (bio_op(bio) == REQ_OP_WRITE_SAME)
1542 *result = __send_write_same(ci, ti);
1543 else if (bio_op(bio) == REQ_OP_WRITE_ZEROES)
1544 *result = __send_write_zeroes(ci, ti);
1545 else
1546 return false;
1547
1548 return true;
1549}
1550
e4c93811
AK
1551/*
1552 * Select the correct strategy for processing a non-flush bio.
1553 */
14fe594d 1554static int __split_and_process_non_flush(struct clone_info *ci)
0ce65797 1555{
512875bd 1556 struct dm_target *ti;
1c3b13e6 1557 unsigned len;
c80914e8 1558 int r;
0ce65797 1559
512875bd
JN
1560 ti = dm_table_find_target(ci->map, ci->sector);
1561 if (!dm_target_is_valid(ti))
1562 return -EIO;
1563
0519c71e
MS
1564 if (unlikely(__process_abnormal_io(ci, ti, &r)))
1565 return r;
3d7f4562 1566
e76239a3 1567 len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count);
0ce65797 1568
c80914e8
MS
1569 r = __clone_and_map_data_bio(ci, ti, ci->sector, &len);
1570 if (r < 0)
1571 return r;
0ce65797 1572
1c3b13e6
KO
1573 ci->sector += len;
1574 ci->sector_count -= len;
0ce65797 1575
1c3b13e6 1576 return 0;
0ce65797
MS
1577}
1578
978e51ba
MS
1579static void init_clone_info(struct clone_info *ci, struct mapped_device *md,
1580 struct dm_table *map, struct bio *bio)
1581{
1582 ci->map = map;
1583 ci->io = alloc_io(md, bio);
1584 ci->sector = bio->bi_iter.bi_sector;
1585}
1586
a1e1cb72
MS
1587#define __dm_part_stat_sub(part, field, subnd) \
1588 (part_stat_get(part, field) -= (subnd))
1589
1da177e4 1590/*
14fe594d 1591 * Entry point to split a bio into clones and submit them to the targets.
1da177e4 1592 */
978e51ba
MS
1593static blk_qc_t __split_and_process_bio(struct mapped_device *md,
1594 struct dm_table *map, struct bio *bio)
0ce65797 1595{
1da177e4 1596 struct clone_info ci;
978e51ba 1597 blk_qc_t ret = BLK_QC_T_NONE;
512875bd 1598 int error = 0;
1da177e4 1599
83d5e5b0 1600 if (unlikely(!map)) {
6a8736d1 1601 bio_io_error(bio);
978e51ba 1602 return ret;
f0b9a450 1603 }
692d0eb9 1604
89f5fa47
MS
1605 blk_queue_split(md->queue, &bio);
1606
978e51ba 1607 init_clone_info(&ci, md, map, bio);
0ce65797 1608
1eff9d32 1609 if (bio->bi_opf & REQ_PREFLUSH) {
dbe3ece1
JA
1610 struct bio flush_bio;
1611
1612 /*
1613 * Use an on-stack bio for this, it's safe since we don't
1614 * need to reference it after submit. It's just used as
1615 * the basis for the clone(s).
1616 */
1617 bio_init(&flush_bio, NULL, 0);
1618 flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
1619 ci.bio = &flush_bio;
b372d360 1620 ci.sector_count = 0;
14fe594d 1621 error = __send_empty_flush(&ci);
b372d360 1622 /* dec_pending submits any data associated with flush */
a4aa5e56
DLM
1623 } else if (bio_op(bio) == REQ_OP_ZONE_RESET) {
1624 ci.bio = bio;
1625 ci.sector_count = 0;
1626 error = __split_and_process_non_flush(&ci);
b372d360 1627 } else {
6a8736d1 1628 ci.bio = bio;
d87f4c14 1629 ci.sector_count = bio_sectors(bio);
18a25da8 1630 while (ci.sector_count && !error) {
14fe594d 1631 error = __split_and_process_non_flush(&ci);
18a25da8
N
1632 if (current->bio_list && ci.sector_count && !error) {
1633 /*
1634 * Remainder must be passed to generic_make_request()
1635 * so that it gets handled *after* bios already submitted
1636 * have been completely processed.
1637 * We take a clone of the original to store in
745dc570 1638 * ci.io->orig_bio to be used by end_io_acct() and
18a25da8 1639 * for dec_pending to use for completion handling.
18a25da8 1640 */
f21c601a
MS
1641 struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count,
1642 GFP_NOIO, &md->queue->bio_split);
745dc570 1643 ci.io->orig_bio = b;
a1e1cb72
MS
1644
1645 /*
1646 * Adjust IO stats for each split, otherwise upon queue
1647 * reentry there will be redundant IO accounting.
1648 * NOTE: this is a stop-gap fix, a proper fix involves
1649 * significant refactoring of DM core's bio splitting
1650 * (by eliminating DM's splitting and just using bio_split)
1651 */
1652 part_stat_lock();
1653 __dm_part_stat_sub(&dm_disk(md)->part0,
1654 sectors[op_stat_group(bio_op(bio))], ci.sector_count);
1655 part_stat_unlock();
1656
18a25da8 1657 bio_chain(b, bio);
075c18c3 1658 trace_block_split(md->queue, b, bio->bi_iter.bi_sector);
978e51ba 1659 ret = generic_make_request(bio);
18a25da8
N
1660 break;
1661 }
1662 }
d87f4c14 1663 }
0ce65797 1664
1da177e4 1665 /* drop the extra reference count */
54385bf7 1666 dec_pending(ci.io, errno_to_blk_status(error));
978e51ba 1667 return ret;
0ce65797
MS
1668}
1669
cec47e3d 1670/*
978e51ba
MS
1671 * Optimized variant of __split_and_process_bio that leverages the
1672 * fact that targets that use it do _not_ have a need to split bios.
cec47e3d 1673 */
978e51ba
MS
1674static blk_qc_t __process_bio(struct mapped_device *md,
1675 struct dm_table *map, struct bio *bio)
1676{
1677 struct clone_info ci;
1678 blk_qc_t ret = BLK_QC_T_NONE;
1679 int error = 0;
1680
1681 if (unlikely(!map)) {
1682 bio_io_error(bio);
1683 return ret;
1684 }
1685
1686 init_clone_info(&ci, md, map, bio);
1687
1688 if (bio->bi_opf & REQ_PREFLUSH) {
dbe3ece1
JA
1689 struct bio flush_bio;
1690
1691 /*
1692 * Use an on-stack bio for this, it's safe since we don't
1693 * need to reference it after submit. It's just used as
1694 * the basis for the clone(s).
1695 */
1696 bio_init(&flush_bio, NULL, 0);
1697 flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
1698 ci.bio = &flush_bio;
978e51ba
MS
1699 ci.sector_count = 0;
1700 error = __send_empty_flush(&ci);
1701 /* dec_pending submits any data associated with flush */
1702 } else {
1703 struct dm_target *ti = md->immutable_target;
1704 struct dm_target_io *tio;
1705
1706 /*
1707 * Defend against IO still getting in during teardown
1708 * - as was seen for a time with nvme-fcloop
1709 */
bab5d988 1710 if (WARN_ON_ONCE(!ti || !dm_target_is_valid(ti))) {
978e51ba
MS
1711 error = -EIO;
1712 goto out;
1713 }
1714
978e51ba
MS
1715 ci.bio = bio;
1716 ci.sector_count = bio_sectors(bio);
0519c71e
MS
1717 if (unlikely(__process_abnormal_io(&ci, ti, &error)))
1718 goto out;
1719
1720 tio = alloc_tio(&ci, ti, 0, GFP_NOIO);
978e51ba
MS
1721 ret = __clone_and_map_simple_bio(&ci, tio, NULL);
1722 }
1723out:
1724 /* drop the extra reference count */
1725 dec_pending(ci.io, errno_to_blk_status(error));
1726 return ret;
1727}
1728
6548c7c5
MS
1729static blk_qc_t dm_process_bio(struct mapped_device *md,
1730 struct dm_table *map, struct bio *bio)
1731{
1732 if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED)
1733 return __process_bio(md, map, bio);
1734 else
1735 return __split_and_process_bio(md, map, bio);
1736}
1737
24113d48 1738static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
cec47e3d
KU
1739{
1740 struct mapped_device *md = q->queuedata;
978e51ba 1741 blk_qc_t ret = BLK_QC_T_NONE;
83d5e5b0
MP
1742 int srcu_idx;
1743 struct dm_table *map;
cec47e3d 1744
83d5e5b0 1745 map = dm_get_live_table(md, &srcu_idx);
29e4013d 1746
6a8736d1
TH
1747 /* if we're suspended, we have to queue this io for later */
1748 if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
83d5e5b0 1749 dm_put_live_table(md, srcu_idx);
9eef87da 1750
1eff9d32 1751 if (!(bio->bi_opf & REQ_RAHEAD))
6a8736d1
TH
1752 queue_io(md, bio);
1753 else
54d9a1b4 1754 bio_io_error(bio);
978e51ba 1755 return ret;
cec47e3d 1756 }
1da177e4 1757
6548c7c5 1758 ret = dm_process_bio(md, map, bio);
978e51ba 1759
83d5e5b0 1760 dm_put_live_table(md, srcu_idx);
978e51ba
MS
1761 return ret;
1762}
1763
1da177e4
LT
1764static int dm_any_congested(void *congested_data, int bdi_bits)
1765{
8a57dfc6
CS
1766 int r = bdi_bits;
1767 struct mapped_device *md = congested_data;
1768 struct dm_table *map;
1da177e4 1769
1eb787ec 1770 if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
e522c039 1771 if (dm_request_based(md)) {
cec47e3d 1772 /*
e522c039
MS
1773 * With request-based DM we only need to check the
1774 * top-level queue for congestion.
cec47e3d 1775 */
dc3b17cc 1776 r = md->queue->backing_dev_info->wb.state & bdi_bits;
e522c039
MS
1777 } else {
1778 map = dm_get_live_table_fast(md);
1779 if (map)
cec47e3d 1780 r = dm_table_any_congested(map, bdi_bits);
e522c039 1781 dm_put_live_table_fast(md);
8a57dfc6
CS
1782 }
1783 }
1784
1da177e4
LT
1785 return r;
1786}
1787
1788/*-----------------------------------------------------------------
1789 * An IDR is used to keep track of allocated minor numbers.
1790 *---------------------------------------------------------------*/
2b06cfff 1791static void free_minor(int minor)
1da177e4 1792{
f32c10b0 1793 spin_lock(&_minor_lock);
1da177e4 1794 idr_remove(&_minor_idr, minor);
f32c10b0 1795 spin_unlock(&_minor_lock);
1da177e4
LT
1796}
1797
1798/*
1799 * See if the device with a specific minor # is free.
1800 */
cf13ab8e 1801static int specific_minor(int minor)
1da177e4 1802{
c9d76be6 1803 int r;
1da177e4
LT
1804
1805 if (minor >= (1 << MINORBITS))
1806 return -EINVAL;
1807
c9d76be6 1808 idr_preload(GFP_KERNEL);
f32c10b0 1809 spin_lock(&_minor_lock);
1da177e4 1810
c9d76be6 1811 r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT);
1da177e4 1812
f32c10b0 1813 spin_unlock(&_minor_lock);
c9d76be6
TH
1814 idr_preload_end();
1815 if (r < 0)
1816 return r == -ENOSPC ? -EBUSY : r;
1817 return 0;
1da177e4
LT
1818}
1819
cf13ab8e 1820static int next_free_minor(int *minor)
1da177e4 1821{
c9d76be6 1822 int r;
62f75c2f 1823
c9d76be6 1824 idr_preload(GFP_KERNEL);
f32c10b0 1825 spin_lock(&_minor_lock);
1da177e4 1826
c9d76be6 1827 r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT);
1da177e4 1828
f32c10b0 1829 spin_unlock(&_minor_lock);
c9d76be6
TH
1830 idr_preload_end();
1831 if (r < 0)
1832 return r;
1833 *minor = r;
1834 return 0;
1da177e4
LT
1835}
1836
83d5cde4 1837static const struct block_device_operations dm_blk_dops;
f26c5719 1838static const struct dax_operations dm_dax_ops;
1da177e4 1839
53d5914f
MP
1840static void dm_wq_work(struct work_struct *work);
1841
c12c9a3c 1842static void dm_init_normal_md_queue(struct mapped_device *md)
bfebd1cd 1843{
bfebd1cd
MS
1844 /*
 1845 * Initialize aspects of the queue that aren't relevant for blk-mq
1846 */
dc3b17cc 1847 md->queue->backing_dev_info->congested_fn = dm_any_congested;
4a0b4ddf
MS
1848}
1849
0f20972f
MS
1850static void cleanup_mapped_device(struct mapped_device *md)
1851{
0f20972f
MS
1852 if (md->wq)
1853 destroy_workqueue(md->wq);
6f1c819c
KO
1854 bioset_exit(&md->bs);
1855 bioset_exit(&md->io_bs);
0f20972f 1856
f26c5719
DW
1857 if (md->dax_dev) {
1858 kill_dax(md->dax_dev);
1859 put_dax(md->dax_dev);
1860 md->dax_dev = NULL;
1861 }
1862
0f20972f
MS
1863 if (md->disk) {
1864 spin_lock(&_minor_lock);
1865 md->disk->private_data = NULL;
1866 spin_unlock(&_minor_lock);
0f20972f
MS
1867 del_gendisk(md->disk);
1868 put_disk(md->disk);
1869 }
1870
1871 if (md->queue)
1872 blk_cleanup_queue(md->queue);
1873
d09960b0
TE
1874 cleanup_srcu_struct(&md->io_barrier);
1875
0f20972f
MS
1876 if (md->bdev) {
1877 bdput(md->bdev);
1878 md->bdev = NULL;
1879 }
4cc96131 1880
d5ffebdd
MS
1881 mutex_destroy(&md->suspend_lock);
1882 mutex_destroy(&md->type_lock);
1883 mutex_destroy(&md->table_devices_lock);
1884
4cc96131 1885 dm_mq_cleanup_mapped_device(md);
0f20972f
MS
1886}
1887
1da177e4
LT
1888/*
1889 * Allocate and initialise a blank device with a given minor.
1890 */
2b06cfff 1891static struct mapped_device *alloc_dev(int minor)
1da177e4 1892{
115485e8 1893 int r, numa_node_id = dm_get_numa_node();
976431b0 1894 struct dax_device *dax_dev = NULL;
115485e8 1895 struct mapped_device *md;
ba61fdd1 1896 void *old_md;
1da177e4 1897
856eb091 1898 md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
1da177e4
LT
1899 if (!md) {
1900 DMWARN("unable to allocate device, out of memory.");
1901 return NULL;
1902 }
1903
10da4f79 1904 if (!try_module_get(THIS_MODULE))
6ed7ade8 1905 goto bad_module_get;
10da4f79 1906
1da177e4 1907 /* get a minor number for the dev */
2b06cfff 1908 if (minor == DM_ANY_MINOR)
cf13ab8e 1909 r = next_free_minor(&minor);
2b06cfff 1910 else
cf13ab8e 1911 r = specific_minor(minor);
1da177e4 1912 if (r < 0)
6ed7ade8 1913 goto bad_minor;
1da177e4 1914
83d5e5b0
MP
1915 r = init_srcu_struct(&md->io_barrier);
1916 if (r < 0)
1917 goto bad_io_barrier;
1918
115485e8 1919 md->numa_node_id = numa_node_id;
591ddcfc 1920 md->init_tio_pdu = false;
a5664dad 1921 md->type = DM_TYPE_NONE;
e61290a4 1922 mutex_init(&md->suspend_lock);
a5664dad 1923 mutex_init(&md->type_lock);
86f1152b 1924 mutex_init(&md->table_devices_lock);
022c2611 1925 spin_lock_init(&md->deferred_lock);
1da177e4 1926 atomic_set(&md->holders, 1);
5c6bd75d 1927 atomic_set(&md->open_count, 0);
1da177e4 1928 atomic_set(&md->event_nr, 0);
7a8c3d3b
MA
1929 atomic_set(&md->uevent_seq, 0);
1930 INIT_LIST_HEAD(&md->uevent_list);
86f1152b 1931 INIT_LIST_HEAD(&md->table_devices);
7a8c3d3b 1932 spin_lock_init(&md->uevent_lock);
1da177e4 1933
6d469642 1934 md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id);
1da177e4 1935 if (!md->queue)
0f20972f 1936 goto bad;
c12c9a3c
MS
1937 md->queue->queuedata = md;
1938 md->queue->backing_dev_info->congested_data = md;
1da177e4 1939
c12c9a3c 1940 md->disk = alloc_disk_node(1, md->numa_node_id);
1da177e4 1941 if (!md->disk)
0f20972f 1942 goto bad;
1da177e4 1943
f0b04115 1944 init_waitqueue_head(&md->wait);
53d5914f 1945 INIT_WORK(&md->work, dm_wq_work);
f0b04115 1946 init_waitqueue_head(&md->eventq);
2995fa78 1947 init_completion(&md->kobj_holder.completion);
f0b04115 1948
1da177e4
LT
1949 md->disk->major = _major;
1950 md->disk->first_minor = minor;
1951 md->disk->fops = &dm_blk_dops;
1952 md->disk->queue = md->queue;
1953 md->disk->private_data = md;
1954 sprintf(md->disk->disk_name, "dm-%d", minor);
f26c5719 1955
976431b0
DW
1956 if (IS_ENABLED(CONFIG_DAX_DRIVER)) {
1957 dax_dev = alloc_dax(md, md->disk->disk_name, &dm_dax_ops);
1958 if (!dax_dev)
1959 goto bad;
1960 }
f26c5719
DW
1961 md->dax_dev = dax_dev;
1962
c100ec49 1963 add_disk_no_queue_reg(md->disk);
7e51f257 1964 format_dev_t(md->name, MKDEV(_major, minor));
1da177e4 1965
670368a8 1966 md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0);
304f3f6a 1967 if (!md->wq)
0f20972f 1968 goto bad;
304f3f6a 1969
32a926da
MP
1970 md->bdev = bdget_disk(md->disk, 0);
1971 if (!md->bdev)
0f20972f 1972 goto bad;
32a926da 1973
fd2ed4d2
MP
1974 dm_stats_init(&md->stats);
1975
ba61fdd1 1976 /* Populate the mapping, nobody knows we exist yet */
f32c10b0 1977 spin_lock(&_minor_lock);
ba61fdd1 1978 old_md = idr_replace(&_minor_idr, md, minor);
f32c10b0 1979 spin_unlock(&_minor_lock);
ba61fdd1
JM
1980
1981 BUG_ON(old_md != MINOR_ALLOCED);
1982
1da177e4
LT
1983 return md;
1984
0f20972f
MS
1985bad:
1986 cleanup_mapped_device(md);
83d5e5b0 1987bad_io_barrier:
1da177e4 1988 free_minor(minor);
6ed7ade8 1989bad_minor:
10da4f79 1990 module_put(THIS_MODULE);
6ed7ade8 1991bad_module_get:
856eb091 1992 kvfree(md);
1da177e4
LT
1993 return NULL;
1994}
1995
ae9da83f
JN
1996static void unlock_fs(struct mapped_device *md);
1997
1da177e4
LT
1998static void free_dev(struct mapped_device *md)
1999{
f331c029 2000 int minor = MINOR(disk_devt(md->disk));
63d94e48 2001
32a926da 2002 unlock_fs(md);
2eb6e1e3 2003
0f20972f 2004 cleanup_mapped_device(md);
63a4f065 2005
86f1152b 2006 free_table_devices(&md->table_devices);
63a4f065 2007 dm_stats_cleanup(&md->stats);
63a4f065
MS
2008 free_minor(minor);
2009
10da4f79 2010 module_put(THIS_MODULE);
856eb091 2011 kvfree(md);
1da177e4
LT
2012}
2013
2a2a4c51 2014static int __bind_mempools(struct mapped_device *md, struct dm_table *t)
e6ee8c0b 2015{
c0820cf5 2016 struct dm_md_mempools *p = dm_table_get_md_mempools(t);
2a2a4c51 2017 int ret = 0;
e6ee8c0b 2018
0776aa0e 2019 if (dm_table_bio_based(t)) {
64f52b0e
MS
2020 /*
2021 * The md may already have mempools that need changing.
2022 * If so, reload bioset because front_pad may have changed
2023 * because a different table was loaded.
2024 */
6f1c819c
KO
2025 bioset_exit(&md->bs);
2026 bioset_exit(&md->io_bs);
0776aa0e 2027
6f1c819c 2028 } else if (bioset_initialized(&md->bs)) {
4e6e36c3
MS
2029 /*
 2030 * There's no need to reload with request-based dm
 2031 * because the size of front_pad doesn't change.
 2032 * Note for the future: if you ever do reload the bioset,
 2033 * prepped requests in the queue may still refer
 2034 * to bios from the old bioset, so you must walk
 2035 * through the queue to unprep them.
2036 */
2037 goto out;
c0820cf5 2038 }
e6ee8c0b 2039
6f1c819c
KO
2040 BUG_ON(!p ||
2041 bioset_initialized(&md->bs) ||
2042 bioset_initialized(&md->io_bs));
cbc4e3c1 2043
2a2a4c51
JA
2044 ret = bioset_init_from_src(&md->bs, &p->bs);
2045 if (ret)
2046 goto out;
2047 ret = bioset_init_from_src(&md->io_bs, &p->io_bs);
2048 if (ret)
2049 bioset_exit(&md->bs);
e6ee8c0b 2050out:
02233342 2051 /* mempool bind completed, no longer need any mempools in the table */
e6ee8c0b 2052 dm_table_free_md_mempools(t);
2a2a4c51 2053 return ret;
e6ee8c0b
KU
2054}
2055
1da177e4
LT
2056/*
2057 * Bind a table to the device.
2058 */
2059static void event_callback(void *context)
2060{
7a8c3d3b
MA
2061 unsigned long flags;
2062 LIST_HEAD(uevents);
1da177e4
LT
2063 struct mapped_device *md = (struct mapped_device *) context;
2064
7a8c3d3b
MA
2065 spin_lock_irqsave(&md->uevent_lock, flags);
2066 list_splice_init(&md->uevent_list, &uevents);
2067 spin_unlock_irqrestore(&md->uevent_lock, flags);
2068
ed9e1982 2069 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
7a8c3d3b 2070
1da177e4
LT
2071 atomic_inc(&md->event_nr);
2072 wake_up(&md->eventq);
62e08243 2073 dm_issue_global_event();
1da177e4
LT
2074}
2075
c217649b
MS
2076/*
2077 * Protected by md->suspend_lock obtained by dm_swap_table().
2078 */
4e90188b 2079static void __set_size(struct mapped_device *md, sector_t size)
1da177e4 2080{
1ea0654e
BVA
2081 lockdep_assert_held(&md->suspend_lock);
2082
4e90188b 2083 set_capacity(md->disk, size);
1da177e4 2084
db8fef4f 2085 i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
1da177e4
LT
2086}
2087
042d2a9b
AK
2088/*
2089 * Returns old map, which caller must destroy.
2090 */
2091static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
2092 struct queue_limits *limits)
1da177e4 2093{
042d2a9b 2094 struct dm_table *old_map;
165125e1 2095 struct request_queue *q = md->queue;
978e51ba 2096 bool request_based = dm_table_request_based(t);
1da177e4 2097 sector_t size;
2a2a4c51 2098 int ret;
1da177e4 2099
5a8f1f80
BVA
2100 lockdep_assert_held(&md->suspend_lock);
2101
1da177e4 2102 size = dm_table_get_size(t);
3ac51e74
DW
2103
2104 /*
2105 * Wipe any geometry if the size of the table changed.
2106 */
fd2ed4d2 2107 if (size != dm_get_size(md))
3ac51e74
DW
2108 memset(&md->geometry, 0, sizeof(md->geometry));
2109
32a926da 2110 __set_size(md, size);
d5816876 2111
2ca3310e
AK
2112 dm_table_event_callback(t, event_callback, md);
2113
e6ee8c0b
KU
2114 /*
 2115 * The queue hasn't been stopped yet if the previous table type wasn't
 2116 * request-based during suspension, so stop it now to prevent
 2117 * I/O from being mapped before resume.
 2118 * This must be done before setting the queue restrictions,
 2119 * because request-based dm may start running as soon as they are set.
2120 */
978e51ba 2121 if (request_based)
eca7ee6d 2122 dm_stop_queue(q);
978e51ba
MS
2123
2124 if (request_based || md->type == DM_TYPE_NVME_BIO_BASED) {
16f12266 2125 /*
978e51ba
MS
 2126 * Leverage the fact that request-based DM targets and
 2127 * NVMe bio-based targets are immutable singletons,
 2128 * which is used to optimize dm_request_fn, dm_mq_queue_rq
 2129 * and __process_bio.
16f12266
MS
2130 */
2131 md->immutable_target = dm_table_get_immutable_target(t);
2132 }
e6ee8c0b 2133
2a2a4c51
JA
2134 ret = __bind_mempools(md, t);
2135 if (ret) {
2136 old_map = ERR_PTR(ret);
2137 goto out;
2138 }
e6ee8c0b 2139
a12f5d48 2140 old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
1d3aa6f6 2141 rcu_assign_pointer(md->map, (void *)t);
36a0456f
AK
2142 md->immutable_target_type = dm_table_get_immutable_target_type(t);
2143
754c5fc7 2144 dm_table_set_restrictions(t, q, limits);
41abc4e1
HR
2145 if (old_map)
2146 dm_sync_table(md);
1da177e4 2147
2a2a4c51 2148out:
042d2a9b 2149 return old_map;
1da177e4
LT
2150}
2151
a7940155
AK
2152/*
2153 * Returns unbound table for the caller to free.
2154 */
2155static struct dm_table *__unbind(struct mapped_device *md)
1da177e4 2156{
a12f5d48 2157 struct dm_table *map = rcu_dereference_protected(md->map, 1);
1da177e4
LT
2158
2159 if (!map)
a7940155 2160 return NULL;
1da177e4
LT
2161
2162 dm_table_event_callback(map, NULL, NULL);
9cdb8520 2163 RCU_INIT_POINTER(md->map, NULL);
83d5e5b0 2164 dm_sync_table(md);
a7940155
AK
2165
2166 return map;
1da177e4
LT
2167}
2168
2169/*
2170 * Constructor for a new device.
2171 */
2b06cfff 2172int dm_create(int minor, struct mapped_device **result)
1da177e4 2173{
c12c9a3c 2174 int r;
1da177e4
LT
2175 struct mapped_device *md;
2176
2b06cfff 2177 md = alloc_dev(minor);
1da177e4
LT
2178 if (!md)
2179 return -ENXIO;
2180
c12c9a3c
MS
2181 r = dm_sysfs_init(md);
2182 if (r) {
2183 free_dev(md);
2184 return r;
2185 }
784aae73 2186
1da177e4
LT
2187 *result = md;
2188 return 0;
2189}
2190
a5664dad
MS
2191/*
2192 * Functions to manage md->type.
2193 * All are required to hold md->type_lock.
2194 */
2195void dm_lock_md_type(struct mapped_device *md)
2196{
2197 mutex_lock(&md->type_lock);
2198}
2199
2200void dm_unlock_md_type(struct mapped_device *md)
2201{
2202 mutex_unlock(&md->type_lock);
2203}
2204
7e0d574f 2205void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type)
a5664dad 2206{
00c4fc3b 2207 BUG_ON(!mutex_is_locked(&md->type_lock));
a5664dad
MS
2208 md->type = type;
2209}
2210
7e0d574f 2211enum dm_queue_mode dm_get_md_type(struct mapped_device *md)
a5664dad
MS
2212{
2213 return md->type;
2214}
2215
36a0456f
AK
2216struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
2217{
2218 return md->immutable_target_type;
2219}
2220
f84cb8a4
MS
2221/*
2222 * The queue_limits are only valid as long as you have a reference
2223 * count on 'md'.
2224 */
2225struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
2226{
2227 BUG_ON(!atomic_read(&md->holders));
2228 return &md->queue->limits;
2229}
2230EXPORT_SYMBOL_GPL(dm_get_queue_limits);
2231
4a0b4ddf
MS
2232/*
 2233 * Set up the DM device's queue based on md's type
2234 */
591ddcfc 2235int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
4a0b4ddf 2236{
bfebd1cd 2237 int r;
c100ec49 2238 struct queue_limits limits;
7e0d574f 2239 enum dm_queue_mode type = dm_get_md_type(md);
bfebd1cd 2240
545ed20e 2241 switch (type) {
bfebd1cd 2242 case DM_TYPE_REQUEST_BASED:
e83068a5 2243 r = dm_mq_init_request_queue(md, t);
bfebd1cd 2244 if (r) {
eca7ee6d 2245 DMERR("Cannot initialize queue for request-based dm-mq mapped device");
bfebd1cd
MS
2246 return r;
2247 }
2248 break;
2249 case DM_TYPE_BIO_BASED:
545ed20e 2250 case DM_TYPE_DAX_BIO_BASED:
978e51ba
MS
2251 case DM_TYPE_NVME_BIO_BASED:
2252 dm_init_normal_md_queue(md);
24113d48 2253 blk_queue_make_request(md->queue, dm_make_request);
bfebd1cd 2254 break;
7e0d574f
BVA
2255 case DM_TYPE_NONE:
2256 WARN_ON_ONCE(true);
2257 break;
4a0b4ddf
MS
2258 }
2259
c100ec49
MS
2260 r = dm_calculate_queue_limits(t, &limits);
2261 if (r) {
2262 DMERR("Cannot calculate initial queue limits");
2263 return r;
2264 }
2265 dm_table_set_restrictions(t, md->queue, &limits);
2266 blk_register_queue(md->disk);
2267
4a0b4ddf
MS
2268 return 0;
2269}
2270
2bec1f4a 2271struct mapped_device *dm_get_md(dev_t dev)
1da177e4
LT
2272{
2273 struct mapped_device *md;
1da177e4
LT
2274 unsigned minor = MINOR(dev);
2275
2276 if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
2277 return NULL;
2278
f32c10b0 2279 spin_lock(&_minor_lock);
1da177e4
LT
2280
2281 md = idr_find(&_minor_idr, minor);
49de5769
MS
2282 if (!md || md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) ||
2283 test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
2284 md = NULL;
2285 goto out;
fba9f90e 2286 }
49de5769 2287 dm_get(md);
fba9f90e 2288out:
f32c10b0 2289 spin_unlock(&_minor_lock);
1da177e4 2290
637842cf
DT
2291 return md;
2292}
3cf2e4ba 2293EXPORT_SYMBOL_GPL(dm_get_md);
d229a958 2294
9ade92a9 2295void *dm_get_mdptr(struct mapped_device *md)
637842cf 2296{
9ade92a9 2297 return md->interface_ptr;
1da177e4
LT
2298}
2299
2300void dm_set_mdptr(struct mapped_device *md, void *ptr)
2301{
2302 md->interface_ptr = ptr;
2303}
2304
2305void dm_get(struct mapped_device *md)
2306{
2307 atomic_inc(&md->holders);
3f77316d 2308 BUG_ON(test_bit(DMF_FREEING, &md->flags));
1da177e4
LT
2309}
2310
09ee96b2
MP
2311int dm_hold(struct mapped_device *md)
2312{
2313 spin_lock(&_minor_lock);
2314 if (test_bit(DMF_FREEING, &md->flags)) {
2315 spin_unlock(&_minor_lock);
2316 return -EBUSY;
2317 }
2318 dm_get(md);
2319 spin_unlock(&_minor_lock);
2320 return 0;
2321}
2322EXPORT_SYMBOL_GPL(dm_hold);
2323
72d94861
AK
2324const char *dm_device_name(struct mapped_device *md)
2325{
2326 return md->name;
2327}
2328EXPORT_SYMBOL_GPL(dm_device_name);
2329
3f77316d 2330static void __dm_destroy(struct mapped_device *md, bool wait)
1da177e4 2331{
1134e5ae 2332 struct dm_table *map;
83d5e5b0 2333 int srcu_idx;
1da177e4 2334
3f77316d 2335 might_sleep();
fba9f90e 2336
63a4f065 2337 spin_lock(&_minor_lock);
3f77316d
KU
2338 idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
2339 set_bit(DMF_FREEING, &md->flags);
2340 spin_unlock(&_minor_lock);
3b785fbc 2341
c12c9a3c 2342 blk_set_queue_dying(md->queue);
3f77316d 2343
ab7c7bb6
MP
2344 /*
2345 * Take suspend_lock so that presuspend and postsuspend methods
2346 * do not race with internal suspend.
2347 */
2348 mutex_lock(&md->suspend_lock);
2a708cff 2349 map = dm_get_live_table(md, &srcu_idx);
3f77316d
KU
2350 if (!dm_suspended_md(md)) {
2351 dm_table_presuspend_targets(map);
2352 dm_table_postsuspend_targets(map);
1da177e4 2353 }
83d5e5b0
MP
2354 /* dm_put_live_table must be before msleep, otherwise deadlock is possible */
2355 dm_put_live_table(md, srcu_idx);
2a708cff 2356 mutex_unlock(&md->suspend_lock);
83d5e5b0 2357
3f77316d
KU
2358 /*
 2359 * Rare, but there may still be I/O requests in flight that need
 2360 * to complete.  Wait for all references to disappear.
 2361 * No one should increment the reference count of the mapped_device
 2362 * after its state becomes DMF_FREEING.
2363 */
2364 if (wait)
2365 while (atomic_read(&md->holders))
2366 msleep(1);
2367 else if (atomic_read(&md->holders))
2368 DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
2369 dm_device_name(md), atomic_read(&md->holders));
2370
2371 dm_sysfs_exit(md);
3f77316d
KU
2372 dm_table_destroy(__unbind(md));
2373 free_dev(md);
2374}
2375
2376void dm_destroy(struct mapped_device *md)
2377{
2378 __dm_destroy(md, true);
2379}
2380
2381void dm_destroy_immediate(struct mapped_device *md)
2382{
2383 __dm_destroy(md, false);
2384}
2385
2386void dm_put(struct mapped_device *md)
2387{
2388 atomic_dec(&md->holders);
1da177e4 2389}
79eb885c 2390EXPORT_SYMBOL_GPL(dm_put);
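/*
 * Sketch (illustration only, not part of the driver) of the reference
 * bracket an in-kernel caller is expected to use around dm_get_md():
 * a successful lookup takes a reference that must be dropped with
 * dm_put() once the caller is done with the device.
 */
#if 0
static void md_lookup_sketch(dev_t dev)
{
	struct mapped_device *md = dm_get_md(dev);	/* takes a reference */

	if (!md)
		return;		/* no such device, or it is being freed */

	pr_info("found mapped device %s\n", dm_device_name(md));
	dm_put(md);		/* drop the reference taken by dm_get_md() */
}
#endif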
1da177e4 2391
b48633f8 2392static int dm_wait_for_completion(struct mapped_device *md, long task_state)
46125c1c
MB
2393{
2394 int r = 0;
9f4c3f87 2395 DEFINE_WAIT(wait);
46125c1c
MB
2396
2397 while (1) {
9f4c3f87 2398 prepare_to_wait(&md->wait, &wait, task_state);
46125c1c 2399
b4324fee 2400 if (!md_in_flight(md))
46125c1c
MB
2401 break;
2402
e3fabdfd 2403 if (signal_pending_state(task_state, current)) {
46125c1c
MB
2404 r = -EINTR;
2405 break;
2406 }
2407
2408 io_schedule();
2409 }
9f4c3f87 2410 finish_wait(&md->wait, &wait);
b44ebeb0 2411
46125c1c
MB
2412 return r;
2413}
2414
1da177e4
LT
2415/*
2416 * Process the deferred bios
2417 */
ef208587 2418static void dm_wq_work(struct work_struct *work)
1da177e4 2419{
ef208587
MP
2420 struct mapped_device *md = container_of(work, struct mapped_device,
2421 work);
6d6f10df 2422 struct bio *c;
83d5e5b0
MP
2423 int srcu_idx;
2424 struct dm_table *map;
1da177e4 2425
83d5e5b0 2426 map = dm_get_live_table(md, &srcu_idx);
ef208587 2427
3b00b203 2428 while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
df12ee99
AK
2429 spin_lock_irq(&md->deferred_lock);
2430 c = bio_list_pop(&md->deferred);
2431 spin_unlock_irq(&md->deferred_lock);
2432
6a8736d1 2433 if (!c)
df12ee99 2434 break;
022c2611 2435
e6ee8c0b 2436 if (dm_request_based(md))
6548c7c5 2437 (void) generic_make_request(c);
6a8736d1 2438 else
6548c7c5 2439 (void) dm_process_bio(md, map, c);
022c2611 2440 }
73d410c0 2441
83d5e5b0 2442 dm_put_live_table(md, srcu_idx);
1da177e4
LT
2443}
2444
9a1fb464 2445static void dm_queue_flush(struct mapped_device *md)
304f3f6a 2446{
3b00b203 2447 clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
4e857c58 2448 smp_mb__after_atomic();
53d5914f 2449 queue_work(md->wq, &md->work);
304f3f6a
MB
2450}
2451
1da177e4 2452/*
042d2a9b 2453 * Swap in a new table, returning the old one for the caller to destroy.
1da177e4 2454 */
042d2a9b 2455struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
1da177e4 2456{
87eb5b21 2457 struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
754c5fc7 2458 struct queue_limits limits;
042d2a9b 2459 int r;
1da177e4 2460
e61290a4 2461 mutex_lock(&md->suspend_lock);
1da177e4
LT
2462
2463 /* device must be suspended */
4f186f8b 2464 if (!dm_suspended_md(md))
93c534ae 2465 goto out;
1da177e4 2466
3ae70656
MS
2467 /*
 2468 * If the new table has no data devices, retain the existing limits.
 2469 * This helps multipath with queue_if_no_path: if all paths disappear,
 2470 * new I/O is queued based on these limits until some paths
 2471 * reappear.
2472 */
2473 if (dm_table_has_no_data_devices(table)) {
83d5e5b0 2474 live_map = dm_get_live_table_fast(md);
3ae70656
MS
2475 if (live_map)
2476 limits = md->queue->limits;
83d5e5b0 2477 dm_put_live_table_fast(md);
3ae70656
MS
2478 }
2479
87eb5b21
MC
2480 if (!live_map) {
2481 r = dm_calculate_queue_limits(table, &limits);
2482 if (r) {
2483 map = ERR_PTR(r);
2484 goto out;
2485 }
042d2a9b 2486 }
754c5fc7 2487
042d2a9b 2488 map = __bind(md, table, &limits);
62e08243 2489 dm_issue_global_event();
1da177e4 2490
93c534ae 2491out:
e61290a4 2492 mutex_unlock(&md->suspend_lock);
042d2a9b 2493 return map;
1da177e4
LT
2494}
2495
2496/*
2497 * Functions to lock and unlock any filesystem running on the
2498 * device.
2499 */
2ca3310e 2500static int lock_fs(struct mapped_device *md)
1da177e4 2501{
e39e2e95 2502 int r;
1da177e4
LT
2503
2504 WARN_ON(md->frozen_sb);
dfbe03f6 2505
db8fef4f 2506 md->frozen_sb = freeze_bdev(md->bdev);
dfbe03f6 2507 if (IS_ERR(md->frozen_sb)) {
cf222b37 2508 r = PTR_ERR(md->frozen_sb);
e39e2e95
AK
2509 md->frozen_sb = NULL;
2510 return r;
dfbe03f6
AK
2511 }
2512
aa8d7c2f
AK
2513 set_bit(DMF_FROZEN, &md->flags);
2514
1da177e4
LT
2515 return 0;
2516}
2517
2ca3310e 2518static void unlock_fs(struct mapped_device *md)
1da177e4 2519{
aa8d7c2f
AK
2520 if (!test_bit(DMF_FROZEN, &md->flags))
2521 return;
2522
db8fef4f 2523 thaw_bdev(md->bdev, md->frozen_sb);
1da177e4 2524 md->frozen_sb = NULL;
aa8d7c2f 2525 clear_bit(DMF_FROZEN, &md->flags);
1da177e4
LT
2526}
2527
2528/*
b48633f8
BVA
2529 * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG
2530 * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE
2531 * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY
2532 *
ffcc3936
MS
2533 * If __dm_suspend returns 0, the device is completely quiescent
2534 * now. There is no request-processing activity. All new requests
2535 * are being added to md->deferred list.
cec47e3d 2536 */
ffcc3936 2537static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
b48633f8 2538 unsigned suspend_flags, long task_state,
eaf9a736 2539 int dmf_suspended_flag)
1da177e4 2540{
ffcc3936
MS
2541 bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
2542 bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
2543 int r;
1da177e4 2544
5a8f1f80
BVA
2545 lockdep_assert_held(&md->suspend_lock);
2546
2e93ccc1
KU
2547 /*
2548 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
2549 * This flag is cleared before dm_suspend returns.
2550 */
2551 if (noflush)
2552 set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
86331f39
BVA
2553 else
2554 pr_debug("%s: suspending with flush\n", dm_device_name(md));
2e93ccc1 2555
d67ee213
MS
2556 /*
2557 * This gets reverted if there's an error later and the targets
2558 * provide the .presuspend_undo hook.
2559 */
cf222b37
AK
2560 dm_table_presuspend_targets(map);
2561
32a926da 2562 /*
9f518b27
KU
2563 * Flush I/O to the device.
2564 * Any I/O submitted after lock_fs() may not be flushed.
2565 * noflush takes precedence over do_lockfs.
2566 * (lock_fs() flushes I/Os and waits for them to complete.)
32a926da
MP
2567 */
2568 if (!noflush && do_lockfs) {
2569 r = lock_fs(md);
d67ee213
MS
2570 if (r) {
2571 dm_table_presuspend_undo_targets(map);
ffcc3936 2572 return r;
d67ee213 2573 }
aa8d7c2f 2574 }
1da177e4
LT
2575
2576 /*
3b00b203
MP
2577 * Here we must make sure that no processes are submitting requests
 2578 * to target drivers, i.e. no one may be executing
2579 * __split_and_process_bio. This is called from dm_request and
2580 * dm_wq_work.
2581 *
2582 * To get all processes out of __split_and_process_bio in dm_request,
2583 * we take the write lock. To prevent any process from reentering
6a8736d1
TH
2584 * __split_and_process_bio from dm_request and quiesce the thread
 2585 * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
2586 * flush_workqueue(md->wq).
1da177e4 2587 */
1eb787ec 2588 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
41abc4e1
HR
2589 if (map)
2590 synchronize_srcu(&md->io_barrier);
1da177e4 2591
d0bcb878 2592 /*
29e4013d
TH
2593 * Stop md->queue before flushing md->wq in case request-based
2594 * dm defers requests to md->wq from md->queue.
d0bcb878 2595 */
6a23e05c 2596 if (dm_request_based(md))
eca7ee6d 2597 dm_stop_queue(md->queue);
cec47e3d 2598
d0bcb878
KU
2599 flush_workqueue(md->wq);
2600
1da177e4 2601 /*
3b00b203
MP
2602 * At this point no more requests are entering target request routines.
2603 * We call dm_wait_for_completion to wait for all existing requests
2604 * to finish.
1da177e4 2605 */
b48633f8 2606 r = dm_wait_for_completion(md, task_state);
eaf9a736
MS
2607 if (!r)
2608 set_bit(dmf_suspended_flag, &md->flags);
1da177e4 2609
6d6f10df 2610 if (noflush)
022c2611 2611 clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
41abc4e1
HR
2612 if (map)
2613 synchronize_srcu(&md->io_barrier);
2e93ccc1 2614
1da177e4 2615 /* were we interrupted? */
46125c1c 2616 if (r < 0) {
9a1fb464 2617 dm_queue_flush(md);
73d410c0 2618
cec47e3d 2619 if (dm_request_based(md))
eca7ee6d 2620 dm_start_queue(md->queue);
cec47e3d 2621
2ca3310e 2622 unlock_fs(md);
d67ee213 2623 dm_table_presuspend_undo_targets(map);
ffcc3936 2624 /* pushback list is already flushed, so skip flush */
2ca3310e 2625 }
1da177e4 2626
ffcc3936
MS
2627 return r;
2628}
2629
2630/*
2631 * We need to be able to change a mapping table under a mounted
 2632 * filesystem. For example, we might want to move some data in
 2633 * the background. Before the table can be swapped with
 2634 * dm_bind_table, dm_suspend must be called to flush any
 2635 * in-flight bios and ensure that any further I/O gets deferred.
2636 */
2637/*
2638 * Suspend mechanism in request-based dm.
2639 *
2640 * 1. Flush all I/Os by lock_fs() if needed.
2641 * 2. Stop dispatching any I/O by stopping the request_queue.
2642 * 3. Wait for all in-flight I/Os to be completed or requeued.
2643 *
2644 * To abort suspend, start the request_queue.
2645 */
2646int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
2647{
2648 struct dm_table *map = NULL;
2649 int r = 0;
2650
2651retry:
2652 mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
2653
2654 if (dm_suspended_md(md)) {
2655 r = -EINVAL;
2656 goto out_unlock;
2657 }
2658
2659 if (dm_suspended_internally_md(md)) {
2660 /* already internally suspended, wait for internal resume */
2661 mutex_unlock(&md->suspend_lock);
2662 r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
2663 if (r)
2664 return r;
2665 goto retry;
2666 }
2667
a12f5d48 2668 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
ffcc3936 2669
eaf9a736 2670 r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED);
ffcc3936
MS
2671 if (r)
2672 goto out_unlock;
3b00b203 2673
4d4471cb
KU
2674 dm_table_postsuspend_targets(map);
2675
d287483d 2676out_unlock:
e61290a4 2677 mutex_unlock(&md->suspend_lock);
cf222b37 2678 return r;
1da177e4
LT
2679}
2680
ffcc3936
MS
2681static int __dm_resume(struct mapped_device *md, struct dm_table *map)
2682{
2683 if (map) {
2684 int r = dm_table_resume_targets(map);
2685 if (r)
2686 return r;
2687 }
2688
2689 dm_queue_flush(md);
2690
2691 /*
2692 * Flushing deferred I/Os must be done after targets are resumed
2693 * so that mapping of targets can work correctly.
 2694 * Request-based dm queues the deferred I/Os in its request_queue.
2695 */
2696 if (dm_request_based(md))
eca7ee6d 2697 dm_start_queue(md->queue);
ffcc3936
MS
2698
2699 unlock_fs(md);
2700
2701 return 0;
2702}
2703
1da177e4
LT
2704int dm_resume(struct mapped_device *md)
2705{
8dc23658 2706 int r;
cf222b37 2707 struct dm_table *map = NULL;
1da177e4 2708
ffcc3936 2709retry:
8dc23658 2710 r = -EINVAL;
ffcc3936
MS
2711 mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
2712
4f186f8b 2713 if (!dm_suspended_md(md))
cf222b37 2714 goto out;
cf222b37 2715
ffcc3936
MS
2716 if (dm_suspended_internally_md(md)) {
2717 /* already internally suspended, wait for internal resume */
2718 mutex_unlock(&md->suspend_lock);
2719 r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
2720 if (r)
2721 return r;
2722 goto retry;
2723 }
2724
a12f5d48 2725 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2ca3310e 2726 if (!map || !dm_table_get_size(map))
cf222b37 2727 goto out;
1da177e4 2728
ffcc3936 2729 r = __dm_resume(md, map);
8757b776
MB
2730 if (r)
2731 goto out;
2ca3310e 2732
2ca3310e 2733 clear_bit(DMF_SUSPENDED, &md->flags);
cf222b37 2734out:
e61290a4 2735 mutex_unlock(&md->suspend_lock);
2ca3310e 2736
cf222b37 2737 return r;
1da177e4
LT
2738}
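/*
 * Sketch (not part of the driver; minimal error handling) of the
 * suspend -> swap -> resume sequence described above dm_suspend(), as an
 * in-kernel caller might issue it for an already-built table 't':
 */
#if 0
static int swap_table_sketch(struct mapped_device *md, struct dm_table *t)
{
	struct dm_table *old;
	int r;

	r = dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);	/* quiesce, lock fs */
	if (r)
		return r;

	old = dm_swap_table(md, t);		/* returns the previous table */
	if (IS_ERR(old)) {
		dm_resume(md);
		return PTR_ERR(old);
	}
	if (old)
		dm_table_destroy(old);		/* caller must destroy old map */

	return dm_resume(md);
}
#endif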
2739
fd2ed4d2
MP
2740/*
2741 * Internal suspend/resume works like userspace-driven suspend. It waits
2742 * until all bios finish and prevents issuing new bios to the target drivers.
2743 * It may be used only from the kernel.
fd2ed4d2
MP
2744 */
2745
ffcc3936 2746static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags)
fd2ed4d2 2747{
ffcc3936
MS
2748 struct dm_table *map = NULL;
2749
1ea0654e
BVA
2750 lockdep_assert_held(&md->suspend_lock);
2751
96b26c8c 2752 if (md->internal_suspend_count++)
ffcc3936
MS
2753 return; /* nested internal suspend */
2754
2755 if (dm_suspended_md(md)) {
2756 set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
2757 return; /* nest suspend */
2758 }
2759
a12f5d48 2760 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
ffcc3936
MS
2761
2762 /*
2763 * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is
2764 * supported. Properly supporting a TASK_INTERRUPTIBLE internal suspend
2765 * would require changing .presuspend to return an error -- avoid this
2766 * until there is a need for more elaborate variants of internal suspend.
2767 */
eaf9a736
MS
2768 (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE,
2769 DMF_SUSPENDED_INTERNALLY);
ffcc3936
MS
2770
2771 dm_table_postsuspend_targets(map);
2772}
2773
2774static void __dm_internal_resume(struct mapped_device *md)
2775{
96b26c8c
MP
2776 BUG_ON(!md->internal_suspend_count);
2777
2778 if (--md->internal_suspend_count)
ffcc3936
MS
2779 return; /* resume from nested internal suspend */
2780
fd2ed4d2 2781 if (dm_suspended_md(md))
ffcc3936
MS
2782 goto done; /* resume from nested suspend */
2783
2784 /*
2785 * NOTE: existing callers don't need to call dm_table_resume_targets
2786 * (which may fail -- so best to avoid it for now by passing NULL map)
2787 */
2788 (void) __dm_resume(md, NULL);
2789
2790done:
2791 clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
2792 smp_mb__after_atomic();
2793 wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY);
2794}
2795
2796void dm_internal_suspend_noflush(struct mapped_device *md)
2797{
2798 mutex_lock(&md->suspend_lock);
2799 __dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG);
2800 mutex_unlock(&md->suspend_lock);
2801}
2802EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush);
2803
2804void dm_internal_resume(struct mapped_device *md)
2805{
2806 mutex_lock(&md->suspend_lock);
2807 __dm_internal_resume(md);
2808 mutex_unlock(&md->suspend_lock);
2809}
2810EXPORT_SYMBOL_GPL(dm_internal_resume);
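/*
 * Illustration only (not real code in the driver): internal suspends
 * nest via internal_suspend_count, so the device resumes only after a
 * matching number of resumes.
 */
#if 0
	dm_internal_suspend_noflush(md);	/* 0 -> 1, device suspends   */
	dm_internal_suspend_noflush(md);	/* 1 -> 2, already suspended */
	dm_internal_resume(md);			/* 2 -> 1, stays suspended   */
	dm_internal_resume(md);			/* 1 -> 0, device resumes    */
#endif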
2811
2812/*
2813 * Fast variants of internal suspend/resume hold md->suspend_lock,
2814 * which prevents interaction with userspace-driven suspend.
2815 */
2816
2817void dm_internal_suspend_fast(struct mapped_device *md)
2818{
2819 mutex_lock(&md->suspend_lock);
2820 if (dm_suspended_md(md) || dm_suspended_internally_md(md))
fd2ed4d2
MP
2821 return;
2822
2823 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2824 synchronize_srcu(&md->io_barrier);
2825 flush_workqueue(md->wq);
2826 dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
2827}
b735fede 2828EXPORT_SYMBOL_GPL(dm_internal_suspend_fast);
fd2ed4d2 2829
ffcc3936 2830void dm_internal_resume_fast(struct mapped_device *md)
fd2ed4d2 2831{
ffcc3936 2832 if (dm_suspended_md(md) || dm_suspended_internally_md(md))
fd2ed4d2
MP
2833 goto done;
2834
2835 dm_queue_flush(md);
2836
2837done:
2838 mutex_unlock(&md->suspend_lock);
2839}
b735fede 2840EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
fd2ed4d2 2841
1da177e4
LT
2842/*-----------------------------------------------------------------
2843 * Event notification.
2844 *---------------------------------------------------------------*/
3abf85b5 2845int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
60935eb2 2846 unsigned cookie)
69267a30 2847{
60935eb2
MB
2848 char udev_cookie[DM_COOKIE_LENGTH];
2849 char *envp[] = { udev_cookie, NULL };
2850
2851 if (!cookie)
3abf85b5 2852 return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
60935eb2
MB
2853 else {
2854 snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
2855 DM_COOKIE_ENV_VAR_NAME, cookie);
3abf85b5
PR
2856 return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
2857 action, envp);
60935eb2 2858 }
69267a30
AK
2859}
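/*
 * For illustration (the cookie value below is made up): with a non-zero
 * cookie the uevent carries one extra environment variable that udev
 * rules can use to pair the event with the ioctl which requested it.
 */
#if 0
	char udev_cookie[DM_COOKIE_LENGTH];

	snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
		 DM_COOKIE_ENV_VAR_NAME, 4272u);	/* -> "DM_COOKIE=4272" */
#endif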
2860
7a8c3d3b
MA
2861uint32_t dm_next_uevent_seq(struct mapped_device *md)
2862{
2863 return atomic_add_return(1, &md->uevent_seq);
2864}
2865
1da177e4
LT
2866uint32_t dm_get_event_nr(struct mapped_device *md)
2867{
2868 return atomic_read(&md->event_nr);
2869}
2870
2871int dm_wait_event(struct mapped_device *md, int event_nr)
2872{
2873 return wait_event_interruptible(md->eventq,
2874 (event_nr != atomic_read(&md->event_nr)));
2875}
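/*
 * Sketch (not part of the driver) of the poll-style pattern these event
 * helpers support: snapshot the counter, do other work, then sleep until
 * the counter has moved past the snapshot.
 */
#if 0
	uint32_t seen = dm_get_event_nr(md);	/* snapshot current counter */

	/* ... table changes raise events elsewhere ... */

	if (dm_wait_event(md, seen))		/* sleeps until event_nr != seen */
		return -ERESTARTSYS;		/* interrupted by a signal */
#endif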
2876
7a8c3d3b
MA
2877void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
2878{
2879 unsigned long flags;
2880
2881 spin_lock_irqsave(&md->uevent_lock, flags);
2882 list_add(elist, &md->uevent_list);
2883 spin_unlock_irqrestore(&md->uevent_lock, flags);
2884}
2885
1da177e4
LT
2886/*
2887 * The gendisk is only valid as long as you have a reference
2888 * count on 'md'.
2889 */
2890struct gendisk *dm_disk(struct mapped_device *md)
2891{
2892 return md->disk;
2893}
65ff5b7d 2894EXPORT_SYMBOL_GPL(dm_disk);
1da177e4 2895
784aae73
MB
2896struct kobject *dm_kobject(struct mapped_device *md)
2897{
2995fa78 2898 return &md->kobj_holder.kobj;
784aae73
MB
2899}
2900
784aae73
MB
2901struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
2902{
2903 struct mapped_device *md;
2904
2995fa78 2905 md = container_of(kobj, struct mapped_device, kobj_holder.kobj);
784aae73 2906
b9a41d21
HT
2907 spin_lock(&_minor_lock);
2908 if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
2909 md = NULL;
2910 goto out;
2911 }
784aae73 2912 dm_get(md);
b9a41d21
HT
2913out:
2914 spin_unlock(&_minor_lock);
2915
784aae73
MB
2916 return md;
2917}
2918
4f186f8b 2919int dm_suspended_md(struct mapped_device *md)
1da177e4
LT
2920{
2921 return test_bit(DMF_SUSPENDED, &md->flags);
2922}
2923
ffcc3936
MS
2924int dm_suspended_internally_md(struct mapped_device *md)
2925{
2926 return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
2927}
2928
2c140a24
MP
2929int dm_test_deferred_remove_flag(struct mapped_device *md)
2930{
2931 return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
2932}
2933
64dbce58
KU
2934int dm_suspended(struct dm_target *ti)
2935{
ecdb2e25 2936 return dm_suspended_md(dm_table_get_md(ti->table));
64dbce58
KU
2937}
2938EXPORT_SYMBOL_GPL(dm_suspended);
2939
2e93ccc1
KU
2940int dm_noflush_suspending(struct dm_target *ti)
2941{
ecdb2e25 2942 return __noflush_suspending(dm_table_get_md(ti->table));
2e93ccc1
KU
2943}
2944EXPORT_SYMBOL_GPL(dm_noflush_suspending);
2945
7e0d574f 2946struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
0776aa0e
MS
2947 unsigned integrity, unsigned per_io_data_size,
2948 unsigned min_pool_size)
e6ee8c0b 2949{
115485e8 2950 struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
78d8e58a 2951 unsigned int pool_size = 0;
64f52b0e 2952 unsigned int front_pad, io_front_pad;
6f1c819c 2953 int ret;
e6ee8c0b
KU
2954
2955 if (!pools)
4e6e36c3 2956 return NULL;
e6ee8c0b 2957
78d8e58a
MS
2958 switch (type) {
2959 case DM_TYPE_BIO_BASED:
545ed20e 2960 case DM_TYPE_DAX_BIO_BASED:
22c11858 2961 case DM_TYPE_NVME_BIO_BASED:
0776aa0e 2962 pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
30187e1d 2963 front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
64f52b0e 2964 io_front_pad = roundup(front_pad, __alignof__(struct dm_io)) + offsetof(struct dm_io, tio);
6f1c819c
KO
2965 ret = bioset_init(&pools->io_bs, pool_size, io_front_pad, 0);
2966 if (ret)
64f52b0e 2967 goto out;
6f1c819c 2968 if (integrity && bioset_integrity_create(&pools->io_bs, pool_size))
eb8db831 2969 goto out;
78d8e58a
MS
2970 break;
2971 case DM_TYPE_REQUEST_BASED:
0776aa0e 2972 pool_size = max(dm_get_reserved_rq_based_ios(), min_pool_size);
78d8e58a 2973 front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
591ddcfc 2974 /* per_io_data_size is used for blk-mq pdu at queue allocation */
78d8e58a
MS
2975 break;
2976 default:
2977 BUG();
2978 }
2979
6f1c819c
KO
2980 ret = bioset_init(&pools->bs, pool_size, front_pad, 0);
2981 if (ret)
5f015204 2982 goto out;
e6ee8c0b 2983
6f1c819c 2984 if (integrity && bioset_integrity_create(&pools->bs, pool_size))
5f015204 2985 goto out;
a91a2785 2986
e6ee8c0b 2987 return pools;
5f1b670d 2988
5f1b670d
CH
2989out:
2990 dm_free_md_mempools(pools);
78d8e58a 2991
4e6e36c3 2992 return NULL;
e6ee8c0b
KU
2993}
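/*
 * Worked example (numbers are illustrative only) of the front_pad
 * arithmetic above for a bio-based target with per_io_data_size == 192
 * on a 64-bit build where struct dm_target_io is 8-byte aligned and
 * offsetof(struct dm_target_io, clone) is, say, 40 bytes:
 *
 *   front_pad    = roundup(192, 8) + 40 = 232 bytes ahead of each clone bio
 *   io_front_pad = roundup(232, __alignof__(struct dm_io))
 *                  + offsetof(struct dm_io, tio)
 *
 * so every bio allocated from pools->bs carries room for the target's
 * per-I/O data plus the dm_target_io header, and bios from pools->io_bs
 * additionally carry the embedding struct dm_io.
 */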
2994
2995void dm_free_md_mempools(struct dm_md_mempools *pools)
2996{
2997 if (!pools)
2998 return;
2999
6f1c819c
KO
3000 bioset_exit(&pools->bs);
3001 bioset_exit(&pools->io_bs);
e6ee8c0b
KU
3002
3003 kfree(pools);
3004}
3005
9c72bad1
CH
3006struct dm_pr {
3007 u64 old_key;
3008 u64 new_key;
3009 u32 flags;
3010 bool fail_early;
3011};
3012
3013static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn,
3014 void *data)
71cdb697
CH
3015{
3016 struct mapped_device *md = bdev->bd_disk->private_data;
9c72bad1
CH
3017 struct dm_table *table;
3018 struct dm_target *ti;
3019 int ret = -ENOTTY, srcu_idx;
71cdb697 3020
9c72bad1
CH
3021 table = dm_get_live_table(md, &srcu_idx);
3022 if (!table || !dm_table_get_size(table))
3023 goto out;
71cdb697 3024
9c72bad1
CH
3025 /* We only support devices that have a single target */
3026 if (dm_table_get_num_targets(table) != 1)
3027 goto out;
3028 ti = dm_table_get_target(table, 0);
71cdb697 3029
9c72bad1
CH
3030 ret = -EINVAL;
3031 if (!ti->type->iterate_devices)
3032 goto out;
3033
3034 ret = ti->type->iterate_devices(ti, fn, data);
3035out:
3036 dm_put_live_table(md, srcu_idx);
3037 return ret;
3038}
3039
3040/*
3041 * For register / unregister we need to manually call out to every path.
3042 */
3043static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev,
3044 sector_t start, sector_t len, void *data)
3045{
3046 struct dm_pr *pr = data;
3047 const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
3048
3049 if (!ops || !ops->pr_register)
3050 return -EOPNOTSUPP;
3051 return ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags);
3052}
3053
3054static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
3055 u32 flags)
3056{
3057 struct dm_pr pr = {
3058 .old_key = old_key,
3059 .new_key = new_key,
3060 .flags = flags,
3061 .fail_early = true,
3062 };
3063 int ret;
3064
3065 ret = dm_call_pr(bdev, __dm_pr_register, &pr);
3066 if (ret && new_key) {
3067 /* unregister all paths if we failed to register any path */
3068 pr.old_key = new_key;
3069 pr.new_key = 0;
3070 pr.flags = 0;
3071 pr.fail_early = false;
3072 dm_call_pr(bdev, __dm_pr_register, &pr);
3073 }
3074
3075 return ret;
71cdb697
CH
3076}
3077
3078static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
956a4025 3079 u32 flags)
71cdb697
CH
3080{
3081 struct mapped_device *md = bdev->bd_disk->private_data;
3082 const struct pr_ops *ops;
971888c4 3083 int r, srcu_idx;
71cdb697 3084
5bd5e8d8 3085 r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
71cdb697 3086 if (r < 0)
971888c4 3087 goto out;
71cdb697
CH
3088
3089 ops = bdev->bd_disk->fops->pr_ops;
3090 if (ops && ops->pr_reserve)
3091 r = ops->pr_reserve(bdev, key, type, flags);
3092 else
3093 r = -EOPNOTSUPP;
971888c4
MS
3094out:
3095 dm_unprepare_ioctl(md, srcu_idx);
71cdb697
CH
3096 return r;
3097}
3098
3099static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
3100{
3101 struct mapped_device *md = bdev->bd_disk->private_data;
3102 const struct pr_ops *ops;
971888c4 3103 int r, srcu_idx;
71cdb697 3104
5bd5e8d8 3105 r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
71cdb697 3106 if (r < 0)
971888c4 3107 goto out;
71cdb697
CH
3108
3109 ops = bdev->bd_disk->fops->pr_ops;
3110 if (ops && ops->pr_release)
3111 r = ops->pr_release(bdev, key, type);
3112 else
3113 r = -EOPNOTSUPP;
971888c4
MS
3114out:
3115 dm_unprepare_ioctl(md, srcu_idx);
71cdb697
CH
3116 return r;
3117}
3118
3119static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
956a4025 3120 enum pr_type type, bool abort)
71cdb697
CH
3121{
3122 struct mapped_device *md = bdev->bd_disk->private_data;
3123 const struct pr_ops *ops;
971888c4 3124 int r, srcu_idx;
71cdb697 3125
5bd5e8d8 3126 r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
71cdb697 3127 if (r < 0)
971888c4 3128 goto out;
71cdb697
CH
3129
3130 ops = bdev->bd_disk->fops->pr_ops;
3131 if (ops && ops->pr_preempt)
3132 r = ops->pr_preempt(bdev, old_key, new_key, type, abort);
3133 else
3134 r = -EOPNOTSUPP;
971888c4
MS
3135out:
3136 dm_unprepare_ioctl(md, srcu_idx);
71cdb697
CH
3137 return r;
3138}
3139
3140static int dm_pr_clear(struct block_device *bdev, u64 key)
3141{
3142 struct mapped_device *md = bdev->bd_disk->private_data;
3143 const struct pr_ops *ops;
971888c4 3144 int r, srcu_idx;
71cdb697 3145
5bd5e8d8 3146 r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
71cdb697 3147 if (r < 0)
971888c4 3148 goto out;
71cdb697
CH
3149
3150 ops = bdev->bd_disk->fops->pr_ops;
3151 if (ops && ops->pr_clear)
3152 r = ops->pr_clear(bdev, key);
3153 else
3154 r = -EOPNOTSUPP;
971888c4
MS
3155out:
3156 dm_unprepare_ioctl(md, srcu_idx);
71cdb697
CH
3157 return r;
3158}
3159
3160static const struct pr_ops dm_pr_ops = {
3161 .pr_register = dm_pr_register,
3162 .pr_reserve = dm_pr_reserve,
3163 .pr_release = dm_pr_release,
3164 .pr_preempt = dm_pr_preempt,
3165 .pr_clear = dm_pr_clear,
3166};
3167
83d5cde4 3168static const struct block_device_operations dm_blk_dops = {
1da177e4
LT
3169 .open = dm_blk_open,
3170 .release = dm_blk_close,
aa129a22 3171 .ioctl = dm_blk_ioctl,
3ac51e74 3172 .getgeo = dm_blk_getgeo,
e76239a3 3173 .report_zones = dm_blk_report_zones,
71cdb697 3174 .pr_ops = &dm_pr_ops,
1da177e4
LT
3175 .owner = THIS_MODULE
3176};
3177
f26c5719
DW
3178static const struct dax_operations dm_dax_ops = {
3179 .direct_access = dm_dax_direct_access,
7e026c8c 3180 .copy_from_iter = dm_dax_copy_from_iter,
b3a9a0c3 3181 .copy_to_iter = dm_dax_copy_to_iter,
f26c5719
DW
3182};
3183
1da177e4
LT
3184/*
3185 * module hooks
3186 */
3187module_init(dm_init);
3188module_exit(dm_exit);
3189
3190module_param(major, uint, 0);
3191MODULE_PARM_DESC(major, "The major number of the device mapper");
f4790826 3192
e8603136
MS
3193module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
3194MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
3195
115485e8
MS
3196module_param(dm_numa_node, int, S_IRUGO | S_IWUSR);
3197MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations");
3198
1da177e4
LT
3199MODULE_DESCRIPTION(DM_NAME " driver");
3200MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
3201MODULE_LICENSE("GPL");