/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/uio.h>
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/pr.h>
#include <linux/refcount.h>
#include <linux/part_stat.h>
#include <linux/blk-crypto.h>

#define DM_MSG_PREFIX "core"

/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_IDR(_minor_idr);

static DEFINE_SPINLOCK(_minor_lock);

static void do_deferred_remove(struct work_struct *w);

static DECLARE_WORK(deferred_remove_work, do_deferred_remove);

static struct workqueue_struct *deferred_remove_workqueue;

atomic_t dm_global_event_nr = ATOMIC_INIT(0);
DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq);

void dm_issue_global_event(void)
{
	atomic_inc(&dm_global_event_nr);
	wake_up(&dm_global_eventq);
}

/*
 * One of these is allocated (on-stack) per original bio.
 */
struct clone_info {
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	unsigned sector_count;
};

/*
 * One of these is allocated per clone bio.
 */
#define DM_TIO_MAGIC 7282014
struct dm_target_io {
	unsigned magic;
	struct dm_io *io;
	struct dm_target *ti;
	unsigned target_bio_nr;
	unsigned *len_ptr;
	bool inside_dm_io;
	struct bio clone;
};

/*
 * One of these is allocated per original bio.
 * It contains the first clone used for that original.
 */
#define DM_IO_MAGIC 5191977
struct dm_io {
	unsigned magic;
	struct mapped_device *md;
	blk_status_t status;
	atomic_t io_count;
	struct bio *orig_bio;
	unsigned long start_time;
	spinlock_t endio_lock;
	struct dm_stats_aux stats_aux;
	/* last member of dm_target_io is 'struct bio' */
	struct dm_target_io tio;
};

void *dm_per_bio_data(struct bio *bio, size_t data_size)
{
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	if (!tio->inside_dm_io)
		return (char *)bio - offsetof(struct dm_target_io, clone) - data_size;
	return (char *)bio - offsetof(struct dm_target_io, clone) - offsetof(struct dm_io, tio) - data_size;
}
EXPORT_SYMBOL_GPL(dm_per_bio_data);

struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
{
	struct dm_io *io = (struct dm_io *)((char *)data + data_size);
	if (io->magic == DM_IO_MAGIC)
		return (struct bio *)((char *)io + offsetof(struct dm_io, tio) + offsetof(struct dm_target_io, clone));
	BUG_ON(io->magic != DM_TIO_MAGIC);
	return (struct bio *)((char *)io + offsetof(struct dm_target_io, clone));
}
EXPORT_SYMBOL_GPL(dm_bio_from_per_bio_data);
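
/*
 * Illustrative sketch (not part of dm.c): how a bio-based target might
 * round-trip its per-bio state with the two helpers above. The target is
 * assumed to have set ti->per_io_data_size = sizeof(struct
 * example_per_bio_data) in its constructor; all example_* names are
 * hypothetical.
 */
struct example_per_bio_data {
	unsigned long start_jiffies;
};

static int example_map(struct dm_target *ti, struct bio *bio)
{
	struct example_per_bio_data *pb =
		dm_per_bio_data(bio, sizeof(struct example_per_bio_data));

	pb->start_jiffies = jiffies;

	/* the inverse mapping recovers the clone bio from the data */
	WARN_ON(dm_bio_from_per_bio_data(pb, sizeof(*pb)) != bio);

	/* (remapping of the bio to the underlying device omitted) */
	return DM_MAPIO_REMAPPED;
}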

unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
{
	return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
}
EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr);

#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_DEFERRED_REMOVE 6
#define DMF_SUSPENDED_INTERNALLY 7
#define DMF_POST_SUSPENDING 8

#define DM_NUMA_NODE NUMA_NO_NODE
static int dm_numa_node = DM_NUMA_NODE;

/*
 * For mempools pre-allocation at the table loading time.
 */
struct dm_md_mempools {
	struct bio_set bs;
	struct bio_set io_bs;
};

struct table_device {
	struct list_head list;
	refcount_t count;
	struct dm_dev dm_dev;
};

/*
 * Bio-based DM's mempools' reserved IOs set by the user.
 */
#define RESERVED_BIO_BASED_IOS 16
static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;

static int __dm_get_module_param_int(int *module_param, int min, int max)
{
	int param = READ_ONCE(*module_param);
	int modified_param = 0;
	bool modified = true;

	if (param < min)
		modified_param = min;
	else if (param > max)
		modified_param = max;
	else
		modified = false;

	if (modified) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}

unsigned __dm_get_module_param(unsigned *module_param,
			       unsigned def, unsigned max)
{
	unsigned param = READ_ONCE(*module_param);
	unsigned modified_param = 0;

	if (!param)
		modified_param = def;
	else if (param > max)
		modified_param = max;

	if (modified_param) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}
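
/*
 * Illustrative sketch (not part of dm.c): the clamping helpers above let a
 * sysfs-writable module parameter be consumed safely even while racing with
 * a concurrent writer. A hypothetical "example_ios" parameter would be read
 * like this, with 0 mapping to the default and oversized values capped:
 */
static unsigned example_ios = 0;	/* hypothetical module parameter */

static unsigned example_get_ios(void)
{
	/* 0 => use default of 16; anything above 256 is clamped to 256 */
	return __dm_get_module_param(&example_ios, 16, 256);
}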

unsigned dm_get_reserved_bio_based_ios(void)
{
	return __dm_get_module_param(&reserved_bio_based_ios,
				     RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);

static unsigned dm_get_numa_node(void)
{
	return __dm_get_module_param_int(&dm_numa_node,
					 DM_NUMA_NODE, num_online_nodes() - 1);
}

static int __init local_init(void)
{
	int r;

	r = dm_uevent_init();
	if (r)
		return r;

	deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
	if (!deferred_remove_workqueue) {
		r = -ENOMEM;
		goto out_uevent_exit;
	}

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_free_workqueue;

	if (!_major)
		_major = r;

	return 0;

out_free_workqueue:
	destroy_workqueue(deferred_remove_workqueue);
out_uevent_exit:
	dm_uevent_exit();

	return r;
}

static void local_exit(void)
{
	flush_scheduled_work();
	destroy_workqueue(deferred_remove_workqueue);

	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_io_init,
	dm_kcopyd_init,
	dm_interface_init,
	dm_statistics_init,
};

static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_io_exit,
	dm_kcopyd_exit,
	dm_interface_exit,
	dm_statistics_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();

	/*
	 * Should be empty by this point.
	 */
	idr_destroy(&_minor_idr);
}

/*
 * Block device functions
 */
int dm_deleting_md(struct mapped_device *md)
{
	return test_bit(DMF_DELETING, &md->flags);
}

static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);
out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}

static void dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = disk->private_data;
	if (WARN_ON(!md))
		goto out;

	if (atomic_dec_and_test(&md->open_count) &&
	    (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
		queue_work(deferred_remove_workqueue, &deferred_remove_work);

	dm_put(md);
out:
	spin_unlock(&_minor_lock);
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md)) {
		r = -EBUSY;
		if (mark_deferred)
			set_bit(DMF_DEFERRED_REMOVE, &md->flags);
	} else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
		r = -EEXIST;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

int dm_cancel_deferred_remove(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (test_bit(DMF_DELETING, &md->flags))
		r = -EBUSY;
	else
		clear_bit(DMF_DEFERRED_REMOVE, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

static void do_deferred_remove(struct work_struct *w)
{
	dm_deferred_remove();
}

sector_t dm_get_size(struct mapped_device *md)
{
	return get_capacity(md->disk);
}

struct request_queue *dm_get_md_queue(struct mapped_device *md)
{
	return md->queue;
}

struct dm_stats *dm_get_stats(struct mapped_device *md)
{
	return &md->stats;
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}

#ifdef CONFIG_BLK_DEV_ZONED
int dm_report_zones_cb(struct blk_zone *zone, unsigned int idx, void *data)
{
	struct dm_report_zones_args *args = data;
	sector_t sector_diff = args->tgt->begin - args->start;

	/*
	 * Ignore zones beyond the target range.
	 */
	if (zone->start >= args->start + args->tgt->len)
		return 0;

	/*
	 * Remap the start sector and write pointer position of the zone
	 * to match its position in the target range.
	 */
	zone->start += sector_diff;
	if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL) {
		if (zone->cond == BLK_ZONE_COND_FULL)
			zone->wp = zone->start + zone->len;
		else if (zone->cond == BLK_ZONE_COND_EMPTY)
			zone->wp = zone->start;
		else
			zone->wp += sector_diff;
	}

	args->next_sector = zone->start + zone->len;
	return args->orig_cb(zone, args->zone_idx++, args->orig_data);
}
EXPORT_SYMBOL_GPL(dm_report_zones_cb);
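
/*
 * Worked example of the remapping above (numbers are illustrative): if a
 * target begins at sector 1048576 of the DM device and reports zones
 * starting from sector 0 of its underlying zoned device (args->start == 0),
 * then sector_diff == 1048576. A zone reported at start 524288 with
 * wp 524800 on the underlying device is passed to the original callback at
 * start 1572864 with wp 1573376, i.e. shifted into its position within the
 * DM device's sector space.
 */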

static int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
			       unsigned int nr_zones, report_zones_cb cb, void *data)
{
	struct mapped_device *md = disk->private_data;
	struct dm_table *map;
	int srcu_idx, ret;
	struct dm_report_zones_args args = {
		.next_sector = sector,
		.orig_data = data,
		.orig_cb = cb,
	};

	if (dm_suspended_md(md))
		return -EAGAIN;

	map = dm_get_live_table(md, &srcu_idx);
	if (!map)
		return -EIO;

	do {
		struct dm_target *tgt;

		tgt = dm_table_find_target(map, args.next_sector);
		if (WARN_ON_ONCE(!tgt->type->report_zones)) {
			ret = -EIO;
			goto out;
		}

		args.tgt = tgt;
		ret = tgt->type->report_zones(tgt, &args,
					      nr_zones - args.zone_idx);
		if (ret < 0)
			goto out;
	} while (args.zone_idx < nr_zones &&
		 args.next_sector < get_capacity(disk));

	ret = args.zone_idx;
out:
	dm_put_live_table(md, srcu_idx);
	return ret;
}
#else
#define dm_blk_report_zones NULL
#endif /* CONFIG_BLK_DEV_ZONED */

static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
			    struct block_device **bdev)
	__acquires(md->io_barrier)
{
	struct dm_target *tgt;
	struct dm_table *map;
	int r;

retry:
	r = -ENOTTY;
	map = dm_get_live_table(md, srcu_idx);
	if (!map || !dm_table_get_size(map))
		return r;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		return r;

	tgt = dm_table_get_target(map, 0);
	if (!tgt->type->prepare_ioctl)
		return r;

	if (dm_suspended_md(md))
		return -EAGAIN;

	r = tgt->type->prepare_ioctl(tgt, bdev);
	if (r == -ENOTCONN && !fatal_signal_pending(current)) {
		dm_put_live_table(md, *srcu_idx);
		msleep(10);
		goto retry;
	}

	return r;
}

static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx)
	__releases(md->io_barrier)
{
	dm_put_live_table(md, srcu_idx);
}

static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	int r, srcu_idx;

	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;

	if (r > 0) {
		/*
		 * Target determined this ioctl is being issued against a
		 * subset of the parent bdev; require extra privileges.
		 */
		if (!capable(CAP_SYS_RAWIO)) {
			DMWARN_LIMIT("%s: sending ioctl %x to DM device without required privilege.",
				     current->comm, cmd);
			r = -ENOIOCTLCMD;
			goto out;
		}
	}

	r = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}

static void start_io_acct(struct dm_io *io);

static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
{
	struct dm_io *io;
	struct dm_target_io *tio;
	struct bio *clone;

	clone = bio_alloc_bioset(GFP_NOIO, 0, &md->io_bs);
	if (!clone)
		return NULL;

	tio = container_of(clone, struct dm_target_io, clone);
	tio->inside_dm_io = true;
	tio->io = NULL;

	io = container_of(tio, struct dm_io, tio);
	io->magic = DM_IO_MAGIC;
	io->status = 0;
	atomic_set(&io->io_count, 1);
	io->orig_bio = bio;
	io->md = md;
	spin_lock_init(&io->endio_lock);

	start_io_acct(io);

	return io;
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
	bio_put(&io->tio.clone);
}

static struct dm_target_io *alloc_tio(struct clone_info *ci, struct dm_target *ti,
				      unsigned target_bio_nr, gfp_t gfp_mask)
{
	struct dm_target_io *tio;

	if (!ci->io->tio.io) {
		/* the dm_target_io embedded in ci->io is available */
		tio = &ci->io->tio;
	} else {
		struct bio *clone = bio_alloc_bioset(gfp_mask, 0, &ci->io->md->bs);
		if (!clone)
			return NULL;

		tio = container_of(clone, struct dm_target_io, clone);
		tio->inside_dm_io = false;
	}

	tio->magic = DM_TIO_MAGIC;
	tio->io = ci->io;
	tio->ti = ti;
	tio->target_bio_nr = target_bio_nr;

	return tio;
}

static void free_tio(struct dm_target_io *tio)
{
	if (tio->inside_dm_io)
		return;
	bio_put(&tio->clone);
}

u64 dm_start_time_ns_from_clone(struct bio *bio)
{
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	struct dm_io *io = tio->io;

	return jiffies_to_nsecs(io->start_time);
}
EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone);

static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->orig_bio;

	io->start_time = bio_start_io_acct(bio);
	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio_data_dir(bio),
				    bio->bi_iter.bi_sector, bio_sectors(bio),
				    false, 0, &io->stats_aux);
}

static void end_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->orig_bio;
	unsigned long duration = jiffies - io->start_time;

	bio_end_io_acct(bio, io->start_time);

	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio_data_dir(bio),
				    bio->bi_iter.bi_sector, bio_sectors(bio),
				    true, duration, &io->stats_aux);

	/* nudge anyone waiting on suspend queue */
	if (unlikely(wq_has_sleeper(&md->wait)))
		wake_up(&md->wait);
}

/*
 * Add the bio to the list of deferred io.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&md->deferred_lock, flags);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irqrestore(&md->deferred_lock, flags);
	queue_work(md->wq, &md->work);
}

/*
 * Everyone (including functions in this file), should use this
 * function to access the md->map field, and make sure they call
 * dm_put_live_table() when finished.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier)
{
	*srcu_idx = srcu_read_lock(&md->io_barrier);

	return srcu_dereference(md->map, &md->io_barrier);
}

void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier)
{
	srcu_read_unlock(&md->io_barrier, srcu_idx);
}

void dm_sync_table(struct mapped_device *md)
{
	synchronize_srcu(&md->io_barrier);
	synchronize_rcu_expedited();
}

/*
 * A fast alternative to dm_get_live_table/dm_put_live_table.
 * The caller must not block between these two functions.
 */
static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
{
	rcu_read_lock();
	return rcu_dereference(md->map);
}

static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
{
	rcu_read_unlock();
}
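
/*
 * Illustrative sketch (not part of dm.c) of the access pattern the comment
 * above prescribes: take an SRCU reference on the live table, use it, then
 * drop it. example_count_targets() is a hypothetical helper.
 */
static unsigned example_count_targets(struct mapped_device *md)
{
	struct dm_table *map;
	unsigned num = 0;
	int srcu_idx;

	map = dm_get_live_table(md, &srcu_idx);
	if (map)
		num = dm_table_get_num_targets(map);
	dm_put_live_table(md, srcu_idx);	/* always pair get with put */

	return num;
}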

static char *_dm_claim_ptr = "I belong to device-mapper";

/*
 * Open a table device so we can use it as a map destination.
 */
static int open_table_device(struct table_device *td, dev_t dev,
			     struct mapped_device *md)
{
	struct block_device *bdev;

	int r;

	BUG_ON(td->dm_dev.bdev);

	bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _dm_claim_ptr);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	r = bd_link_disk_holder(bdev, dm_disk(md));
	if (r) {
		blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL);
		return r;
	}

	td->dm_dev.bdev = bdev;
	td->dm_dev.dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
	return 0;
}

/*
 * Close a table device that we've been using.
 */
static void close_table_device(struct table_device *td, struct mapped_device *md)
{
	if (!td->dm_dev.bdev)
		return;

	bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md));
	blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
	put_dax(td->dm_dev.dax_dev);
	td->dm_dev.bdev = NULL;
	td->dm_dev.dax_dev = NULL;
}

static struct table_device *find_table_device(struct list_head *l, dev_t dev,
					      fmode_t mode)
{
	struct table_device *td;

	list_for_each_entry(td, l, list)
		if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
			return td;

	return NULL;
}

int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
			struct dm_dev **result)
{
	int r;
	struct table_device *td;

	mutex_lock(&md->table_devices_lock);
	td = find_table_device(&md->table_devices, dev, mode);
	if (!td) {
		td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
		if (!td) {
			mutex_unlock(&md->table_devices_lock);
			return -ENOMEM;
		}

		td->dm_dev.mode = mode;
		td->dm_dev.bdev = NULL;

		if ((r = open_table_device(td, dev, md))) {
			mutex_unlock(&md->table_devices_lock);
			kfree(td);
			return r;
		}

		format_dev_t(td->dm_dev.name, dev);

		refcount_set(&td->count, 1);
		list_add(&td->list, &md->table_devices);
	} else {
		refcount_inc(&td->count);
	}
	mutex_unlock(&md->table_devices_lock);

	*result = &td->dm_dev;
	return 0;
}
EXPORT_SYMBOL_GPL(dm_get_table_device);

void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
{
	struct table_device *td = container_of(d, struct table_device, dm_dev);

	mutex_lock(&md->table_devices_lock);
	if (refcount_dec_and_test(&td->count)) {
		close_table_device(td, md);
		list_del(&td->list);
		kfree(td);
	}
	mutex_unlock(&md->table_devices_lock);
}
EXPORT_SYMBOL(dm_put_table_device);

static void free_table_devices(struct list_head *devices)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, devices) {
		struct table_device *td = list_entry(tmp, struct table_device, list);

		DMWARN("dm_destroy: %s still exists with %d references",
		       td->dm_dev.name, refcount_read(&td->count));
		kfree(td);
	}
}

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static void dec_pending(struct dm_io *io, blk_status_t error)
{
	unsigned long flags;
	blk_status_t io_error;
	struct bio *bio;
	struct mapped_device *md = io->md;

	/* Push-back supersedes any I/O errors */
	if (unlikely(error)) {
		spin_lock_irqsave(&io->endio_lock, flags);
		if (!(io->status == BLK_STS_DM_REQUEUE && __noflush_suspending(md)))
			io->status = error;
		spin_unlock_irqrestore(&io->endio_lock, flags);
	}

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->status == BLK_STS_DM_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 */
			spin_lock_irqsave(&md->deferred_lock, flags);
			if (__noflush_suspending(md))
				/* NOTE early return due to BLK_STS_DM_REQUEUE below */
				bio_list_add_head(&md->deferred, io->orig_bio);
			else
				/* noflush suspend was interrupted. */
				io->status = BLK_STS_IOERR;
			spin_unlock_irqrestore(&md->deferred_lock, flags);
		}

		io_error = io->status;
		bio = io->orig_bio;
		end_io_acct(io);
		free_io(md, io);

		if (io_error == BLK_STS_DM_REQUEUE)
			return;

		if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
			/*
			 * Preflush done for flush with data, reissue
			 * without REQ_PREFLUSH.
			 */
			bio->bi_opf &= ~REQ_PREFLUSH;
			queue_io(md, bio);
		} else {
			/* done with normal IO or empty flush */
			if (io_error)
				bio->bi_status = io_error;
			bio_endio(bio);
		}
	}
}

void disable_discard(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support DISCARD, disable it */
	limits->max_discard_sectors = 0;
	blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue);
}

void disable_write_same(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE SAME, disable it */
	limits->max_write_same_sectors = 0;
}

void disable_write_zeroes(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE ZEROES, disable it */
	limits->max_write_zeroes_sectors = 0;
}

static void clone_endio(struct bio *bio)
{
	blk_status_t error = bio->bi_status;
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	struct dm_io *io = tio->io;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;
	struct bio *orig_bio = io->orig_bio;

	if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) {
		if (bio_op(bio) == REQ_OP_DISCARD &&
		    !bio->bi_disk->queue->limits.max_discard_sectors)
			disable_discard(md);
		else if (bio_op(bio) == REQ_OP_WRITE_SAME &&
			 !bio->bi_disk->queue->limits.max_write_same_sectors)
			disable_write_same(md);
		else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
			 !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
			disable_write_zeroes(md);
	}

	/*
	 * For zone-append bios get offset in zone of the written
	 * sector and add that to the original bio sector pos.
	 */
	if (bio_op(orig_bio) == REQ_OP_ZONE_APPEND) {
		sector_t written_sector = bio->bi_iter.bi_sector;
		struct request_queue *q = orig_bio->bi_disk->queue;
		u64 mask = (u64)blk_queue_zone_sectors(q) - 1;

		orig_bio->bi_iter.bi_sector += written_sector & mask;
	}

	if (endio) {
		int r = endio(tio->ti, bio, &error);
		switch (r) {
		case DM_ENDIO_REQUEUE:
			error = BLK_STS_DM_REQUEUE;
			fallthrough;
		case DM_ENDIO_DONE:
			break;
		case DM_ENDIO_INCOMPLETE:
			/* The target will handle the io */
			return;
		default:
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	free_tio(tio);
	dec_pending(io, error);
}

/*
 * Return maximum size of I/O possible at the supplied sector up to the current
 * target boundary.
 */
static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
{
	sector_t target_offset = dm_target_offset(ti, sector);

	return ti->len - target_offset;
}

static sector_t max_io_len(sector_t sector, struct dm_target *ti)
{
	sector_t len = max_io_len_target_boundary(sector, ti);
	sector_t offset, max_len;

	/*
	 * Does the target need to split even further?
	 */
	if (ti->max_io_len) {
		offset = dm_target_offset(ti, sector);
		if (unlikely(ti->max_io_len & (ti->max_io_len - 1)))
			max_len = sector_div(offset, ti->max_io_len);
		else
			max_len = offset & (ti->max_io_len - 1);
		max_len = ti->max_io_len - max_len;

		if (len > max_len)
			len = max_len;
	}

	return len;
}
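
/*
 * Worked example (illustrative numbers): for a target with
 * ti->max_io_len == 128 sectors, an I/O arriving at target offset 200 has
 * 200 & 127 == 72 sectors already consumed of its 128-sector chunk, so
 * max_len == 128 - 72 == 56; the I/O is capped to 56 sectors so the next
 * split starts exactly on the chunk boundary at offset 256 (unless the
 * target boundary itself is closer).
 */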

int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
{
	if (len > UINT_MAX) {
		DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
		      (unsigned long long)len, UINT_MAX);
		ti->error = "Maximum size of target IO is too large";
		return -EINVAL;
	}

	ti->max_io_len = (uint32_t) len;

	return 0;
}
EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);

static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
						sector_t sector, int *srcu_idx)
	__acquires(md->io_barrier)
{
	struct dm_table *map;
	struct dm_target *ti;

	map = dm_get_live_table(md, srcu_idx);
	if (!map)
		return NULL;

	ti = dm_table_find_target(map, sector);
	if (!ti)
		return NULL;

	return ti;
}

static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
				 long nr_pages, void **kaddr, pfn_t *pfn)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long len, ret = -EIO;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (!ti->type->direct_access)
		goto out;
	len = max_io_len(sector, ti) / PAGE_SECTORS;
	if (len < 1)
		goto out;
	nr_pages = min(len, nr_pages);
	ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn);

 out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

static bool dm_dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
			     int blocksize, sector_t start, sector_t len)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	struct dm_table *map;
	bool ret = false;
	int srcu_idx;

	map = dm_get_live_table(md, &srcu_idx);
	if (!map)
		goto out;

	ret = dm_table_supports_dax(map, device_supports_dax, &blocksize);

out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
				    void *addr, size_t bytes, struct iov_iter *i)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long ret = 0;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (!ti->type->dax_copy_from_iter) {
		ret = copy_from_iter(addr, bytes, i);
		goto out;
	}
	ret = ti->type->dax_copy_from_iter(ti, pgoff, addr, bytes, i);
 out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

static size_t dm_dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
				  void *addr, size_t bytes, struct iov_iter *i)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long ret = 0;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (!ti->type->dax_copy_to_iter) {
		ret = copy_to_iter(addr, bytes, i);
		goto out;
	}
	ret = ti->type->dax_copy_to_iter(ti, pgoff, addr, bytes, i);
 out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
				  size_t nr_pages)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	int ret = -EIO;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (WARN_ON(!ti->type->dax_zero_page_range)) {
		/*
		 * ->zero_page_range() is a mandatory dax operation. If we are
		 * here, something is wrong.
		 */
		goto out;
	}
	ret = ti->type->dax_zero_page_range(ti, pgoff, nr_pages);

 out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

/*
 * A target may call dm_accept_partial_bio only from the map routine. It is
 * allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_RESET,
 * REQ_OP_ZONE_OPEN, REQ_OP_ZONE_CLOSE and REQ_OP_ZONE_FINISH.
 *
 * dm_accept_partial_bio informs the dm that the target only wants to process
 * additional n_sectors sectors of the bio and the rest of the data should be
 * sent in a next bio.
 *
 * A diagram that explains the arithmetic:
 * +--------------------+---------------+-------+
 * |         1          |       2       |   3   |
 * +--------------------+---------------+-------+
 *
 * <-------------- *tio->len_ptr --------------->
 *                      <------- bi_size ------->
 *                      <-- n_sectors -->
 *
 * Region 1 was already iterated over with bio_advance or similar function.
 *	(it may be empty if the target doesn't use bio_advance)
 * Region 2 is the remaining bio size that the target wants to process.
 *	(it may be empty if region 1 is non-empty, although there is no reason
 *	 to make it empty)
 * The target requires that region 3 is to be sent in the next bio.
 *
 * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
 * the partially processed part (the sum of regions 1+2) must be the same for all
 * copies of the bio.
 */
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
{
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
	BUG_ON(bio->bi_opf & REQ_PREFLUSH);
	BUG_ON(bi_size > *tio->len_ptr);
	BUG_ON(n_sectors > bi_size);
	*tio->len_ptr -= bi_size - n_sectors;
	bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
}
EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
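
/*
 * Illustrative sketch (not part of dm.c): a hypothetical target's ->map
 * method that refuses to let a bio cross a 256-sector boundary by accepting
 * only the sectors up to that boundary; DM core resends the remainder
 * (region 3 above) as a new bio. EXAMPLE_BOUNDARY_SECTORS and
 * example_boundary_map() are made-up names for illustration.
 */
#define EXAMPLE_BOUNDARY_SECTORS 256

static int example_boundary_map(struct dm_target *ti, struct bio *bio)
{
	sector_t offset = dm_target_offset(ti, bio->bi_iter.bi_sector);
	unsigned remaining = EXAMPLE_BOUNDARY_SECTORS -
		(unsigned)(offset & (EXAMPLE_BOUNDARY_SECTORS - 1));

	if (bio_sectors(bio) > remaining)
		dm_accept_partial_bio(bio, remaining);

	/* (remapping of the bio to the underlying device omitted) */
	return DM_MAPIO_REMAPPED;
}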
1271
978e51ba 1272static blk_qc_t __map_bio(struct dm_target_io *tio)
1da177e4
LT
1273{
1274 int r;
2056a782 1275 sector_t sector;
dba14160 1276 struct bio *clone = &tio->clone;
64f52b0e 1277 struct dm_io *io = tio->io;
bd2a49b8 1278 struct dm_target *ti = tio->ti;
978e51ba 1279 blk_qc_t ret = BLK_QC_T_NONE;
1da177e4 1280
1da177e4 1281 clone->bi_end_io = clone_endio;
1da177e4
LT
1282
1283 /*
1284 * Map the clone. If r == 0 we don't need to do
1285 * anything, the target has assumed ownership of
1286 * this io.
1287 */
64f52b0e 1288 atomic_inc(&io->io_count);
4f024f37 1289 sector = clone->bi_iter.bi_sector;
d67a5f4b 1290
7de3ee57 1291 r = ti->type->map(ti, clone);
846785e6
CH
1292 switch (r) {
1293 case DM_MAPIO_SUBMITTED:
1294 break;
1295 case DM_MAPIO_REMAPPED:
1da177e4 1296 /* the bio has been remapped so dispatch it */
74d46992 1297 trace_block_bio_remap(clone->bi_disk->queue, clone,
64f52b0e 1298 bio_dev(io->orig_bio), sector);
5a6c35f9 1299 ret = submit_bio_noacct(clone);
846785e6
CH
1300 break;
1301 case DM_MAPIO_KILL:
4e4cbee9 1302 free_tio(tio);
64f52b0e 1303 dec_pending(io, BLK_STS_IOERR);
4e4cbee9 1304 break;
846785e6 1305 case DM_MAPIO_REQUEUE:
cfae7529 1306 free_tio(tio);
64f52b0e 1307 dec_pending(io, BLK_STS_DM_REQUEUE);
846785e6
CH
1308 break;
1309 default:
45cbcd79
KU
1310 DMWARN("unimplemented target map return value: %d", r);
1311 BUG();
1da177e4 1312 }
1da177e4 1313
978e51ba 1314 return ret;
1da177e4 1315}
1da177e4 1316
e0d6609a 1317static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)
bd2a49b8 1318{
4f024f37
KO
1319 bio->bi_iter.bi_sector = sector;
1320 bio->bi_iter.bi_size = to_bytes(len);
1da177e4
LT
1321}
1322
1323/*
1324 * Creates a bio that consists of range of complete bvecs.
1325 */
c80914e8
MS
1326static int clone_bio(struct dm_target_io *tio, struct bio *bio,
1327 sector_t sector, unsigned len)
1da177e4 1328{
dba14160 1329 struct bio *clone = &tio->clone;
1da177e4 1330
1c3b13e6
KO
1331 __bio_clone_fast(clone, bio);
1332
a892c8d5
ST
1333 bio_crypt_clone(clone, bio, GFP_NOIO);
1334
57c36519 1335 if (bio_integrity(bio)) {
e2460f2a
MP
1336 int r;
1337
1338 if (unlikely(!dm_target_has_integrity(tio->ti->type) &&
1339 !dm_target_passes_integrity(tio->ti->type))) {
1340 DMWARN("%s: the target %s doesn't support integrity data.",
1341 dm_device_name(tio->io->md),
1342 tio->ti->type->name);
1343 return -EIO;
1344 }
1345
1346 r = bio_integrity_clone(clone, bio, GFP_NOIO);
c80914e8
MS
1347 if (r < 0)
1348 return r;
1349 }
bd2a49b8 1350
fa8db494
MS
1351 bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
1352 clone->bi_iter.bi_size = to_bytes(len);
1353
1354 if (bio_integrity(bio))
1355 bio_integrity_trim(clone);
c80914e8
MS
1356
1357 return 0;
1da177e4
LT
1358}
1359
318716dd
MS
1360static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
1361 struct dm_target *ti, unsigned num_bios)
f9ab94ce 1362{
dba14160 1363 struct dm_target_io *tio;
318716dd 1364 int try;
dba14160 1365
318716dd
MS
1366 if (!num_bios)
1367 return;
f9ab94ce 1368
318716dd
MS
1369 if (num_bios == 1) {
1370 tio = alloc_tio(ci, ti, 0, GFP_NOIO);
1371 bio_list_add(blist, &tio->clone);
1372 return;
1373 }
9015df24 1374
318716dd
MS
1375 for (try = 0; try < 2; try++) {
1376 int bio_nr;
1377 struct bio *bio;
1378
1379 if (try)
bc02cdbe 1380 mutex_lock(&ci->io->md->table_devices_lock);
318716dd
MS
1381 for (bio_nr = 0; bio_nr < num_bios; bio_nr++) {
1382 tio = alloc_tio(ci, ti, bio_nr, try ? GFP_NOIO : GFP_NOWAIT);
1383 if (!tio)
1384 break;
1385
1386 bio_list_add(blist, &tio->clone);
1387 }
1388 if (try)
bc02cdbe 1389 mutex_unlock(&ci->io->md->table_devices_lock);
318716dd
MS
1390 if (bio_nr == num_bios)
1391 return;
1392
1393 while ((bio = bio_list_pop(blist))) {
1394 tio = container_of(bio, struct dm_target_io, clone);
1395 free_tio(tio);
1396 }
1397 }
9015df24
AK
1398}
1399
978e51ba
MS
1400static blk_qc_t __clone_and_map_simple_bio(struct clone_info *ci,
1401 struct dm_target_io *tio, unsigned *len)
9015df24 1402{
dba14160 1403 struct bio *clone = &tio->clone;
9015df24 1404
1dd40c3e
MP
1405 tio->len_ptr = len;
1406
99778273 1407 __bio_clone_fast(clone, ci->bio);
bd2a49b8 1408 if (len)
1dd40c3e 1409 bio_setup_sector(clone, ci->sector, *len);
f9ab94ce 1410
978e51ba 1411 return __map_bio(tio);
f9ab94ce
MP
1412}
1413
14fe594d 1414static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
1dd40c3e 1415 unsigned num_bios, unsigned *len)
06a426ce 1416{
318716dd
MS
1417 struct bio_list blist = BIO_EMPTY_LIST;
1418 struct bio *bio;
1419 struct dm_target_io *tio;
1420
1421 alloc_multiple_bios(&blist, ci, ti, num_bios);
06a426ce 1422
318716dd
MS
1423 while ((bio = bio_list_pop(&blist))) {
1424 tio = container_of(bio, struct dm_target_io, clone);
978e51ba 1425 (void) __clone_and_map_simple_bio(ci, tio, len);
318716dd 1426 }
06a426ce
MS
1427}
1428
14fe594d 1429static int __send_empty_flush(struct clone_info *ci)
f9ab94ce 1430{
06a426ce 1431 unsigned target_nr = 0;
f9ab94ce
MP
1432 struct dm_target *ti;
1433
892ad71f 1434 /*
dbe3ece1
JA
1435 * Empty flush uses a statically initialized bio, as the base for
1436 * cloning. However, blkg association requires that a bdev is
1437 * associated with a gendisk, which doesn't happen until the bdev is
1438 * opened. So, blkg association is done at issue time of the flush
1439 * rather than when the device is created in alloc_dev().
892ad71f
DZ
1440 */
1441 bio_set_dev(ci->bio, ci->io->md->bdev);
1442
b372d360 1443 BUG_ON(bio_has_data(ci->bio));
f9ab94ce 1444 while ((ti = dm_table_get_target(ci->map, target_nr++)))
1dd40c3e 1445 __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
f9ab94ce
MP
1446 return 0;
1447}
1448
c80914e8 1449static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
f31c21e4 1450 sector_t sector, unsigned *len)
5ae89a87 1451{
dba14160 1452 struct bio *bio = ci->bio;
5ae89a87 1453 struct dm_target_io *tio;
f31c21e4 1454 int r;
5ae89a87 1455
318716dd 1456 tio = alloc_tio(ci, ti, 0, GFP_NOIO);
f31c21e4
N
1457 tio->len_ptr = len;
1458 r = clone_bio(tio, bio, sector, *len);
1459 if (r < 0) {
1460 free_tio(tio);
1461 return r;
b0d8ed4d 1462 }
978e51ba 1463 (void) __map_bio(tio);
c80914e8 1464
f31c21e4 1465 return 0;
5ae89a87
MS
1466}
1467
55a62eef 1468typedef unsigned (*get_num_bios_fn)(struct dm_target *ti);
23508a96 1469
55a62eef 1470static unsigned get_num_discard_bios(struct dm_target *ti)
23508a96 1471{
55a62eef 1472 return ti->num_discard_bios;
23508a96
MS
1473}
1474
00716545
DS
1475static unsigned get_num_secure_erase_bios(struct dm_target *ti)
1476{
1477 return ti->num_secure_erase_bios;
1478}
1479
55a62eef 1480static unsigned get_num_write_same_bios(struct dm_target *ti)
23508a96 1481{
55a62eef 1482 return ti->num_write_same_bios;
23508a96
MS
1483}
1484
ac62d620
CH
1485static unsigned get_num_write_zeroes_bios(struct dm_target *ti)
1486{
1487 return ti->num_write_zeroes_bios;
1488}
1489
3d7f4562 1490static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti,
61697a6a 1491 unsigned num_bios)
ba1cbad9 1492{
51b86f9a 1493 unsigned len;
ba1cbad9 1494
3d7f4562
MS
1495 /*
1496 * Even though the device advertised support for this type of
1497 * request, that does not mean every target supports it, and
1498 * reconfiguration might also have changed that since the
1499 * check was performed.
1500 */
3d7f4562
MS
1501 if (!num_bios)
1502 return -EOPNOTSUPP;
ba1cbad9 1503
51b86f9a
ML
1504 len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
1505
3d7f4562 1506 __send_duplicate_bios(ci, ti, num_bios, &len);
e262f347 1507
3d7f4562
MS
1508 ci->sector += len;
1509 ci->sector_count -= len;
5ae89a87
MS
1510
1511 return 0;
ba1cbad9
MS
1512}
1513
3d7f4562 1514static int __send_discard(struct clone_info *ci, struct dm_target *ti)
23508a96 1515{
61697a6a 1516 return __send_changing_extent_only(ci, ti, get_num_discard_bios(ti));
23508a96 1517}
0ce65797 1518
00716545
DS
1519static int __send_secure_erase(struct clone_info *ci, struct dm_target *ti)
1520{
61697a6a 1521 return __send_changing_extent_only(ci, ti, get_num_secure_erase_bios(ti));
00716545
DS
1522}
1523
3d7f4562 1524static int __send_write_same(struct clone_info *ci, struct dm_target *ti)
0ce65797 1525{
61697a6a 1526 return __send_changing_extent_only(ci, ti, get_num_write_same_bios(ti));
0ce65797
MS
1527}
1528
3d7f4562 1529static int __send_write_zeroes(struct clone_info *ci, struct dm_target *ti)
ac62d620 1530{
61697a6a 1531 return __send_changing_extent_only(ci, ti, get_num_write_zeroes_bios(ti));
ac62d620
CH
1532}
1533
568c73a3
MS
1534static bool is_abnormal_io(struct bio *bio)
1535{
1536 bool r = false;
1537
1538 switch (bio_op(bio)) {
1539 case REQ_OP_DISCARD:
1540 case REQ_OP_SECURE_ERASE:
1541 case REQ_OP_WRITE_SAME:
1542 case REQ_OP_WRITE_ZEROES:
1543 r = true;
1544 break;
1545 }
1546
1547 return r;
1548}
1549
0519c71e
MS
1550static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti,
1551 int *result)
1552{
1553 struct bio *bio = ci->bio;
1554
1555 if (bio_op(bio) == REQ_OP_DISCARD)
1556 *result = __send_discard(ci, ti);
00716545
DS
1557 else if (bio_op(bio) == REQ_OP_SECURE_ERASE)
1558 *result = __send_secure_erase(ci, ti);
0519c71e
MS
1559 else if (bio_op(bio) == REQ_OP_WRITE_SAME)
1560 *result = __send_write_same(ci, ti);
1561 else if (bio_op(bio) == REQ_OP_WRITE_ZEROES)
1562 *result = __send_write_zeroes(ci, ti);
1563 else
1564 return false;
1565
1566 return true;
1567}
1568
e4c93811
AK
1569/*
1570 * Select the correct strategy for processing a non-flush bio.
1571 */
14fe594d 1572static int __split_and_process_non_flush(struct clone_info *ci)
0ce65797 1573{
512875bd 1574 struct dm_target *ti;
1c3b13e6 1575 unsigned len;
c80914e8 1576 int r;
0ce65797 1577
512875bd 1578 ti = dm_table_find_target(ci->map, ci->sector);
123d87d5 1579 if (!ti)
512875bd
JN
1580 return -EIO;
1581
568c73a3 1582 if (__process_abnormal_io(ci, ti, &r))
0519c71e 1583 return r;
3d7f4562 1584
e76239a3 1585 len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count);
0ce65797 1586
c80914e8
MS
1587 r = __clone_and_map_data_bio(ci, ti, ci->sector, &len);
1588 if (r < 0)
1589 return r;
0ce65797 1590
1c3b13e6
KO
1591 ci->sector += len;
1592 ci->sector_count -= len;
0ce65797 1593
1c3b13e6 1594 return 0;
0ce65797
MS
1595}
1596
978e51ba
MS
1597static void init_clone_info(struct clone_info *ci, struct mapped_device *md,
1598 struct dm_table *map, struct bio *bio)
1599{
1600 ci->map = map;
1601 ci->io = alloc_io(md, bio);
1602 ci->sector = bio->bi_iter.bi_sector;
1603}
1604
a1e1cb72
MS
1605#define __dm_part_stat_sub(part, field, subnd) \
1606 (part_stat_get(part, field) -= (subnd))
1607
1da177e4 1608/*
14fe594d 1609 * Entry point to split a bio into clones and submit them to the targets.
1da177e4 1610 */
978e51ba
MS
1611static blk_qc_t __split_and_process_bio(struct mapped_device *md,
1612 struct dm_table *map, struct bio *bio)
0ce65797 1613{
1da177e4 1614 struct clone_info ci;
978e51ba 1615 blk_qc_t ret = BLK_QC_T_NONE;
512875bd 1616 int error = 0;
1da177e4 1617
978e51ba 1618 init_clone_info(&ci, md, map, bio);
0ce65797 1619
1eff9d32 1620 if (bio->bi_opf & REQ_PREFLUSH) {
dbe3ece1
JA
1621 struct bio flush_bio;
1622
1623 /*
1624 * Use an on-stack bio for this, it's safe since we don't
1625 * need to reference it after submit. It's just used as
1626 * the basis for the clone(s).
1627 */
1628 bio_init(&flush_bio, NULL, 0);
1629 flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
1630 ci.bio = &flush_bio;
b372d360 1631 ci.sector_count = 0;
14fe594d 1632 error = __send_empty_flush(&ci);
382761dc 1633 bio_uninit(ci.bio);
b372d360 1634 /* dec_pending submits any data associated with flush */
2e2d6f7e 1635 } else if (op_is_zone_mgmt(bio_op(bio))) {
a4aa5e56
DLM
1636 ci.bio = bio;
1637 ci.sector_count = 0;
1638 error = __split_and_process_non_flush(&ci);
b372d360 1639 } else {
6a8736d1 1640 ci.bio = bio;
d87f4c14 1641 ci.sector_count = bio_sectors(bio);
18a25da8 1642 while (ci.sector_count && !error) {
14fe594d 1643 error = __split_and_process_non_flush(&ci);
18a25da8
N
1644 if (current->bio_list && ci.sector_count && !error) {
1645 /*
ed00aabd 1646 * Remainder must be passed to submit_bio_noacct()
18a25da8
N
1647 * so that it gets handled *after* bios already submitted
1648 * have been completely processed.
1649 * We take a clone of the original to store in
745dc570 1650 * ci.io->orig_bio to be used by end_io_acct() and
18a25da8 1651 * for dec_pending to use for completion handling.
18a25da8 1652 */
f21c601a
MS
1653 struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count,
1654 GFP_NOIO, &md->queue->bio_split);
745dc570 1655 ci.io->orig_bio = b;
a1e1cb72
MS
1656
1657 /*
1658 * Adjust IO stats for each split, otherwise upon queue
1659 * reentry there will be redundant IO accounting.
1660 * NOTE: this is a stop-gap fix, a proper fix involves
1661 * significant refactoring of DM core's bio splitting
1662 * (by eliminating DM's splitting and just using bio_split)
1663 */
1664 part_stat_lock();
1665 __dm_part_stat_sub(&dm_disk(md)->part0,
1666 sectors[op_stat_group(bio_op(bio))], ci.sector_count);
1667 part_stat_unlock();
1668
18a25da8 1669 bio_chain(b, bio);
075c18c3 1670 trace_block_split(md->queue, b, bio->bi_iter.bi_sector);
ed00aabd 1671 ret = submit_bio_noacct(bio);
18a25da8
N
1672 break;
1673 }
1674 }
d87f4c14 1675 }
0ce65797 1676
1da177e4 1677 /* drop the extra reference count */
54385bf7 1678 dec_pending(ci.io, errno_to_blk_status(error));
978e51ba 1679 return ret;
0ce65797
MS
1680}
1681
cec47e3d 1682/*
978e51ba
MS
1683 * Optimized variant of __split_and_process_bio that leverages the
1684 * fact that targets that use it do _not_ have a need to split bios.
cec47e3d 1685 */
568c73a3
MS
1686static blk_qc_t __process_bio(struct mapped_device *md, struct dm_table *map,
1687 struct bio *bio, struct dm_target *ti)
978e51ba
MS
1688{
1689 struct clone_info ci;
1690 blk_qc_t ret = BLK_QC_T_NONE;
1691 int error = 0;
1692
978e51ba
MS
1693 init_clone_info(&ci, md, map, bio);
1694
1695 if (bio->bi_opf & REQ_PREFLUSH) {
dbe3ece1
JA
1696 struct bio flush_bio;
1697
1698 /*
1699 * Use an on-stack bio for this, it's safe since we don't
1700 * need to reference it after submit. It's just used as
1701 * the basis for the clone(s).
1702 */
1703 bio_init(&flush_bio, NULL, 0);
1704 flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
1705 ci.bio = &flush_bio;
978e51ba
MS
1706 ci.sector_count = 0;
1707 error = __send_empty_flush(&ci);
382761dc 1708 bio_uninit(ci.bio);
978e51ba
MS
1709 /* dec_pending submits any data associated with flush */
1710 } else {
978e51ba
MS
1711 struct dm_target_io *tio;
1712
978e51ba
MS
1713 ci.bio = bio;
1714 ci.sector_count = bio_sectors(bio);
568c73a3 1715 if (__process_abnormal_io(&ci, ti, &error))
0519c71e
MS
1716 goto out;
1717
1718 tio = alloc_tio(&ci, ti, 0, GFP_NOIO);
978e51ba
MS
1719 ret = __clone_and_map_simple_bio(&ci, tio, NULL);
1720 }
1721out:
1722 /* drop the extra reference count */
1723 dec_pending(ci.io, errno_to_blk_status(error));
1724 return ret;
1725}
1726
6548c7c5
MS
1727static blk_qc_t dm_process_bio(struct mapped_device *md,
1728 struct dm_table *map, struct bio *bio)
1729{
568c73a3
MS
1730 blk_qc_t ret = BLK_QC_T_NONE;
1731 struct dm_target *ti = md->immutable_target;
1732
1733 if (unlikely(!map)) {
1734 bio_io_error(bio);
1735 return ret;
1736 }
1737
1738 if (!ti) {
1739 ti = dm_table_find_target(map, bio->bi_iter.bi_sector);
123d87d5 1740 if (unlikely(!ti)) {
568c73a3
MS
1741 bio_io_error(bio);
1742 return ret;
1743 }
1744 }
1745
1746 /*
cf9c3786 1747 * If in ->submit_bio we need to use blk_queue_split(), otherwise
568c73a3
MS
1748 * queue_limits for abnormal requests (e.g. discard, writesame, etc)
1749 * won't be imposed.
cf9c3786
MS
1750 * If called from dm_wq_work() for deferred bio processing, bio
1751 * was already handled by following code with previous ->submit_bio.
568c73a3
MS
1752 */
1753 if (current->bio_list) {
120c9257 1754 if (is_abnormal_io(bio))
f695ca38 1755 blk_queue_split(&bio);
ee1dfad5 1756 /* regular IO is split by __split_and_process_bio */
568c73a3
MS
1757 }
1758
6548c7c5 1759 if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED)
568c73a3 1760 return __process_bio(md, map, bio, ti);
ee1dfad5 1761 return __split_and_process_bio(md, map, bio);
6548c7c5
MS
1762}
1763
c62b37d9 1764static blk_qc_t dm_submit_bio(struct bio *bio)
cec47e3d 1765{
c4a59c4e 1766 struct mapped_device *md = bio->bi_disk->private_data;
978e51ba 1767 blk_qc_t ret = BLK_QC_T_NONE;
83d5e5b0
MP
1768 int srcu_idx;
1769 struct dm_table *map;
cec47e3d 1770
ac7c5675
CH
1771 if (dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) {
1772 /*
1773 * We are called with a live reference on q_usage_counter, but
1774 * that one will be released as soon as we return. Grab an
c62b37d9
CH
1775 * extra one as blk_mq_submit_bio expects to be able to consume
1776 * a reference (which lives until the request is freed in case a
1777 * request is allocated).
ac7c5675 1778 */
c62b37d9
CH
1779 percpu_ref_get(&bio->bi_disk->queue->q_usage_counter);
1780 return blk_mq_submit_bio(bio);
ac7c5675 1781 }
8cf7961d 1782
83d5e5b0 1783 map = dm_get_live_table(md, &srcu_idx);
29e4013d 1784
6a8736d1
TH
1785 /* if we're suspended, we have to queue this io for later */
1786 if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
83d5e5b0 1787 dm_put_live_table(md, srcu_idx);
9eef87da 1788
6abc4946
KK
1789 if (bio->bi_opf & REQ_NOWAIT)
1790 bio_wouldblock_error(bio);
1791 else if (!(bio->bi_opf & REQ_RAHEAD))
6a8736d1
TH
1792 queue_io(md, bio);
1793 else
54d9a1b4 1794 bio_io_error(bio);
978e51ba 1795 return ret;
cec47e3d 1796 }
1da177e4 1797
6548c7c5 1798 ret = dm_process_bio(md, map, bio);
978e51ba 1799
83d5e5b0 1800 dm_put_live_table(md, srcu_idx);
978e51ba
MS
1801 return ret;
1802}
1803
1da177e4
LT
1804/*-----------------------------------------------------------------
1805 * An IDR is used to keep track of allocated minor numbers.
1806 *---------------------------------------------------------------*/
2b06cfff 1807static void free_minor(int minor)
1da177e4 1808{
f32c10b0 1809 spin_lock(&_minor_lock);
1da177e4 1810 idr_remove(&_minor_idr, minor);
f32c10b0 1811 spin_unlock(&_minor_lock);
1da177e4
LT
1812}
1813
1814/*
1815 * See if the device with a specific minor # is free.
1816 */
cf13ab8e 1817static int specific_minor(int minor)
1da177e4 1818{
c9d76be6 1819 int r;
1da177e4
LT
1820
1821 if (minor >= (1 << MINORBITS))
1822 return -EINVAL;
1823
c9d76be6 1824 idr_preload(GFP_KERNEL);
f32c10b0 1825 spin_lock(&_minor_lock);
1da177e4 1826
c9d76be6 1827 r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT);
1da177e4 1828
f32c10b0 1829 spin_unlock(&_minor_lock);
c9d76be6
TH
1830 idr_preload_end();
1831 if (r < 0)
1832 return r == -ENOSPC ? -EBUSY : r;
1833 return 0;
1da177e4
LT
1834}
1835
cf13ab8e 1836static int next_free_minor(int *minor)
1da177e4 1837{
c9d76be6 1838 int r;
62f75c2f 1839
c9d76be6 1840 idr_preload(GFP_KERNEL);
f32c10b0 1841 spin_lock(&_minor_lock);
1da177e4 1842
c9d76be6 1843 r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT);
1da177e4 1844
f32c10b0 1845 spin_unlock(&_minor_lock);
c9d76be6
TH
1846 idr_preload_end();
1847 if (r < 0)
1848 return r;
1849 *minor = r;
1850 return 0;
1da177e4
LT
1851}
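
/*
 * Editor's example: specific_minor()/next_free_minor() above follow the
 * standard idr_preload()/idr_alloc() pattern for allocating an ID while
 * holding a spinlock. A generic, hypothetical caller looks like:
 */
static int example_alloc_id(struct idr *idr, spinlock_t *lock, void *ptr)
{
	int id;

	idr_preload(GFP_KERNEL);	/* may sleep: fill per-cpu cache first */
	spin_lock(lock);
	/* end == 0 means "no upper bound"; GFP_NOWAIT because a lock is held */
	id = idr_alloc(idr, ptr, 0, 0, GFP_NOWAIT);
	spin_unlock(lock);
	idr_preload_end();

	return id;	/* >= 0 on success, -ENOMEM/-ENOSPC on failure */
}
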
1852
83d5cde4 1853static const struct block_device_operations dm_blk_dops;
f26c5719 1854static const struct dax_operations dm_dax_ops;
1da177e4 1855
53d5914f
MP
1856static void dm_wq_work(struct work_struct *work);
1857
0f20972f
MS
1858static void cleanup_mapped_device(struct mapped_device *md)
1859{
0f20972f
MS
1860 if (md->wq)
1861 destroy_workqueue(md->wq);
6f1c819c
KO
1862 bioset_exit(&md->bs);
1863 bioset_exit(&md->io_bs);
0f20972f 1864
f26c5719
DW
1865 if (md->dax_dev) {
1866 kill_dax(md->dax_dev);
1867 put_dax(md->dax_dev);
1868 md->dax_dev = NULL;
1869 }
1870
0f20972f
MS
1871 if (md->disk) {
1872 spin_lock(&_minor_lock);
1873 md->disk->private_data = NULL;
1874 spin_unlock(&_minor_lock);
0f20972f
MS
1875 del_gendisk(md->disk);
1876 put_disk(md->disk);
1877 }
1878
1879 if (md->queue)
1880 blk_cleanup_queue(md->queue);
1881
d09960b0
TE
1882 cleanup_srcu_struct(&md->io_barrier);
1883
0f20972f
MS
1884 if (md->bdev) {
1885 bdput(md->bdev);
1886 md->bdev = NULL;
1887 }
4cc96131 1888
d5ffebdd
MS
1889 mutex_destroy(&md->suspend_lock);
1890 mutex_destroy(&md->type_lock);
1891 mutex_destroy(&md->table_devices_lock);
1892
4cc96131 1893 dm_mq_cleanup_mapped_device(md);
0f20972f
MS
1894}
1895
1da177e4
LT
1896/*
1897 * Allocate and initialise a blank device with a given minor.
1898 */
2b06cfff 1899static struct mapped_device *alloc_dev(int minor)
1da177e4 1900{
115485e8
MS
1901 int r, numa_node_id = dm_get_numa_node();
1902 struct mapped_device *md;
ba61fdd1 1903 void *old_md;
1da177e4 1904
856eb091 1905 md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
1da177e4
LT
1906 if (!md) {
1907 DMWARN("unable to allocate device, out of memory.");
1908 return NULL;
1909 }
1910
10da4f79 1911 if (!try_module_get(THIS_MODULE))
6ed7ade8 1912 goto bad_module_get;
10da4f79 1913
1da177e4 1914 /* get a minor number for the dev */
2b06cfff 1915 if (minor == DM_ANY_MINOR)
cf13ab8e 1916 r = next_free_minor(&minor);
2b06cfff 1917 else
cf13ab8e 1918 r = specific_minor(minor);
1da177e4 1919 if (r < 0)
6ed7ade8 1920 goto bad_minor;
1da177e4 1921
83d5e5b0
MP
1922 r = init_srcu_struct(&md->io_barrier);
1923 if (r < 0)
1924 goto bad_io_barrier;
1925
115485e8 1926 md->numa_node_id = numa_node_id;
591ddcfc 1927 md->init_tio_pdu = false;
a5664dad 1928 md->type = DM_TYPE_NONE;
e61290a4 1929 mutex_init(&md->suspend_lock);
a5664dad 1930 mutex_init(&md->type_lock);
86f1152b 1931 mutex_init(&md->table_devices_lock);
022c2611 1932 spin_lock_init(&md->deferred_lock);
1da177e4 1933 atomic_set(&md->holders, 1);
5c6bd75d 1934 atomic_set(&md->open_count, 0);
1da177e4 1935 atomic_set(&md->event_nr, 0);
7a8c3d3b
MA
1936 atomic_set(&md->uevent_seq, 0);
1937 INIT_LIST_HEAD(&md->uevent_list);
86f1152b 1938 INIT_LIST_HEAD(&md->table_devices);
7a8c3d3b 1939 spin_lock_init(&md->uevent_lock);
1da177e4 1940
47ace7e0 1941 /*
c62b37d9
CH
1942 * Default to bio-based until a DM table is loaded and md->type is
1943 * established. If a request-based table is loaded, blk-mq will
1944 * override accordingly.
47ace7e0 1945 */
c62b37d9 1946 md->queue = blk_alloc_queue(numa_node_id);
3d745ea5
CH
1947 if (!md->queue)
1948 goto bad;
1da177e4 1949
c12c9a3c 1950 md->disk = alloc_disk_node(1, md->numa_node_id);
1da177e4 1951 if (!md->disk)
0f20972f 1952 goto bad;
1da177e4 1953
f0b04115 1954 init_waitqueue_head(&md->wait);
53d5914f 1955 INIT_WORK(&md->work, dm_wq_work);
f0b04115 1956 init_waitqueue_head(&md->eventq);
2995fa78 1957 init_completion(&md->kobj_holder.completion);
f0b04115 1958
1da177e4
LT
1959 md->disk->major = _major;
1960 md->disk->first_minor = minor;
1961 md->disk->fops = &dm_blk_dops;
1962 md->disk->queue = md->queue;
1963 md->disk->private_data = md;
1964 sprintf(md->disk->disk_name, "dm-%d", minor);
f26c5719 1965
976431b0 1966 if (IS_ENABLED(CONFIG_DAX_DRIVER)) {
fefc1d97
PG
1967 md->dax_dev = alloc_dax(md, md->disk->disk_name,
1968 &dm_dax_ops, 0);
4e4ced93 1969 if (IS_ERR(md->dax_dev))
976431b0
DW
1970 goto bad;
1971 }
f26c5719 1972
c100ec49 1973 add_disk_no_queue_reg(md->disk);
7e51f257 1974 format_dev_t(md->name, MKDEV(_major, minor));
1da177e4 1975
670368a8 1976 md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0);
304f3f6a 1977 if (!md->wq)
0f20972f 1978 goto bad;
304f3f6a 1979
32a926da
MP
1980 md->bdev = bdget_disk(md->disk, 0);
1981 if (!md->bdev)
0f20972f 1982 goto bad;
32a926da 1983
fd2ed4d2
MP
1984 dm_stats_init(&md->stats);
1985
ba61fdd1 1986 /* Populate the mapping, nobody knows we exist yet */
f32c10b0 1987 spin_lock(&_minor_lock);
ba61fdd1 1988 old_md = idr_replace(&_minor_idr, md, minor);
f32c10b0 1989 spin_unlock(&_minor_lock);
ba61fdd1
JM
1990
1991 BUG_ON(old_md != MINOR_ALLOCED);
1992
1da177e4
LT
1993 return md;
1994
0f20972f
MS
1995bad:
1996 cleanup_mapped_device(md);
83d5e5b0 1997bad_io_barrier:
1da177e4 1998 free_minor(minor);
6ed7ade8 1999bad_minor:
10da4f79 2000 module_put(THIS_MODULE);
6ed7ade8 2001bad_module_get:
856eb091 2002 kvfree(md);
1da177e4
LT
2003 return NULL;
2004}
2005
ae9da83f
JN
2006static void unlock_fs(struct mapped_device *md);
2007
1da177e4
LT
2008static void free_dev(struct mapped_device *md)
2009{
f331c029 2010 int minor = MINOR(disk_devt(md->disk));
63d94e48 2011
32a926da 2012 unlock_fs(md);
2eb6e1e3 2013
0f20972f 2014 cleanup_mapped_device(md);
63a4f065 2015
86f1152b 2016 free_table_devices(&md->table_devices);
63a4f065 2017 dm_stats_cleanup(&md->stats);
63a4f065
MS
2018 free_minor(minor);
2019
10da4f79 2020 module_put(THIS_MODULE);
856eb091 2021 kvfree(md);
1da177e4
LT
2022}
2023
2a2a4c51 2024static int __bind_mempools(struct mapped_device *md, struct dm_table *t)
e6ee8c0b 2025{
c0820cf5 2026 struct dm_md_mempools *p = dm_table_get_md_mempools(t);
2a2a4c51 2027 int ret = 0;
e6ee8c0b 2028
0776aa0e 2029 if (dm_table_bio_based(t)) {
64f52b0e
MS
2030 /*
2031 * The md may already have mempools that need changing.
2032 * If so, reload the bioset because front_pad may have changed
2033 * when a different table was loaded.
2034 */
6f1c819c
KO
2035 bioset_exit(&md->bs);
2036 bioset_exit(&md->io_bs);
0776aa0e 2037
6f1c819c 2038 } else if (bioset_initialized(&md->bs)) {
4e6e36c3
MS
2039 /*
2040 * There's no need to reload with request-based dm
2041 * because the size of front_pad doesn't change.
2042 * Note for the future: if you ever do reload the bioset,
2043 * prepped requests in the queue may still refer
2044 * to bios from the old bioset, so you must walk
2045 * through the queue to unprep them.
2046 */
2047 goto out;
c0820cf5 2048 }
e6ee8c0b 2049
6f1c819c
KO
2050 BUG_ON(!p ||
2051 bioset_initialized(&md->bs) ||
2052 bioset_initialized(&md->io_bs));
cbc4e3c1 2053
2a2a4c51
JA
2054 ret = bioset_init_from_src(&md->bs, &p->bs);
2055 if (ret)
2056 goto out;
2057 ret = bioset_init_from_src(&md->io_bs, &p->io_bs);
2058 if (ret)
2059 bioset_exit(&md->bs);
e6ee8c0b 2060out:
02233342 2061 /* mempool bind completed, no longer need any mempools in the table */
e6ee8c0b 2062 dm_table_free_md_mempools(t);
2a2a4c51 2063 return ret;
e6ee8c0b
KU
2064}
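
/*
 * Editor's example: the front_pad that __bind_mempools() preserves is what
 * lets per-bio context be recovered from a clone with container_of(). A
 * hypothetical target-style layout (names are illustrative, not from dm.c):
 */
struct example_per_bio_data {
	unsigned long flags;
	struct bio clone;	/* must be last, like dm_target_io */
};

static struct example_per_bio_data *example_from_clone(struct bio *clone)
{
	/*
	 * Valid only for bios allocated from a bioset whose front_pad
	 * equals offsetof(struct example_per_bio_data, clone).
	 */
	return container_of(clone, struct example_per_bio_data, clone);
}
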
2065
1da177e4
LT
2066/*
2067 * Bind a table to the device.
2068 */
2069static void event_callback(void *context)
2070{
7a8c3d3b
MA
2071 unsigned long flags;
2072 LIST_HEAD(uevents);
1da177e4
LT
2073 struct mapped_device *md = (struct mapped_device *) context;
2074
7a8c3d3b
MA
2075 spin_lock_irqsave(&md->uevent_lock, flags);
2076 list_splice_init(&md->uevent_list, &uevents);
2077 spin_unlock_irqrestore(&md->uevent_lock, flags);
2078
ed9e1982 2079 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
7a8c3d3b 2080
1da177e4
LT
2081 atomic_inc(&md->event_nr);
2082 wake_up(&md->eventq);
62e08243 2083 dm_issue_global_event();
1da177e4
LT
2084}
2085
042d2a9b
AK
2086/*
2087 * Returns old map, which caller must destroy.
2088 */
2089static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
2090 struct queue_limits *limits)
1da177e4 2091{
042d2a9b 2092 struct dm_table *old_map;
165125e1 2093 struct request_queue *q = md->queue;
978e51ba 2094 bool request_based = dm_table_request_based(t);
1da177e4 2095 sector_t size;
2a2a4c51 2096 int ret;
1da177e4 2097
5a8f1f80
BVA
2098 lockdep_assert_held(&md->suspend_lock);
2099
1da177e4 2100 size = dm_table_get_size(t);
3ac51e74
DW
2101
2102 /*
2103 * Wipe any geometry if the size of the table changed.
2104 */
fd2ed4d2 2105 if (size != dm_get_size(md))
3ac51e74
DW
2106 memset(&md->geometry, 0, sizeof(md->geometry));
2107
c2b4bb8c
CH
2108 set_capacity(md->disk, size);
2109 bd_set_nr_sectors(md->bdev, size);
d5816876 2110
2ca3310e
AK
2111 dm_table_event_callback(t, event_callback, md);
2112
e6ee8c0b
KU
2113 /*
2114 * If the old table type wasn't request-based, the queue hasn't
2115 * been stopped yet during suspension, so stop it now to prevent
2116 * I/O from being mapped before resume.
2117 * This must be done before setting the queue restrictions,
2118 * because request-based dm may start running just after they are set.
2119 */
978e51ba 2120 if (request_based)
eca7ee6d 2121 dm_stop_queue(q);
978e51ba
MS
2122
2123 if (request_based || md->type == DM_TYPE_NVME_BIO_BASED) {
16f12266 2124 /*
978e51ba
MS
2125 * Leverage the fact that request-based DM targets and
2126 * NVMe bio based targets are immutable singletons
2127 * - used to optimize both dm_request_fn and dm_mq_queue_rq;
2128 * and __process_bio.
16f12266
MS
2129 */
2130 md->immutable_target = dm_table_get_immutable_target(t);
2131 }
e6ee8c0b 2132
2a2a4c51
JA
2133 ret = __bind_mempools(md, t);
2134 if (ret) {
2135 old_map = ERR_PTR(ret);
2136 goto out;
2137 }
e6ee8c0b 2138
a12f5d48 2139 old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
1d3aa6f6 2140 rcu_assign_pointer(md->map, (void *)t);
36a0456f
AK
2141 md->immutable_target_type = dm_table_get_immutable_target_type(t);
2142
754c5fc7 2143 dm_table_set_restrictions(t, q, limits);
41abc4e1
HR
2144 if (old_map)
2145 dm_sync_table(md);
1da177e4 2146
2a2a4c51 2147out:
042d2a9b 2148 return old_map;
1da177e4
LT
2149}
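
/*
 * Editor's example: __bind()/__unbind() publish the table with
 * rcu_assign_pointer() and synchronize via dm_sync_table(). The matching
 * reader side always brackets access with dm_get_live_table() /
 * dm_put_live_table(); a minimal sketch:
 */
static sector_t example_live_table_size(struct mapped_device *md)
{
	int srcu_idx;
	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
	sector_t size = map ? dm_table_get_size(map) : 0;

	dm_put_live_table(md, srcu_idx);	/* drop the SRCU read lock */
	return size;
}
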
2150
a7940155
AK
2151/*
2152 * Returns unbound table for the caller to free.
2153 */
2154static struct dm_table *__unbind(struct mapped_device *md)
1da177e4 2155{
a12f5d48 2156 struct dm_table *map = rcu_dereference_protected(md->map, 1);
1da177e4
LT
2157
2158 if (!map)
a7940155 2159 return NULL;
1da177e4
LT
2160
2161 dm_table_event_callback(map, NULL, NULL);
9cdb8520 2162 RCU_INIT_POINTER(md->map, NULL);
83d5e5b0 2163 dm_sync_table(md);
a7940155
AK
2164
2165 return map;
1da177e4
LT
2166}
2167
2168/*
2169 * Constructor for a new device.
2170 */
2b06cfff 2171int dm_create(int minor, struct mapped_device **result)
1da177e4 2172{
c12c9a3c 2173 int r;
1da177e4
LT
2174 struct mapped_device *md;
2175
2b06cfff 2176 md = alloc_dev(minor);
1da177e4
LT
2177 if (!md)
2178 return -ENXIO;
2179
c12c9a3c
MS
2180 r = dm_sysfs_init(md);
2181 if (r) {
2182 free_dev(md);
2183 return r;
2184 }
784aae73 2185
1da177e4
LT
2186 *result = md;
2187 return 0;
2188}
2189
a5664dad
MS
2190/*
2191 * Functions to manage md->type.
2192 * All are required to hold md->type_lock.
2193 */
2194void dm_lock_md_type(struct mapped_device *md)
2195{
2196 mutex_lock(&md->type_lock);
2197}
2198
2199void dm_unlock_md_type(struct mapped_device *md)
2200{
2201 mutex_unlock(&md->type_lock);
2202}
2203
7e0d574f 2204void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type)
a5664dad 2205{
00c4fc3b 2206 BUG_ON(!mutex_is_locked(&md->type_lock));
a5664dad
MS
2207 md->type = type;
2208}
2209
7e0d574f 2210enum dm_queue_mode dm_get_md_type(struct mapped_device *md)
a5664dad
MS
2211{
2212 return md->type;
2213}
2214
36a0456f
AK
2215struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
2216{
2217 return md->immutable_target_type;
2218}
2219
f84cb8a4
MS
2220/*
2221 * The queue_limits are only valid as long as you have a reference
2222 * count on 'md'.
2223 */
2224struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
2225{
2226 BUG_ON(!atomic_read(&md->holders));
2227 return &md->queue->limits;
2228}
2229EXPORT_SYMBOL_GPL(dm_get_queue_limits);
2230
4a0b4ddf
MS
2231/*
2232 * Setup the DM device's queue based on md's type
2233 */
591ddcfc 2234int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
4a0b4ddf 2235{
bfebd1cd 2236 int r;
c100ec49 2237 struct queue_limits limits;
7e0d574f 2238 enum dm_queue_mode type = dm_get_md_type(md);
bfebd1cd 2239
545ed20e 2240 switch (type) {
bfebd1cd 2241 case DM_TYPE_REQUEST_BASED:
e83068a5 2242 r = dm_mq_init_request_queue(md, t);
bfebd1cd 2243 if (r) {
eca7ee6d 2244 DMERR("Cannot initialize queue for request-based dm-mq mapped device");
bfebd1cd
MS
2245 return r;
2246 }
2247 break;
2248 case DM_TYPE_BIO_BASED:
545ed20e 2249 case DM_TYPE_DAX_BIO_BASED:
978e51ba 2250 case DM_TYPE_NVME_BIO_BASED:
bfebd1cd 2251 break;
7e0d574f
BVA
2252 case DM_TYPE_NONE:
2253 WARN_ON_ONCE(true);
2254 break;
4a0b4ddf
MS
2255 }
2256
c100ec49
MS
2257 r = dm_calculate_queue_limits(t, &limits);
2258 if (r) {
2259 DMERR("Cannot calculate initial queue limits");
2260 return r;
2261 }
2262 dm_table_set_restrictions(t, md->queue, &limits);
2263 blk_register_queue(md->disk);
2264
4a0b4ddf
MS
2265 return 0;
2266}
2267
2bec1f4a 2268struct mapped_device *dm_get_md(dev_t dev)
1da177e4
LT
2269{
2270 struct mapped_device *md;
1da177e4
LT
2271 unsigned minor = MINOR(dev);
2272
2273 if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
2274 return NULL;
2275
f32c10b0 2276 spin_lock(&_minor_lock);
1da177e4
LT
2277
2278 md = idr_find(&_minor_idr, minor);
49de5769
MS
2279 if (!md || md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) ||
2280 test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
2281 md = NULL;
2282 goto out;
fba9f90e 2283 }
49de5769 2284 dm_get(md);
fba9f90e 2285out:
f32c10b0 2286 spin_unlock(&_minor_lock);
1da177e4 2287
637842cf
DT
2288 return md;
2289}
3cf2e4ba 2290EXPORT_SYMBOL_GPL(dm_get_md);
d229a958 2291
9ade92a9 2292void *dm_get_mdptr(struct mapped_device *md)
637842cf 2293{
9ade92a9 2294 return md->interface_ptr;
1da177e4
LT
2295}
2296
2297void dm_set_mdptr(struct mapped_device *md, void *ptr)
2298{
2299 md->interface_ptr = ptr;
2300}
2301
2302void dm_get(struct mapped_device *md)
2303{
2304 atomic_inc(&md->holders);
3f77316d 2305 BUG_ON(test_bit(DMF_FREEING, &md->flags));
1da177e4
LT
2306}
2307
09ee96b2
MP
2308int dm_hold(struct mapped_device *md)
2309{
2310 spin_lock(&_minor_lock);
2311 if (test_bit(DMF_FREEING, &md->flags)) {
2312 spin_unlock(&_minor_lock);
2313 return -EBUSY;
2314 }
2315 dm_get(md);
2316 spin_unlock(&_minor_lock);
2317 return 0;
2318}
2319EXPORT_SYMBOL_GPL(dm_hold);
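
/*
 * Editor's example: dm_hold() is the race-safe way to take a reference when
 * the device might concurrently be freed; plain dm_get() BUGs if DMF_FREEING
 * is already set. A hypothetical caller:
 */
static int example_use_md(struct mapped_device *md)
{
	if (dm_hold(md))	/* -EBUSY if the device is being freed */
		return -EBUSY;
	/* ... safely use md here ... */
	dm_put(md);		/* balance the reference */
	return 0;
}
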
2320
72d94861
AK
2321const char *dm_device_name(struct mapped_device *md)
2322{
2323 return md->name;
2324}
2325EXPORT_SYMBOL_GPL(dm_device_name);
2326
3f77316d 2327static void __dm_destroy(struct mapped_device *md, bool wait)
1da177e4 2328{
1134e5ae 2329 struct dm_table *map;
83d5e5b0 2330 int srcu_idx;
1da177e4 2331
3f77316d 2332 might_sleep();
fba9f90e 2333
63a4f065 2334 spin_lock(&_minor_lock);
3f77316d
KU
2335 idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
2336 set_bit(DMF_FREEING, &md->flags);
2337 spin_unlock(&_minor_lock);
3b785fbc 2338
c12c9a3c 2339 blk_set_queue_dying(md->queue);
3f77316d 2340
ab7c7bb6
MP
2341 /*
2342 * Take suspend_lock so that presuspend and postsuspend methods
2343 * do not race with internal suspend.
2344 */
2345 mutex_lock(&md->suspend_lock);
2a708cff 2346 map = dm_get_live_table(md, &srcu_idx);
3f77316d
KU
2347 if (!dm_suspended_md(md)) {
2348 dm_table_presuspend_targets(map);
adc0daad 2349 set_bit(DMF_SUSPENDED, &md->flags);
5df96f2b 2350 set_bit(DMF_POST_SUSPENDING, &md->flags);
3f77316d 2351 dm_table_postsuspend_targets(map);
1da177e4 2352 }
83d5e5b0
MP
2353 /* dm_put_live_table must be before msleep, otherwise deadlock is possible */
2354 dm_put_live_table(md, srcu_idx);
2a708cff 2355 mutex_unlock(&md->suspend_lock);
83d5e5b0 2356
3f77316d
KU
2357 /*
2358 * Rare, but there may be I/O requests that are still in flight
2359 * and need to complete. Wait for all references to disappear:
2360 * no one should increment the reference count of the mapped_device
2361 * after the mapped_device state becomes DMF_FREEING.
2362 */
2363 if (wait)
2364 while (atomic_read(&md->holders))
2365 msleep(1);
2366 else if (atomic_read(&md->holders))
2367 DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
2368 dm_device_name(md), atomic_read(&md->holders));
2369
2370 dm_sysfs_exit(md);
3f77316d
KU
2371 dm_table_destroy(__unbind(md));
2372 free_dev(md);
2373}
2374
2375void dm_destroy(struct mapped_device *md)
2376{
2377 __dm_destroy(md, true);
2378}
2379
2380void dm_destroy_immediate(struct mapped_device *md)
2381{
2382 __dm_destroy(md, false);
2383}
2384
2385void dm_put(struct mapped_device *md)
2386{
2387 atomic_dec(&md->holders);
1da177e4 2388}
79eb885c 2389EXPORT_SYMBOL_GPL(dm_put);
1da177e4 2390
85067747
ML
2391static bool md_in_flight_bios(struct mapped_device *md)
2392{
2393 int cpu;
2394 struct hd_struct *part = &dm_disk(md)->part0;
2395 long sum = 0;
2396
2397 for_each_possible_cpu(cpu) {
2398 sum += part_stat_local_read_cpu(part, in_flight[0], cpu);
2399 sum += part_stat_local_read_cpu(part, in_flight[1], cpu);
2400 }
2401
2402 return sum != 0;
2403}
2404
2405static int dm_wait_for_bios_completion(struct mapped_device *md, long task_state)
46125c1c
MB
2406{
2407 int r = 0;
9f4c3f87 2408 DEFINE_WAIT(wait);
46125c1c 2409
85067747 2410 while (true) {
9f4c3f87 2411 prepare_to_wait(&md->wait, &wait, task_state);
46125c1c 2412
85067747 2413 if (!md_in_flight_bios(md))
46125c1c
MB
2414 break;
2415
e3fabdfd 2416 if (signal_pending_state(task_state, current)) {
46125c1c
MB
2417 r = -EINTR;
2418 break;
2419 }
2420
2421 io_schedule();
2422 }
9f4c3f87 2423 finish_wait(&md->wait, &wait);
b44ebeb0 2424
46125c1c
MB
2425 return r;
2426}
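
/*
 * Editor's example: the loop above is the classic open-coded wait idiom.
 * In generic form (condition() and arg are placeholders, not dm.c names):
 */
static int example_wait_on(wait_queue_head_t *wq, bool (*condition)(void *),
			   void *arg, long task_state)
{
	int r = 0;
	DEFINE_WAIT(wait);

	while (true) {
		prepare_to_wait(wq, &wait, task_state);
		if (condition(arg))
			break;
		if (signal_pending_state(task_state, current)) {
			r = -EINTR;
			break;
		}
		io_schedule();	/* account the sleep as I/O wait */
	}
	finish_wait(wq, &wait);
	return r;
}
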
2427
85067747
ML
2428static int dm_wait_for_completion(struct mapped_device *md, long task_state)
2429{
2430 int r = 0;
2431
2432 if (!queue_is_mq(md->queue))
2433 return dm_wait_for_bios_completion(md, task_state);
2434
2435 while (true) {
2436 if (!blk_mq_queue_inflight(md->queue))
2437 break;
2438
2439 if (signal_pending_state(task_state, current)) {
2440 r = -EINTR;
2441 break;
2442 }
2443
2444 msleep(5);
2445 }
2446
2447 return r;
2448}
2449
1da177e4
LT
2450/*
2451 * Process the deferred bios
2452 */
ef208587 2453static void dm_wq_work(struct work_struct *work)
1da177e4 2454{
ef208587
MP
2455 struct mapped_device *md = container_of(work, struct mapped_device,
2456 work);
6d6f10df 2457 struct bio *c;
83d5e5b0
MP
2458 int srcu_idx;
2459 struct dm_table *map;
1da177e4 2460
83d5e5b0 2461 map = dm_get_live_table(md, &srcu_idx);
ef208587 2462
3b00b203 2463 while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
df12ee99
AK
2464 spin_lock_irq(&md->deferred_lock);
2465 c = bio_list_pop(&md->deferred);
2466 spin_unlock_irq(&md->deferred_lock);
2467
6a8736d1 2468 if (!c)
df12ee99 2469 break;
022c2611 2470
e6ee8c0b 2471 if (dm_request_based(md))
ed00aabd 2472 (void) submit_bio_noacct(c);
6a8736d1 2473 else
6548c7c5 2474 (void) dm_process_bio(md, map, c);
022c2611 2475 }
73d410c0 2476
83d5e5b0 2477 dm_put_live_table(md, srcu_idx);
1da177e4
LT
2478}
2479
9a1fb464 2480static void dm_queue_flush(struct mapped_device *md)
304f3f6a 2481{
3b00b203 2482 clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
4e857c58 2483 smp_mb__after_atomic();
53d5914f 2484 queue_work(md->wq, &md->work);
304f3f6a
MB
2485}
2486
1da177e4 2487/*
042d2a9b 2488 * Swap in a new table, returning the old one for the caller to destroy.
1da177e4 2489 */
042d2a9b 2490struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
1da177e4 2491{
87eb5b21 2492 struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
754c5fc7 2493 struct queue_limits limits;
042d2a9b 2494 int r;
1da177e4 2495
e61290a4 2496 mutex_lock(&md->suspend_lock);
1da177e4
LT
2497
2498 /* device must be suspended */
4f186f8b 2499 if (!dm_suspended_md(md))
93c534ae 2500 goto out;
1da177e4 2501
3ae70656
MS
2502 /*
2503 * If the new table has no data devices, retain the existing limits.
2504 * This helps multipath with queue_if_no_path: if all paths disappear,
2505 * new I/O is queued based on these limits, and it can be issued once
2506 * some paths reappear.
2507 */
2508 if (dm_table_has_no_data_devices(table)) {
83d5e5b0 2509 live_map = dm_get_live_table_fast(md);
3ae70656
MS
2510 if (live_map)
2511 limits = md->queue->limits;
83d5e5b0 2512 dm_put_live_table_fast(md);
3ae70656
MS
2513 }
2514
87eb5b21
MC
2515 if (!live_map) {
2516 r = dm_calculate_queue_limits(table, &limits);
2517 if (r) {
2518 map = ERR_PTR(r);
2519 goto out;
2520 }
042d2a9b 2521 }
754c5fc7 2522
042d2a9b 2523 map = __bind(md, table, &limits);
62e08243 2524 dm_issue_global_event();
1da177e4 2525
93c534ae 2526out:
e61290a4 2527 mutex_unlock(&md->suspend_lock);
042d2a9b 2528 return map;
1da177e4
LT
2529}
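
/*
 * Editor's example: dm_swap_table() only succeeds on a suspended device, so
 * the canonical (hypothetical) caller sequence is suspend -> swap -> resume,
 * destroying whichever table comes back:
 */
static int example_replace_table(struct mapped_device *md, struct dm_table *t)
{
	struct dm_table *old_map;
	int r = dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);

	if (r)
		return r;

	old_map = dm_swap_table(md, t);
	if (IS_ERR(old_map)) {
		(void) dm_resume(md);
		return PTR_ERR(old_map);
	}
	if (old_map)
		dm_table_destroy(old_map);
	return dm_resume(md);
}
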
2530
2531/*
2532 * Functions to lock and unlock any filesystem running on the
2533 * device.
2534 */
2ca3310e 2535static int lock_fs(struct mapped_device *md)
1da177e4 2536{
e39e2e95 2537 int r;
1da177e4
LT
2538
2539 WARN_ON(md->frozen_sb);
dfbe03f6 2540
db8fef4f 2541 md->frozen_sb = freeze_bdev(md->bdev);
dfbe03f6 2542 if (IS_ERR(md->frozen_sb)) {
cf222b37 2543 r = PTR_ERR(md->frozen_sb);
e39e2e95
AK
2544 md->frozen_sb = NULL;
2545 return r;
dfbe03f6
AK
2546 }
2547
aa8d7c2f
AK
2548 set_bit(DMF_FROZEN, &md->flags);
2549
1da177e4
LT
2550 return 0;
2551}
2552
2ca3310e 2553static void unlock_fs(struct mapped_device *md)
1da177e4 2554{
aa8d7c2f
AK
2555 if (!test_bit(DMF_FROZEN, &md->flags))
2556 return;
2557
db8fef4f 2558 thaw_bdev(md->bdev, md->frozen_sb);
1da177e4 2559 md->frozen_sb = NULL;
aa8d7c2f 2560 clear_bit(DMF_FROZEN, &md->flags);
1da177e4
LT
2561}
2562
2563/*
b48633f8
BVA
2564 * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG
2565 * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE
2566 * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY
2567 *
ffcc3936
MS
2568 * If __dm_suspend returns 0, the device is completely quiescent
2569 * now. There is no request-processing activity. All new requests
2570 * are being added to md->deferred list.
cec47e3d 2571 */
ffcc3936 2572static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
b48633f8 2573 unsigned suspend_flags, long task_state,
eaf9a736 2574 int dmf_suspended_flag)
1da177e4 2575{
ffcc3936
MS
2576 bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
2577 bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
2578 int r;
1da177e4 2579
5a8f1f80
BVA
2580 lockdep_assert_held(&md->suspend_lock);
2581
2e93ccc1
KU
2582 /*
2583 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
2584 * This flag is cleared before dm_suspend returns.
2585 */
2586 if (noflush)
2587 set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
86331f39 2588 else
ac75b09f 2589 DMDEBUG("%s: suspending with flush", dm_device_name(md));
2e93ccc1 2590
d67ee213
MS
2591 /*
2592 * This gets reverted if there's an error later and the targets
2593 * provide the .presuspend_undo hook.
2594 */
cf222b37
AK
2595 dm_table_presuspend_targets(map);
2596
32a926da 2597 /*
9f518b27
KU
2598 * Flush I/O to the device.
2599 * Any I/O submitted after lock_fs() may not be flushed.
2600 * noflush takes precedence over do_lockfs.
2601 * (lock_fs() flushes I/Os and waits for them to complete.)
32a926da
MP
2602 */
2603 if (!noflush && do_lockfs) {
2604 r = lock_fs(md);
d67ee213
MS
2605 if (r) {
2606 dm_table_presuspend_undo_targets(map);
ffcc3936 2607 return r;
d67ee213 2608 }
aa8d7c2f 2609 }
1da177e4
LT
2610
2611 /*
3b00b203
MP
2612 * Here we must make sure that no processes are submitting requests
2613 * to target drivers, i.e. no one may be executing
2614 * __split_and_process_bio. This is called from dm_submit_bio and
2615 * dm_wq_work.
2616 *
2617 * To get all processes out of __split_and_process_bio in dm_submit_bio,
2618 * we take the write lock. To prevent any process from reentering
6a8736d1
TH
2619 * __split_and_process_bio from dm_submit_bio and to quiesce the thread
2620 * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
2621 * flush_workqueue(md->wq).
1da177e4 2622 */
1eb787ec 2623 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
41abc4e1
HR
2624 if (map)
2625 synchronize_srcu(&md->io_barrier);
1da177e4 2626
d0bcb878 2627 /*
29e4013d
TH
2628 * Stop md->queue before flushing md->wq in case request-based
2629 * dm defers requests to md->wq from md->queue.
d0bcb878 2630 */
6a23e05c 2631 if (dm_request_based(md))
eca7ee6d 2632 dm_stop_queue(md->queue);
cec47e3d 2633
d0bcb878
KU
2634 flush_workqueue(md->wq);
2635
1da177e4 2636 /*
3b00b203
MP
2637 * At this point no more requests are entering target request routines.
2638 * We call dm_wait_for_completion to wait for all existing requests
2639 * to finish.
1da177e4 2640 */
b48633f8 2641 r = dm_wait_for_completion(md, task_state);
eaf9a736
MS
2642 if (!r)
2643 set_bit(dmf_suspended_flag, &md->flags);
1da177e4 2644
6d6f10df 2645 if (noflush)
022c2611 2646 clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
41abc4e1
HR
2647 if (map)
2648 synchronize_srcu(&md->io_barrier);
2e93ccc1 2649
1da177e4 2650 /* were we interrupted ? */
46125c1c 2651 if (r < 0) {
9a1fb464 2652 dm_queue_flush(md);
73d410c0 2653
cec47e3d 2654 if (dm_request_based(md))
eca7ee6d 2655 dm_start_queue(md->queue);
cec47e3d 2656
2ca3310e 2657 unlock_fs(md);
d67ee213 2658 dm_table_presuspend_undo_targets(map);
ffcc3936 2659 /* pushback list is already flushed, so skip flush */
2ca3310e 2660 }
1da177e4 2661
ffcc3936
MS
2662 return r;
2663}
2664
2665/*
2666 * We need to be able to change a mapping table under a mounted
2667 * filesystem. For example we might want to move some data in
2668 * the background. Before the table can be swapped via
2669 * dm_swap_table, dm_suspend must be called to flush any
2670 * in-flight bios and ensure that any further io gets deferred.
2671 */
2672/*
2673 * Suspend mechanism in request-based dm.
2674 *
2675 * 1. Flush all I/Os by lock_fs() if needed.
2676 * 2. Stop dispatching any I/O by stopping the request_queue.
2677 * 3. Wait for all in-flight I/Os to be completed or requeued.
2678 *
2679 * To abort suspend, start the request_queue.
2680 */
2681int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
2682{
2683 struct dm_table *map = NULL;
2684 int r = 0;
2685
2686retry:
2687 mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
2688
2689 if (dm_suspended_md(md)) {
2690 r = -EINVAL;
2691 goto out_unlock;
2692 }
2693
2694 if (dm_suspended_internally_md(md)) {
2695 /* already internally suspended, wait for internal resume */
2696 mutex_unlock(&md->suspend_lock);
2697 r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
2698 if (r)
2699 return r;
2700 goto retry;
2701 }
2702
a12f5d48 2703 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
ffcc3936 2704
eaf9a736 2705 r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED);
ffcc3936
MS
2706 if (r)
2707 goto out_unlock;
3b00b203 2708
5df96f2b 2709 set_bit(DMF_POST_SUSPENDING, &md->flags);
4d4471cb 2710 dm_table_postsuspend_targets(map);
5df96f2b 2711 clear_bit(DMF_POST_SUSPENDING, &md->flags);
4d4471cb 2712
d287483d 2713out_unlock:
e61290a4 2714 mutex_unlock(&md->suspend_lock);
cf222b37 2715 return r;
1da177e4
LT
2716}
2717
ffcc3936
MS
2718static int __dm_resume(struct mapped_device *md, struct dm_table *map)
2719{
2720 if (map) {
2721 int r = dm_table_resume_targets(map);
2722 if (r)
2723 return r;
2724 }
2725
2726 dm_queue_flush(md);
2727
2728 /*
2729 * Flushing deferred I/Os must be done after targets are resumed
2730 * so that mapping of targets can work correctly.
2731 * Request-based dm is queueing the deferred I/Os in its request_queue.
2732 */
2733 if (dm_request_based(md))
eca7ee6d 2734 dm_start_queue(md->queue);
ffcc3936
MS
2735
2736 unlock_fs(md);
2737
2738 return 0;
2739}
2740
1da177e4
LT
2741int dm_resume(struct mapped_device *md)
2742{
8dc23658 2743 int r;
cf222b37 2744 struct dm_table *map = NULL;
1da177e4 2745
ffcc3936 2746retry:
8dc23658 2747 r = -EINVAL;
ffcc3936
MS
2748 mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
2749
4f186f8b 2750 if (!dm_suspended_md(md))
cf222b37 2751 goto out;
cf222b37 2752
ffcc3936
MS
2753 if (dm_suspended_internally_md(md)) {
2754 /* already internally suspended, wait for internal resume */
2755 mutex_unlock(&md->suspend_lock);
2756 r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
2757 if (r)
2758 return r;
2759 goto retry;
2760 }
2761
a12f5d48 2762 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2ca3310e 2763 if (!map || !dm_table_get_size(map))
cf222b37 2764 goto out;
1da177e4 2765
ffcc3936 2766 r = __dm_resume(md, map);
8757b776
MB
2767 if (r)
2768 goto out;
2ca3310e 2769
2ca3310e 2770 clear_bit(DMF_SUSPENDED, &md->flags);
cf222b37 2771out:
e61290a4 2772 mutex_unlock(&md->suspend_lock);
2ca3310e 2773
cf222b37 2774 return r;
1da177e4
LT
2775}
2776
fd2ed4d2
MP
2777/*
2778 * Internal suspend/resume works like userspace-driven suspend. It waits
2779 * until all bios finish and prevents issuing new bios to the target drivers.
2780 * It may be used only from the kernel.
fd2ed4d2
MP
2781 */
2782
ffcc3936 2783static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags)
fd2ed4d2 2784{
ffcc3936
MS
2785 struct dm_table *map = NULL;
2786
1ea0654e
BVA
2787 lockdep_assert_held(&md->suspend_lock);
2788
96b26c8c 2789 if (md->internal_suspend_count++)
ffcc3936
MS
2790 return; /* nested internal suspend */
2791
2792 if (dm_suspended_md(md)) {
2793 set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
2794 return; /* nest suspend */
2795 }
2796
a12f5d48 2797 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
ffcc3936
MS
2798
2799 /*
2800 * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is
2801 * supported. Properly supporting a TASK_INTERRUPTIBLE internal suspend
2802 * would require changing .presuspend to return an error -- avoid this
2803 * until there is a need for more elaborate variants of internal suspend.
2804 */
eaf9a736
MS
2805 (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE,
2806 DMF_SUSPENDED_INTERNALLY);
ffcc3936 2807
5df96f2b 2808 set_bit(DMF_POST_SUSPENDING, &md->flags);
ffcc3936 2809 dm_table_postsuspend_targets(map);
5df96f2b 2810 clear_bit(DMF_POST_SUSPENDING, &md->flags);
ffcc3936
MS
2811}
2812
2813static void __dm_internal_resume(struct mapped_device *md)
2814{
96b26c8c
MP
2815 BUG_ON(!md->internal_suspend_count);
2816
2817 if (--md->internal_suspend_count)
ffcc3936
MS
2818 return; /* resume from nested internal suspend */
2819
fd2ed4d2 2820 if (dm_suspended_md(md))
ffcc3936
MS
2821 goto done; /* resume from nested suspend */
2822
2823 /*
2824 * NOTE: existing callers don't need to call dm_table_resume_targets
2825 * (which may fail -- so best to avoid it for now by passing NULL map)
2826 */
2827 (void) __dm_resume(md, NULL);
2828
2829done:
2830 clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
2831 smp_mb__after_atomic();
2832 wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY);
2833}
2834
2835void dm_internal_suspend_noflush(struct mapped_device *md)
2836{
2837 mutex_lock(&md->suspend_lock);
2838 __dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG);
2839 mutex_unlock(&md->suspend_lock);
2840}
2841EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush);
2842
2843void dm_internal_resume(struct mapped_device *md)
2844{
2845 mutex_lock(&md->suspend_lock);
2846 __dm_internal_resume(md);
2847 mutex_unlock(&md->suspend_lock);
2848}
2849EXPORT_SYMBOL_GPL(dm_internal_resume);
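
/*
 * Editor's example: internal suspend nests and must be balanced. A kernel
 * caller that needs the device quiesced brackets its work like this
 * (do_work is a placeholder):
 */
static void example_with_device_quiesced(struct mapped_device *md,
					 void (*do_work)(struct mapped_device *))
{
	dm_internal_suspend_noflush(md);	/* may nest; counts up */
	do_work(md);
	dm_internal_resume(md);			/* counts down; resumes at zero */
}
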
2850
2851/*
2852 * Fast variants of internal suspend/resume hold md->suspend_lock,
2853 * which prevents interaction with userspace-driven suspend.
2854 */
2855
2856void dm_internal_suspend_fast(struct mapped_device *md)
2857{
2858 mutex_lock(&md->suspend_lock);
2859 if (dm_suspended_md(md) || dm_suspended_internally_md(md))
fd2ed4d2
MP
2860 return;
2861
2862 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2863 synchronize_srcu(&md->io_barrier);
2864 flush_workqueue(md->wq);
2865 dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
2866}
b735fede 2867EXPORT_SYMBOL_GPL(dm_internal_suspend_fast);
fd2ed4d2 2868
ffcc3936 2869void dm_internal_resume_fast(struct mapped_device *md)
fd2ed4d2 2870{
ffcc3936 2871 if (dm_suspended_md(md) || dm_suspended_internally_md(md))
fd2ed4d2
MP
2872 goto done;
2873
2874 dm_queue_flush(md);
2875
2876done:
2877 mutex_unlock(&md->suspend_lock);
2878}
b735fede 2879EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
fd2ed4d2 2880
1da177e4
LT
2881/*-----------------------------------------------------------------
2882 * Event notification.
2883 *---------------------------------------------------------------*/
3abf85b5 2884int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
60935eb2 2885 unsigned cookie)
69267a30 2886{
6958c1c6
MP
2887 int r;
2888 unsigned noio_flag;
60935eb2
MB
2889 char udev_cookie[DM_COOKIE_LENGTH];
2890 char *envp[] = { udev_cookie, NULL };
2891
6958c1c6
MP
2892 noio_flag = memalloc_noio_save();
2893
60935eb2 2894 if (!cookie)
6958c1c6 2895 r = kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
60935eb2
MB
2896 else {
2897 snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
2898 DM_COOKIE_ENV_VAR_NAME, cookie);
6958c1c6
MP
2899 r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
2900 action, envp);
60935eb2 2901 }
6958c1c6
MP
2902
2903 memalloc_noio_restore(noio_flag);
2904
2905 return r;
69267a30
AK
2906}
2907
7a8c3d3b
MA
2908uint32_t dm_next_uevent_seq(struct mapped_device *md)
2909{
2910 return atomic_add_return(1, &md->uevent_seq);
2911}
2912
1da177e4
LT
2913uint32_t dm_get_event_nr(struct mapped_device *md)
2914{
2915 return atomic_read(&md->event_nr);
2916}
2917
2918int dm_wait_event(struct mapped_device *md, int event_nr)
2919{
2920 return wait_event_interruptible(md->eventq,
2921 (event_nr != atomic_read(&md->event_nr)));
2922}
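
/*
 * Editor's example: the event counter is sampled first and then waited on,
 * so no event can be lost between the two calls. A hypothetical poller:
 */
static int example_wait_for_next_event(struct mapped_device *md)
{
	int ev = dm_get_event_nr(md);

	/* returns -ERESTARTSYS if interrupted by a signal */
	return dm_wait_event(md, ev);
}
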
2923
7a8c3d3b
MA
2924void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
2925{
2926 unsigned long flags;
2927
2928 spin_lock_irqsave(&md->uevent_lock, flags);
2929 list_add(elist, &md->uevent_list);
2930 spin_unlock_irqrestore(&md->uevent_lock, flags);
2931}
2932
1da177e4
LT
2933/*
2934 * The gendisk is only valid as long as you have a reference
2935 * count on 'md'.
2936 */
2937struct gendisk *dm_disk(struct mapped_device *md)
2938{
2939 return md->disk;
2940}
65ff5b7d 2941EXPORT_SYMBOL_GPL(dm_disk);
1da177e4 2942
784aae73
MB
2943struct kobject *dm_kobject(struct mapped_device *md)
2944{
2995fa78 2945 return &md->kobj_holder.kobj;
784aae73
MB
2946}
2947
784aae73
MB
2948struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
2949{
2950 struct mapped_device *md;
2951
2995fa78 2952 md = container_of(kobj, struct mapped_device, kobj_holder.kobj);
784aae73 2953
b9a41d21
HT
2954 spin_lock(&_minor_lock);
2955 if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
2956 md = NULL;
2957 goto out;
2958 }
784aae73 2959 dm_get(md);
b9a41d21
HT
2960out:
2961 spin_unlock(&_minor_lock);
2962
784aae73
MB
2963 return md;
2964}
2965
4f186f8b 2966int dm_suspended_md(struct mapped_device *md)
1da177e4
LT
2967{
2968 return test_bit(DMF_SUSPENDED, &md->flags);
2969}
2970
5df96f2b
MP
2971static int dm_post_suspending_md(struct mapped_device *md)
2972{
2973 return test_bit(DMF_POST_SUSPENDING, &md->flags);
2974}
2975
ffcc3936
MS
2976int dm_suspended_internally_md(struct mapped_device *md)
2977{
2978 return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
2979}
2980
2c140a24
MP
2981int dm_test_deferred_remove_flag(struct mapped_device *md)
2982{
2983 return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
2984}
2985
64dbce58
KU
2986int dm_suspended(struct dm_target *ti)
2987{
ecdb2e25 2988 return dm_suspended_md(dm_table_get_md(ti->table));
64dbce58
KU
2989}
2990EXPORT_SYMBOL_GPL(dm_suspended);
2991
5df96f2b
MP
2992int dm_post_suspending(struct dm_target *ti)
2993{
2994 return dm_post_suspending_md(dm_table_get_md(ti->table));
2995}
2996EXPORT_SYMBOL_GPL(dm_post_suspending);
2997
2e93ccc1
KU
2998int dm_noflush_suspending(struct dm_target *ti)
2999{
ecdb2e25 3000 return __noflush_suspending(dm_table_get_md(ti->table));
2e93ccc1
KU
3001}
3002EXPORT_SYMBOL_GPL(dm_noflush_suspending);
3003
7e0d574f 3004struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
0776aa0e
MS
3005 unsigned integrity, unsigned per_io_data_size,
3006 unsigned min_pool_size)
e6ee8c0b 3007{
115485e8 3008 struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
78d8e58a 3009 unsigned int pool_size = 0;
64f52b0e 3010 unsigned int front_pad, io_front_pad;
6f1c819c 3011 int ret;
e6ee8c0b
KU
3012
3013 if (!pools)
4e6e36c3 3014 return NULL;
e6ee8c0b 3015
78d8e58a
MS
3016 switch (type) {
3017 case DM_TYPE_BIO_BASED:
545ed20e 3018 case DM_TYPE_DAX_BIO_BASED:
22c11858 3019 case DM_TYPE_NVME_BIO_BASED:
0776aa0e 3020 pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
30187e1d 3021 front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
64f52b0e 3022 io_front_pad = roundup(front_pad, __alignof__(struct dm_io)) + offsetof(struct dm_io, tio);
6f1c819c
KO
3023 ret = bioset_init(&pools->io_bs, pool_size, io_front_pad, 0);
3024 if (ret)
64f52b0e 3025 goto out;
6f1c819c 3026 if (integrity && bioset_integrity_create(&pools->io_bs, pool_size))
eb8db831 3027 goto out;
78d8e58a
MS
3028 break;
3029 case DM_TYPE_REQUEST_BASED:
0776aa0e 3030 pool_size = max(dm_get_reserved_rq_based_ios(), min_pool_size);
78d8e58a 3031 front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
591ddcfc 3032 /* per_io_data_size is used for blk-mq pdu at queue allocation */
78d8e58a
MS
3033 break;
3034 default:
3035 BUG();
3036 }
3037
6f1c819c
KO
3038 ret = bioset_init(&pools->bs, pool_size, front_pad, 0);
3039 if (ret)
5f015204 3040 goto out;
e6ee8c0b 3041
6f1c819c 3042 if (integrity && bioset_integrity_create(&pools->bs, pool_size))
5f015204 3043 goto out;
a91a2785 3044
e6ee8c0b 3045 return pools;
5f1b670d 3046
5f1b670d
CH
3047out:
3048 dm_free_md_mempools(pools);
78d8e58a 3049
4e6e36c3 3050 return NULL;
e6ee8c0b
KU
3051}
3052
3053void dm_free_md_mempools(struct dm_md_mempools *pools)
3054{
3055 if (!pools)
3056 return;
3057
6f1c819c
KO
3058 bioset_exit(&pools->bs);
3059 bioset_exit(&pools->io_bs);
e6ee8c0b
KU
3060
3061 kfree(pools);
3062}
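
/*
 * Editor's example: a table-load path sizes pools for its target type and
 * hands them to the md (see __bind_mempools() above); on failure the caller
 * frees them with dm_free_md_mempools(). Parameter values are illustrative
 * assumptions, not dm.c defaults:
 */
static struct dm_md_mempools *example_make_pools(struct mapped_device *md)
{
	return dm_alloc_md_mempools(md, DM_TYPE_BIO_BASED,
				    0  /* no integrity */,
				    64 /* per_io_data_size */,
				    0  /* min_pool_size */);
}
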
3063
9c72bad1
CH
3064struct dm_pr {
3065 u64 old_key;
3066 u64 new_key;
3067 u32 flags;
3068 bool fail_early;
3069};
3070
3071static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn,
3072 void *data)
71cdb697
CH
3073{
3074 struct mapped_device *md = bdev->bd_disk->private_data;
9c72bad1
CH
3075 struct dm_table *table;
3076 struct dm_target *ti;
3077 int ret = -ENOTTY, srcu_idx;
71cdb697 3078
9c72bad1
CH
3079 table = dm_get_live_table(md, &srcu_idx);
3080 if (!table || !dm_table_get_size(table))
3081 goto out;
71cdb697 3082
9c72bad1
CH
3083 /* We only support devices that have a single target */
3084 if (dm_table_get_num_targets(table) != 1)
3085 goto out;
3086 ti = dm_table_get_target(table, 0);
71cdb697 3087
9c72bad1
CH
3088 ret = -EINVAL;
3089 if (!ti->type->iterate_devices)
3090 goto out;
3091
3092 ret = ti->type->iterate_devices(ti, fn, data);
3093out:
3094 dm_put_live_table(md, srcu_idx);
3095 return ret;
3096}
3097
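
/*
 * Editor's example: dm_call_pr() fans an iterate_devices_callout_fn out
 * across the single target's underlying devices (see __dm_pr_register()
 * below for the real thing). A minimal callback that just counts paths,
 * illustrative only:
 */
static int example_count_paths(struct dm_target *ti, struct dm_dev *dev,
			       sector_t start, sector_t len, void *data)
{
	(*(unsigned int *)data)++;
	return 0;	/* returning non-zero stops the iteration early */
}
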
3098/*
3099 * For register / unregister we need to manually call out to every path.
3100 */
3101static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev,
3102 sector_t start, sector_t len, void *data)
3103{
3104 struct dm_pr *pr = data;
3105 const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
3106
3107 if (!ops || !ops->pr_register)
3108 return -EOPNOTSUPP;
3109 return ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags);
3110}
3111
3112static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
3113 u32 flags)
3114{
3115 struct dm_pr pr = {
3116 .old_key = old_key,
3117 .new_key = new_key,
3118 .flags = flags,
3119 .fail_early = true,
3120 };
3121 int ret;
3122
3123 ret = dm_call_pr(bdev, __dm_pr_register, &pr);
3124 if (ret && new_key) {
3125 /* unregister all paths if we failed to register any path */
3126 pr.old_key = new_key;
3127 pr.new_key = 0;
3128 pr.flags = 0;
3129 pr.fail_early = false;
3130 dm_call_pr(bdev, __dm_pr_register, &pr);
3131 }
3132
3133 return ret;
71cdb697
CH
3134}
3135
3136static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
956a4025 3137 u32 flags)
71cdb697
CH
3138{
3139 struct mapped_device *md = bdev->bd_disk->private_data;
3140 const struct pr_ops *ops;
971888c4 3141 int r, srcu_idx;
71cdb697 3142
5bd5e8d8 3143 r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
71cdb697 3144 if (r < 0)
971888c4 3145 goto out;
71cdb697
CH
3146
3147 ops = bdev->bd_disk->fops->pr_ops;
3148 if (ops && ops->pr_reserve)
3149 r = ops->pr_reserve(bdev, key, type, flags);
3150 else
3151 r = -EOPNOTSUPP;
971888c4
MS
3152out:
3153 dm_unprepare_ioctl(md, srcu_idx);
71cdb697
CH
3154 return r;
3155}
3156
3157static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
3158{
3159 struct mapped_device *md = bdev->bd_disk->private_data;
3160 const struct pr_ops *ops;
971888c4 3161 int r, srcu_idx;
71cdb697 3162
5bd5e8d8 3163 r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
71cdb697 3164 if (r < 0)
971888c4 3165 goto out;
71cdb697
CH
3166
3167 ops = bdev->bd_disk->fops->pr_ops;
3168 if (ops && ops->pr_release)
3169 r = ops->pr_release(bdev, key, type);
3170 else
3171 r = -EOPNOTSUPP;
971888c4
MS
3172out:
3173 dm_unprepare_ioctl(md, srcu_idx);
71cdb697
CH
3174 return r;
3175}
3176
3177static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
956a4025 3178 enum pr_type type, bool abort)
71cdb697
CH
3179{
3180 struct mapped_device *md = bdev->bd_disk->private_data;
3181 const struct pr_ops *ops;
971888c4 3182 int r, srcu_idx;
71cdb697 3183
5bd5e8d8 3184 r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
71cdb697 3185 if (r < 0)
971888c4 3186 goto out;
71cdb697
CH
3187
3188 ops = bdev->bd_disk->fops->pr_ops;
3189 if (ops && ops->pr_preempt)
3190 r = ops->pr_preempt(bdev, old_key, new_key, type, abort);
3191 else
3192 r = -EOPNOTSUPP;
971888c4
MS
3193out:
3194 dm_unprepare_ioctl(md, srcu_idx);
71cdb697
CH
3195 return r;
3196}
3197
3198static int dm_pr_clear(struct block_device *bdev, u64 key)
3199{
3200 struct mapped_device *md = bdev->bd_disk->private_data;
3201 const struct pr_ops *ops;
971888c4 3202 int r, srcu_idx;
71cdb697 3203
5bd5e8d8 3204 r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
71cdb697 3205 if (r < 0)
971888c4 3206 goto out;
71cdb697
CH
3207
3208 ops = bdev->bd_disk->fops->pr_ops;
3209 if (ops && ops->pr_clear)
3210 r = ops->pr_clear(bdev, key);
3211 else
3212 r = -EOPNOTSUPP;
971888c4
MS
3213out:
3214 dm_unprepare_ioctl(md, srcu_idx);
71cdb697
CH
3215 return r;
3216}
3217
3218static const struct pr_ops dm_pr_ops = {
3219 .pr_register = dm_pr_register,
3220 .pr_reserve = dm_pr_reserve,
3221 .pr_release = dm_pr_release,
3222 .pr_preempt = dm_pr_preempt,
3223 .pr_clear = dm_pr_clear,
3224};
3225
83d5cde4 3226static const struct block_device_operations dm_blk_dops = {
c62b37d9 3227 .submit_bio = dm_submit_bio,
1da177e4
LT
3228 .open = dm_blk_open,
3229 .release = dm_blk_close,
aa129a22 3230 .ioctl = dm_blk_ioctl,
3ac51e74 3231 .getgeo = dm_blk_getgeo,
e76239a3 3232 .report_zones = dm_blk_report_zones,
71cdb697 3233 .pr_ops = &dm_pr_ops,
1da177e4
LT
3234 .owner = THIS_MODULE
3235};
3236
f26c5719
DW
3237static const struct dax_operations dm_dax_ops = {
3238 .direct_access = dm_dax_direct_access,
7bf7eac8 3239 .dax_supported = dm_dax_supported,
7e026c8c 3240 .copy_from_iter = dm_dax_copy_from_iter,
b3a9a0c3 3241 .copy_to_iter = dm_dax_copy_to_iter,
cdf6cdcd 3242 .zero_page_range = dm_dax_zero_page_range,
f26c5719
DW
3243};
3244
1da177e4
LT
3245/*
3246 * module hooks
3247 */
3248module_init(dm_init);
3249module_exit(dm_exit);
3250
3251module_param(major, uint, 0);
3252MODULE_PARM_DESC(major, "The major number of the device mapper");
f4790826 3253
e8603136
MS
3254module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
3255MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
3256
115485e8
MS
3257module_param(dm_numa_node, int, S_IRUGO | S_IWUSR);
3258MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations");
3259
1da177e4
LT
3260MODULE_DESCRIPTION(DM_NAME " driver");
3261MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
3262MODULE_LICENSE("GPL");