/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/pr.h>

#define DM_MSG_PREFIX "core"

#ifdef CONFIG_PRINTK
/*
 * ratelimit state to be used in DMXXX_LIMIT().
 */
DEFINE_RATELIMIT_STATE(dm_ratelimit_state,
		       DEFAULT_RATELIMIT_INTERVAL,
		       DEFAULT_RATELIMIT_BURST);
EXPORT_SYMBOL(dm_ratelimit_state);
#endif

/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_IDR(_minor_idr);

static DEFINE_SPINLOCK(_minor_lock);

static void do_deferred_remove(struct work_struct *w);

static DECLARE_WORK(deferred_remove_work, do_deferred_remove);

static struct workqueue_struct *deferred_remove_workqueue;

/*
 * One of these is allocated per bio.
 */
struct dm_io {
	struct mapped_device *md;
	int error;
	atomic_t io_count;
	struct bio *bio;
	unsigned long start_time;
	spinlock_t endio_lock;
	struct dm_stats_aux stats_aux;
};

#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_DEFERRED_REMOVE 6
#define DMF_SUSPENDED_INTERNALLY 7

#define DM_NUMA_NODE NUMA_NO_NODE
static int dm_numa_node = DM_NUMA_NODE;

/*
 * For mempools pre-allocation at the table loading time.
 */
struct dm_md_mempools {
	mempool_t *io_pool;
	struct bio_set *bs;
};

struct table_device {
	struct list_head list;
	atomic_t count;
	struct dm_dev dm_dev;
};

static struct kmem_cache *_io_cache;
static struct kmem_cache *_rq_tio_cache;
static struct kmem_cache *_rq_cache;

/*
 * Bio-based DM's mempools' reserved IOs set by the user.
 */
#define RESERVED_BIO_BASED_IOS 16
static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;

static int __dm_get_module_param_int(int *module_param, int min, int max)
{
	int param = ACCESS_ONCE(*module_param);
	int modified_param = 0;
	bool modified = true;

	if (param < min)
		modified_param = min;
	else if (param > max)
		modified_param = max;
	else
		modified = false;

	if (modified) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}

unsigned __dm_get_module_param(unsigned *module_param,
			       unsigned def, unsigned max)
{
	unsigned param = ACCESS_ONCE(*module_param);
	unsigned modified_param = 0;

	if (!param)
		modified_param = def;
	else if (param > max)
		modified_param = max;

	if (modified_param) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}

unsigned dm_get_reserved_bio_based_ios(void)
{
	return __dm_get_module_param(&reserved_bio_based_ios,
				     RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);

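/*
 * Illustrative sketch (not part of the original lines shown here): getters
 * like the one above are typically paired with a module parameter
 * declaration elsewhere in dm.c, roughly of this form, so the value can be
 * tuned at module load time or via sysfs:
 *
 *	module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
 *	MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
 */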
static unsigned dm_get_numa_node(void)
{
	return __dm_get_module_param_int(&dm_numa_node,
					 DM_NUMA_NODE, num_online_nodes() - 1);
}

static int __init local_init(void)
{
	int r = -ENOMEM;

	/* allocate a slab for the dm_ios */
	_io_cache = KMEM_CACHE(dm_io, 0);
	if (!_io_cache)
		return r;

	_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
	if (!_rq_tio_cache)
		goto out_free_io_cache;

	_rq_cache = kmem_cache_create("dm_old_clone_request", sizeof(struct request),
				      __alignof__(struct request), 0, NULL);
	if (!_rq_cache)
		goto out_free_rq_tio_cache;

	r = dm_uevent_init();
	if (r)
		goto out_free_rq_cache;

	deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
	if (!deferred_remove_workqueue) {
		r = -ENOMEM;
		goto out_uevent_exit;
	}

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_free_workqueue;

	if (!_major)
		_major = r;

	return 0;

out_free_workqueue:
	destroy_workqueue(deferred_remove_workqueue);
out_uevent_exit:
	dm_uevent_exit();
out_free_rq_cache:
	kmem_cache_destroy(_rq_cache);
out_free_rq_tio_cache:
	kmem_cache_destroy(_rq_tio_cache);
out_free_io_cache:
	kmem_cache_destroy(_io_cache);

	return r;
}

static void local_exit(void)
{
	flush_scheduled_work();
	destroy_workqueue(deferred_remove_workqueue);

	kmem_cache_destroy(_rq_cache);
	kmem_cache_destroy(_rq_tio_cache);
	kmem_cache_destroy(_io_cache);
	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_io_init,
	dm_kcopyd_init,
	dm_interface_init,
	dm_statistics_init,
};

static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_io_exit,
	dm_kcopyd_exit,
	dm_interface_exit,
	dm_statistics_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

      bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();

	/*
	 * Should be empty by this point.
	 */
	idr_destroy(&_minor_idr);
}

/*
 * Block device functions
 */
int dm_deleting_md(struct mapped_device *md)
{
	return test_bit(DMF_DELETING, &md->flags);
}

static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);
out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}

static void dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = disk->private_data;
	if (WARN_ON(!md))
		goto out;

	if (atomic_dec_and_test(&md->open_count) &&
	    (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
		queue_work(deferred_remove_workqueue, &deferred_remove_work);

	dm_put(md);
out:
	spin_unlock(&_minor_lock);
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md)) {
		r = -EBUSY;
		if (mark_deferred)
			set_bit(DMF_DEFERRED_REMOVE, &md->flags);
	} else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
		r = -EEXIST;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

int dm_cancel_deferred_remove(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (test_bit(DMF_DELETING, &md->flags))
		r = -EBUSY;
	else
		clear_bit(DMF_DEFERRED_REMOVE, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

static void do_deferred_remove(struct work_struct *w)
{
	dm_deferred_remove();
}

fd2ed4d2
MP
390sector_t dm_get_size(struct mapped_device *md)
391{
392 return get_capacity(md->disk);
393}
394
9974fa2c
MS
395struct request_queue *dm_get_md_queue(struct mapped_device *md)
396{
397 return md->queue;
398}
399
fd2ed4d2
MP
400struct dm_stats *dm_get_stats(struct mapped_device *md)
401{
402 return &md->stats;
403}
404
3ac51e74
DW
405static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
406{
407 struct mapped_device *md = bdev->bd_disk->private_data;
408
409 return dm_get_geometry(md, geo);
410}
411
956a4025
MS
412static int dm_grab_bdev_for_ioctl(struct mapped_device *md,
413 struct block_device **bdev,
414 fmode_t *mode)
aa129a22 415{
66482026 416 struct dm_target *tgt;
6c182cd8 417 struct dm_table *map;
956a4025 418 int srcu_idx, r;
aa129a22 419
6c182cd8 420retry:
e56f81e0 421 r = -ENOTTY;
956a4025 422 map = dm_get_live_table(md, &srcu_idx);
aa129a22
MB
423 if (!map || !dm_table_get_size(map))
424 goto out;
425
426 /* We only support devices that have a single target */
427 if (dm_table_get_num_targets(map) != 1)
428 goto out;
429
66482026
MS
430 tgt = dm_table_get_target(map, 0);
431 if (!tgt->type->prepare_ioctl)
4d341d82 432 goto out;
aa129a22 433
4f186f8b 434 if (dm_suspended_md(md)) {
aa129a22
MB
435 r = -EAGAIN;
436 goto out;
437 }
438
66482026 439 r = tgt->type->prepare_ioctl(tgt, bdev, mode);
e56f81e0
CH
440 if (r < 0)
441 goto out;
aa129a22 442
956a4025
MS
443 bdgrab(*bdev);
444 dm_put_live_table(md, srcu_idx);
e56f81e0 445 return r;
aa129a22 446
aa129a22 447out:
956a4025 448 dm_put_live_table(md, srcu_idx);
5bbbfdf6 449 if (r == -ENOTCONN && !fatal_signal_pending(current)) {
6c182cd8
HR
450 msleep(10);
451 goto retry;
452 }
e56f81e0
CH
453 return r;
454}
455
456static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
457 unsigned int cmd, unsigned long arg)
458{
459 struct mapped_device *md = bdev->bd_disk->private_data;
956a4025 460 int r;
e56f81e0 461
956a4025 462 r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
e56f81e0
CH
463 if (r < 0)
464 return r;
6c182cd8 465
e56f81e0
CH
466 if (r > 0) {
467 /*
e980f623
CH
468 * Target determined this ioctl is being issued against a
469 * subset of the parent bdev; require extra privileges.
e56f81e0 470 */
e980f623
CH
471 if (!capable(CAP_SYS_RAWIO)) {
472 DMWARN_LIMIT(
473 "%s: sending ioctl %x to DM device without required privilege.",
474 current->comm, cmd);
475 r = -ENOIOCTLCMD;
e56f81e0 476 goto out;
e980f623 477 }
e56f81e0 478 }
6c182cd8 479
66482026 480 r = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
e56f81e0 481out:
956a4025 482 bdput(bdev);
aa129a22
MB
483 return r;
484}
485
028867ac 486static struct dm_io *alloc_io(struct mapped_device *md)
1da177e4
LT
487{
488 return mempool_alloc(md->io_pool, GFP_NOIO);
489}
490
028867ac 491static void free_io(struct mapped_device *md, struct dm_io *io)
1da177e4
LT
492{
493 mempool_free(io, md->io_pool);
494}
495
cfae7529 496static void free_tio(struct dm_target_io *tio)
1da177e4 497{
dba14160 498 bio_put(&tio->clone);
1da177e4
LT
499}
500
4cc96131 501int md_in_flight(struct mapped_device *md)
90abb8c4
KU
502{
503 return atomic_read(&md->pending[READ]) +
504 atomic_read(&md->pending[WRITE]);
505}
506
3eaf840e
JNN
507static void start_io_acct(struct dm_io *io)
508{
509 struct mapped_device *md = io->md;
fd2ed4d2 510 struct bio *bio = io->bio;
c9959059 511 int cpu;
fd2ed4d2 512 int rw = bio_data_dir(bio);
3eaf840e
JNN
513
514 io->start_time = jiffies;
515
074a7aca
TH
516 cpu = part_stat_lock();
517 part_round_stats(cpu, &dm_disk(md)->part0);
518 part_stat_unlock();
1e9bb880
SL
519 atomic_set(&dm_disk(md)->part0.in_flight[rw],
520 atomic_inc_return(&md->pending[rw]));
fd2ed4d2
MP
521
522 if (unlikely(dm_stats_used(&md->stats)))
528ec5ab
MC
523 dm_stats_account_io(&md->stats, bio_data_dir(bio),
524 bio->bi_iter.bi_sector, bio_sectors(bio),
525 false, 0, &io->stats_aux);
3eaf840e
JNN
526}
527
d221d2e7 528static void end_io_acct(struct dm_io *io)
3eaf840e
JNN
529{
530 struct mapped_device *md = io->md;
531 struct bio *bio = io->bio;
532 unsigned long duration = jiffies - io->start_time;
18c0b223 533 int pending;
3eaf840e
JNN
534 int rw = bio_data_dir(bio);
535
18c0b223 536 generic_end_io_acct(rw, &dm_disk(md)->part0, io->start_time);
3eaf840e 537
fd2ed4d2 538 if (unlikely(dm_stats_used(&md->stats)))
528ec5ab
MC
539 dm_stats_account_io(&md->stats, bio_data_dir(bio),
540 bio->bi_iter.bi_sector, bio_sectors(bio),
541 true, duration, &io->stats_aux);
fd2ed4d2 542
af7e466a
MP
543 /*
544 * After this is decremented the bio must not be touched if it is
d87f4c14 545 * a flush.
af7e466a 546 */
1e9bb880
SL
547 pending = atomic_dec_return(&md->pending[rw]);
548 atomic_set(&dm_disk(md)->part0.in_flight[rw], pending);
316d315b 549 pending += atomic_read(&md->pending[rw^0x1]);
3eaf840e 550
d221d2e7
MP
551 /* nudge anyone waiting on suspend queue */
552 if (!pending)
553 wake_up(&md->wait);
3eaf840e
JNN
554}
555
1da177e4
LT
556/*
557 * Add the bio to the list of deferred io.
558 */
92c63902 559static void queue_io(struct mapped_device *md, struct bio *bio)
1da177e4 560{
05447420 561 unsigned long flags;
1da177e4 562
05447420 563 spin_lock_irqsave(&md->deferred_lock, flags);
1da177e4 564 bio_list_add(&md->deferred, bio);
05447420 565 spin_unlock_irqrestore(&md->deferred_lock, flags);
6a8736d1 566 queue_work(md->wq, &md->work);
1da177e4
LT
567}
568
569/*
570 * Everyone (including functions in this file), should use this
571 * function to access the md->map field, and make sure they call
83d5e5b0 572 * dm_put_live_table() when finished.
1da177e4 573 */
83d5e5b0 574struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier)
1da177e4 575{
83d5e5b0
MP
576 *srcu_idx = srcu_read_lock(&md->io_barrier);
577
578 return srcu_dereference(md->map, &md->io_barrier);
579}
1da177e4 580
83d5e5b0
MP
581void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier)
582{
583 srcu_read_unlock(&md->io_barrier, srcu_idx);
584}
585
586void dm_sync_table(struct mapped_device *md)
587{
588 synchronize_srcu(&md->io_barrier);
589 synchronize_rcu_expedited();
590}
591
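/*
 * Illustrative sketch (not part of the original source): the usual access
 * pattern for the live table, as the comment above dm_get_live_table()
 * describes. "do_something_with" is a placeholder.
 *
 *	int srcu_idx;
 *	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 *
 *	if (map)
 *		do_something_with(map);
 *	dm_put_live_table(md, srcu_idx);
 */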
592/*
593 * A fast alternative to dm_get_live_table/dm_put_live_table.
594 * The caller must not block between these two functions.
595 */
596static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
597{
598 rcu_read_lock();
599 return rcu_dereference(md->map);
600}
1da177e4 601
83d5e5b0
MP
602static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
603{
604 rcu_read_unlock();
1da177e4
LT
605}
606
86f1152b
BM
607/*
608 * Open a table device so we can use it as a map destination.
609 */
610static int open_table_device(struct table_device *td, dev_t dev,
611 struct mapped_device *md)
612{
613 static char *_claim_ptr = "I belong to device-mapper";
614 struct block_device *bdev;
615
616 int r;
617
618 BUG_ON(td->dm_dev.bdev);
619
620 bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _claim_ptr);
621 if (IS_ERR(bdev))
622 return PTR_ERR(bdev);
623
624 r = bd_link_disk_holder(bdev, dm_disk(md));
625 if (r) {
626 blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL);
627 return r;
628 }
629
630 td->dm_dev.bdev = bdev;
631 return 0;
632}
633
634/*
635 * Close a table device that we've been using.
636 */
637static void close_table_device(struct table_device *td, struct mapped_device *md)
638{
639 if (!td->dm_dev.bdev)
640 return;
641
642 bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md));
643 blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
644 td->dm_dev.bdev = NULL;
645}
646
647static struct table_device *find_table_device(struct list_head *l, dev_t dev,
648 fmode_t mode) {
649 struct table_device *td;
650
651 list_for_each_entry(td, l, list)
652 if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
653 return td;
654
655 return NULL;
656}
657
658int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
659 struct dm_dev **result) {
660 int r;
661 struct table_device *td;
662
663 mutex_lock(&md->table_devices_lock);
664 td = find_table_device(&md->table_devices, dev, mode);
665 if (!td) {
115485e8 666 td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
86f1152b
BM
667 if (!td) {
668 mutex_unlock(&md->table_devices_lock);
669 return -ENOMEM;
670 }
671
672 td->dm_dev.mode = mode;
673 td->dm_dev.bdev = NULL;
674
675 if ((r = open_table_device(td, dev, md))) {
676 mutex_unlock(&md->table_devices_lock);
677 kfree(td);
678 return r;
679 }
680
681 format_dev_t(td->dm_dev.name, dev);
682
683 atomic_set(&td->count, 0);
684 list_add(&td->list, &md->table_devices);
685 }
686 atomic_inc(&td->count);
687 mutex_unlock(&md->table_devices_lock);
688
689 *result = &td->dm_dev;
690 return 0;
691}
692EXPORT_SYMBOL_GPL(dm_get_table_device);
693
694void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
695{
696 struct table_device *td = container_of(d, struct table_device, dm_dev);
697
698 mutex_lock(&md->table_devices_lock);
699 if (atomic_dec_and_test(&td->count)) {
700 close_table_device(td, md);
701 list_del(&td->list);
702 kfree(td);
703 }
704 mutex_unlock(&md->table_devices_lock);
705}
706EXPORT_SYMBOL(dm_put_table_device);
707
708static void free_table_devices(struct list_head *devices)
709{
710 struct list_head *tmp, *next;
711
712 list_for_each_safe(tmp, next, devices) {
713 struct table_device *td = list_entry(tmp, struct table_device, list);
714
715 DMWARN("dm_destroy: %s still exists with %d references",
716 td->dm_dev.name, atomic_read(&td->count));
717 kfree(td);
718 }
719}
720
3ac51e74
DW
721/*
722 * Get the geometry associated with a dm device
723 */
724int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
725{
726 *geo = md->geometry;
727
728 return 0;
729}
730
731/*
732 * Set the geometry of a device.
733 */
734int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
735{
736 sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;
737
738 if (geo->start > sz) {
739 DMWARN("Start sector is beyond the geometry limits.");
740 return -EINVAL;
741 }
742
743 md->geometry = *geo;
744
745 return 0;
746}
747
1da177e4
LT
748/*-----------------------------------------------------------------
749 * CRUD START:
750 * A more elegant soln is in the works that uses the queue
751 * merge fn, unfortunately there are a couple of changes to
752 * the block layer that I want to make for this. So in the
753 * interests of getting something for people to use I give
754 * you this clearly demarcated crap.
755 *---------------------------------------------------------------*/
756
2e93ccc1
KU
757static int __noflush_suspending(struct mapped_device *md)
758{
759 return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
760}
761
1da177e4
LT
762/*
763 * Decrements the number of outstanding ios that a bio has been
764 * cloned into, completing the original io if necc.
765 */
858119e1 766static void dec_pending(struct dm_io *io, int error)
1da177e4 767{
2e93ccc1 768 unsigned long flags;
b35f8caa
MB
769 int io_error;
770 struct bio *bio;
771 struct mapped_device *md = io->md;
2e93ccc1
KU
772
773 /* Push-back supersedes any I/O errors */
f88fb981
KU
774 if (unlikely(error)) {
775 spin_lock_irqsave(&io->endio_lock, flags);
776 if (!(io->error > 0 && __noflush_suspending(md)))
777 io->error = error;
778 spin_unlock_irqrestore(&io->endio_lock, flags);
779 }
1da177e4
LT
780
781 if (atomic_dec_and_test(&io->io_count)) {
2e93ccc1
KU
782 if (io->error == DM_ENDIO_REQUEUE) {
783 /*
784 * Target requested pushing back the I/O.
2e93ccc1 785 */
022c2611 786 spin_lock_irqsave(&md->deferred_lock, flags);
6a8736d1
TH
787 if (__noflush_suspending(md))
788 bio_list_add_head(&md->deferred, io->bio);
789 else
2e93ccc1
KU
790 /* noflush suspend was interrupted. */
791 io->error = -EIO;
022c2611 792 spin_unlock_irqrestore(&md->deferred_lock, flags);
2e93ccc1
KU
793 }
794
b35f8caa
MB
795 io_error = io->error;
796 bio = io->bio;
6a8736d1
TH
797 end_io_acct(io);
798 free_io(md, io);
799
800 if (io_error == DM_ENDIO_REQUEUE)
801 return;
2e93ccc1 802
1eff9d32 803 if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
af7e466a 804 /*
6a8736d1 805 * Preflush done for flush with data, reissue
28a8f0d3 806 * without REQ_PREFLUSH.
af7e466a 807 */
1eff9d32 808 bio->bi_opf &= ~REQ_PREFLUSH;
6a8736d1 809 queue_io(md, bio);
af7e466a 810 } else {
b372d360 811 /* done with normal IO or empty flush */
0a82a8d1 812 trace_block_bio_complete(md->queue, bio, io_error);
4246a0b6
CH
813 bio->bi_error = io_error;
814 bio_endio(bio);
b35f8caa 815 }
1da177e4
LT
816 }
817}
818
4cc96131 819void disable_write_same(struct mapped_device *md)
7eee4ae2
MS
820{
821 struct queue_limits *limits = dm_get_queue_limits(md);
822
823 /* device doesn't really support WRITE SAME, disable it */
824 limits->max_write_same_sectors = 0;
825}
826
4246a0b6 827static void clone_endio(struct bio *bio)
1da177e4 828{
4246a0b6 829 int error = bio->bi_error;
5164bece 830 int r = error;
bfc6d41c 831 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
b35f8caa 832 struct dm_io *io = tio->io;
9faf400f 833 struct mapped_device *md = tio->io->md;
1da177e4
LT
834 dm_endio_fn endio = tio->ti->type->end_io;
835
1da177e4 836 if (endio) {
7de3ee57 837 r = endio(tio->ti, bio, error);
2e93ccc1
KU
838 if (r < 0 || r == DM_ENDIO_REQUEUE)
839 /*
840 * error and requeue request are handled
841 * in dec_pending().
842 */
1da177e4 843 error = r;
45cbcd79
KU
844 else if (r == DM_ENDIO_INCOMPLETE)
845 /* The target will handle the io */
6712ecf8 846 return;
45cbcd79
KU
847 else if (r) {
848 DMWARN("unimplemented target endio return value: %d", r);
849 BUG();
850 }
1da177e4
LT
851 }
852
e6047149 853 if (unlikely(r == -EREMOTEIO && (bio_op(bio) == REQ_OP_WRITE_SAME) &&
7eee4ae2
MS
854 !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors))
855 disable_write_same(md);
856
cfae7529 857 free_tio(tio);
b35f8caa 858 dec_pending(io, error);
1da177e4
LT
859}
860
56a67df7
MS
861/*
862 * Return maximum size of I/O possible at the supplied sector up to the current
863 * target boundary.
864 */
865static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
866{
867 sector_t target_offset = dm_target_offset(ti, sector);
868
869 return ti->len - target_offset;
870}
871
872static sector_t max_io_len(sector_t sector, struct dm_target *ti)
1da177e4 873{
56a67df7 874 sector_t len = max_io_len_target_boundary(sector, ti);
542f9038 875 sector_t offset, max_len;
1da177e4
LT
876
877 /*
542f9038 878 * Does the target need to split even further?
1da177e4 879 */
542f9038
MS
880 if (ti->max_io_len) {
881 offset = dm_target_offset(ti, sector);
882 if (unlikely(ti->max_io_len & (ti->max_io_len - 1)))
883 max_len = sector_div(offset, ti->max_io_len);
884 else
885 max_len = offset & (ti->max_io_len - 1);
886 max_len = ti->max_io_len - max_len;
887
888 if (len > max_len)
889 len = max_len;
1da177e4
LT
890 }
891
892 return len;
893}
894
542f9038
MS
895int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
896{
897 if (len > UINT_MAX) {
898 DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
899 (unsigned long long)len, UINT_MAX);
900 ti->error = "Maximum size of target IO is too large";
901 return -EINVAL;
902 }
903
904 ti->max_io_len = (uint32_t) len;
905
906 return 0;
907}
908EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
909
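/*
 * Illustrative sketch (not part of the original source): a target
 * constructor would typically cap its I/O size like this; "chunk_sectors"
 * and the surrounding error handling are placeholders.
 *
 *	r = dm_set_target_max_io_len(ti, chunk_sectors);
 *	if (r)
 *		return r;
 */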
545ed20e 910static long dm_blk_direct_access(struct block_device *bdev, sector_t sector,
f0c98ebc 911 void **kaddr, pfn_t *pfn, long size)
545ed20e
TK
912{
913 struct mapped_device *md = bdev->bd_disk->private_data;
914 struct dm_table *map;
915 struct dm_target *ti;
916 int srcu_idx;
917 long len, ret = -EIO;
918
919 map = dm_get_live_table(md, &srcu_idx);
920 if (!map)
921 goto out;
922
923 ti = dm_table_find_target(map, sector);
924 if (!dm_target_is_valid(ti))
925 goto out;
926
927 len = max_io_len(sector, ti) << SECTOR_SHIFT;
928 size = min(len, size);
929
930 if (ti->type->direct_access)
931 ret = ti->type->direct_access(ti, sector, kaddr, pfn, size);
932out:
933 dm_put_live_table(md, srcu_idx);
934 return min(ret, size);
935}
936
1dd40c3e
MP
937/*
938 * A target may call dm_accept_partial_bio only from the map routine. It is
28a8f0d3 939 * allowed for all bio types except REQ_PREFLUSH.
1dd40c3e
MP
940 *
941 * dm_accept_partial_bio informs the dm that the target only wants to process
942 * additional n_sectors sectors of the bio and the rest of the data should be
943 * sent in a next bio.
944 *
945 * A diagram that explains the arithmetics:
946 * +--------------------+---------------+-------+
947 * | 1 | 2 | 3 |
948 * +--------------------+---------------+-------+
949 *
950 * <-------------- *tio->len_ptr --------------->
951 * <------- bi_size ------->
952 * <-- n_sectors -->
953 *
954 * Region 1 was already iterated over with bio_advance or similar function.
955 * (it may be empty if the target doesn't use bio_advance)
956 * Region 2 is the remaining bio size that the target wants to process.
957 * (it may be empty if region 1 is non-empty, although there is no reason
958 * to make it empty)
959 * The target requires that region 3 is to be sent in the next bio.
960 *
961 * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
962 * the partially processed part (the sum of regions 1+2) must be the same for all
963 * copies of the bio.
964 */
965void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
966{
967 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
968 unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
1eff9d32 969 BUG_ON(bio->bi_opf & REQ_PREFLUSH);
1dd40c3e
MP
970 BUG_ON(bi_size > *tio->len_ptr);
971 BUG_ON(n_sectors > bi_size);
972 *tio->len_ptr -= bi_size - n_sectors;
973 bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
974}
975EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
976
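/*
 * Illustrative sketch (not part of the original source): a target map
 * method that only wants to handle part of a bio can accept the first
 * n_sectors and let DM resubmit the remainder; "example_map" and
 * "max_sectors" are placeholders.
 *
 *	static int example_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		unsigned max_sectors = ...;	// sectors handled in this pass
 *
 *		if (bio_sectors(bio) > max_sectors)
 *			dm_accept_partial_bio(bio, max_sectors);
 *		// remap bio and return DM_MAPIO_REMAPPED, or queue it and
 *		// return DM_MAPIO_SUBMITTED
 *	}
 */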
d67a5f4b
MP
977/*
978 * Flush current->bio_list when the target map method blocks.
979 * This fixes deadlocks in snapshot and possibly in other targets.
980 */
981struct dm_offload {
982 struct blk_plug plug;
983 struct blk_plug_cb cb;
984};
985
986static void flush_current_bio_list(struct blk_plug_cb *cb, bool from_schedule)
987{
988 struct dm_offload *o = container_of(cb, struct dm_offload, cb);
989 struct bio_list list;
990 struct bio *bio;
991
992 INIT_LIST_HEAD(&o->cb.list);
993
994 if (unlikely(!current->bio_list))
995 return;
996
997 list = *current->bio_list;
998 bio_list_init(current->bio_list);
999
1000 while ((bio = bio_list_pop(&list))) {
1001 struct bio_set *bs = bio->bi_pool;
1002 if (unlikely(!bs) || bs == fs_bio_set) {
1003 bio_list_add(current->bio_list, bio);
1004 continue;
1005 }
1006
1007 spin_lock(&bs->rescue_lock);
1008 bio_list_add(&bs->rescue_list, bio);
1009 queue_work(bs->rescue_workqueue, &bs->rescue_work);
1010 spin_unlock(&bs->rescue_lock);
1011 }
1012}
1013
1014static void dm_offload_start(struct dm_offload *o)
1015{
1016 blk_start_plug(&o->plug);
1017 o->cb.callback = flush_current_bio_list;
1018 list_add(&o->cb.list, &current->plug->cb_list);
1019}
1020
1021static void dm_offload_end(struct dm_offload *o)
1022{
1023 list_del(&o->cb.list);
1024 blk_finish_plug(&o->plug);
1025}
1026
bd2a49b8 1027static void __map_bio(struct dm_target_io *tio)
1da177e4
LT
1028{
1029 int r;
2056a782 1030 sector_t sector;
d67a5f4b 1031 struct dm_offload o;
dba14160 1032 struct bio *clone = &tio->clone;
bd2a49b8 1033 struct dm_target *ti = tio->ti;
1da177e4 1034
1da177e4 1035 clone->bi_end_io = clone_endio;
1da177e4
LT
1036
1037 /*
1038 * Map the clone. If r == 0 we don't need to do
1039 * anything, the target has assumed ownership of
1040 * this io.
1041 */
1042 atomic_inc(&tio->io->io_count);
4f024f37 1043 sector = clone->bi_iter.bi_sector;
d67a5f4b
MP
1044
1045 dm_offload_start(&o);
7de3ee57 1046 r = ti->type->map(ti, clone);
d67a5f4b
MP
1047 dm_offload_end(&o);
1048
45cbcd79 1049 if (r == DM_MAPIO_REMAPPED) {
1da177e4 1050 /* the bio has been remapped so dispatch it */
2056a782 1051
d07335e5
MS
1052 trace_block_bio_remap(bdev_get_queue(clone->bi_bdev), clone,
1053 tio->io->bio->bi_bdev->bd_dev, sector);
2056a782 1054
1da177e4 1055 generic_make_request(clone);
2e93ccc1
KU
1056 } else if (r < 0 || r == DM_MAPIO_REQUEUE) {
1057 /* error the io and bail out, or requeue it if needed */
9faf400f 1058 dec_pending(tio->io, r);
cfae7529 1059 free_tio(tio);
ab37844d 1060 } else if (r != DM_MAPIO_SUBMITTED) {
45cbcd79
KU
1061 DMWARN("unimplemented target map return value: %d", r);
1062 BUG();
1da177e4
LT
1063 }
1064}
1065
1066struct clone_info {
1067 struct mapped_device *md;
1068 struct dm_table *map;
1069 struct bio *bio;
1070 struct dm_io *io;
1071 sector_t sector;
e0d6609a 1072 unsigned sector_count;
1da177e4
LT
1073};
1074
e0d6609a 1075static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)
bd2a49b8 1076{
4f024f37
KO
1077 bio->bi_iter.bi_sector = sector;
1078 bio->bi_iter.bi_size = to_bytes(len);
1da177e4
LT
1079}
1080
1081/*
1082 * Creates a bio that consists of range of complete bvecs.
1083 */
c80914e8
MS
1084static int clone_bio(struct dm_target_io *tio, struct bio *bio,
1085 sector_t sector, unsigned len)
1da177e4 1086{
dba14160 1087 struct bio *clone = &tio->clone;
1da177e4 1088
1c3b13e6
KO
1089 __bio_clone_fast(clone, bio);
1090
c80914e8
MS
1091 if (bio_integrity(bio)) {
1092 int r = bio_integrity_clone(clone, bio, GFP_NOIO);
1093 if (r < 0)
1094 return r;
1095 }
bd2a49b8 1096
1c3b13e6
KO
1097 bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
1098 clone->bi_iter.bi_size = to_bytes(len);
1099
1100 if (bio_integrity(bio))
1101 bio_integrity_trim(clone, 0, len);
c80914e8
MS
1102
1103 return 0;
1da177e4
LT
1104}
1105
9015df24 1106static struct dm_target_io *alloc_tio(struct clone_info *ci,
99778273 1107 struct dm_target *ti,
55a62eef 1108 unsigned target_bio_nr)
f9ab94ce 1109{
dba14160
MP
1110 struct dm_target_io *tio;
1111 struct bio *clone;
1112
99778273 1113 clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs);
dba14160 1114 tio = container_of(clone, struct dm_target_io, clone);
f9ab94ce
MP
1115
1116 tio->io = ci->io;
1117 tio->ti = ti;
55a62eef 1118 tio->target_bio_nr = target_bio_nr;
9015df24
AK
1119
1120 return tio;
1121}
1122
14fe594d
AK
1123static void __clone_and_map_simple_bio(struct clone_info *ci,
1124 struct dm_target *ti,
1dd40c3e 1125 unsigned target_bio_nr, unsigned *len)
9015df24 1126{
99778273 1127 struct dm_target_io *tio = alloc_tio(ci, ti, target_bio_nr);
dba14160 1128 struct bio *clone = &tio->clone;
9015df24 1129
1dd40c3e
MP
1130 tio->len_ptr = len;
1131
99778273 1132 __bio_clone_fast(clone, ci->bio);
bd2a49b8 1133 if (len)
1dd40c3e 1134 bio_setup_sector(clone, ci->sector, *len);
f9ab94ce 1135
bd2a49b8 1136 __map_bio(tio);
f9ab94ce
MP
1137}
1138
14fe594d 1139static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
1dd40c3e 1140 unsigned num_bios, unsigned *len)
06a426ce 1141{
55a62eef 1142 unsigned target_bio_nr;
06a426ce 1143
55a62eef 1144 for (target_bio_nr = 0; target_bio_nr < num_bios; target_bio_nr++)
14fe594d 1145 __clone_and_map_simple_bio(ci, ti, target_bio_nr, len);
06a426ce
MS
1146}
1147
14fe594d 1148static int __send_empty_flush(struct clone_info *ci)
f9ab94ce 1149{
06a426ce 1150 unsigned target_nr = 0;
f9ab94ce
MP
1151 struct dm_target *ti;
1152
b372d360 1153 BUG_ON(bio_has_data(ci->bio));
f9ab94ce 1154 while ((ti = dm_table_get_target(ci->map, target_nr++)))
1dd40c3e 1155 __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
f9ab94ce 1156
f9ab94ce
MP
1157 return 0;
1158}
1159
c80914e8 1160static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
1dd40c3e 1161 sector_t sector, unsigned *len)
5ae89a87 1162{
dba14160 1163 struct bio *bio = ci->bio;
5ae89a87 1164 struct dm_target_io *tio;
b0d8ed4d
AK
1165 unsigned target_bio_nr;
1166 unsigned num_target_bios = 1;
c80914e8 1167 int r = 0;
5ae89a87 1168
b0d8ed4d
AK
1169 /*
1170 * Does the target want to receive duplicate copies of the bio?
1171 */
1172 if (bio_data_dir(bio) == WRITE && ti->num_write_bios)
1173 num_target_bios = ti->num_write_bios(ti, bio);
e4c93811 1174
b0d8ed4d 1175 for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) {
99778273 1176 tio = alloc_tio(ci, ti, target_bio_nr);
1dd40c3e 1177 tio->len_ptr = len;
c80914e8 1178 r = clone_bio(tio, bio, sector, *len);
072623de 1179 if (r < 0) {
cfae7529 1180 free_tio(tio);
c80914e8 1181 break;
072623de 1182 }
b0d8ed4d
AK
1183 __map_bio(tio);
1184 }
c80914e8
MS
1185
1186 return r;
5ae89a87
MS
1187}
1188
55a62eef 1189typedef unsigned (*get_num_bios_fn)(struct dm_target *ti);
23508a96 1190
55a62eef 1191static unsigned get_num_discard_bios(struct dm_target *ti)
23508a96 1192{
55a62eef 1193 return ti->num_discard_bios;
23508a96
MS
1194}
1195
55a62eef 1196static unsigned get_num_write_same_bios(struct dm_target *ti)
23508a96 1197{
55a62eef 1198 return ti->num_write_same_bios;
23508a96
MS
1199}
1200
1201typedef bool (*is_split_required_fn)(struct dm_target *ti);
9eef87da 1202
23508a96
MS
1203static bool is_split_required_for_discard(struct dm_target *ti)
1204{
55a62eef 1205 return ti->split_discard_bios;
cec47e3d
KU
1206}
1207
14fe594d
AK
1208static int __send_changing_extent_only(struct clone_info *ci,
1209 get_num_bios_fn get_num_bios,
1210 is_split_required_fn is_split_required)
ba1cbad9 1211{
5ae89a87 1212 struct dm_target *ti;
e0d6609a 1213 unsigned len;
55a62eef 1214 unsigned num_bios;
ba1cbad9 1215
a79245b3
MS
1216 do {
1217 ti = dm_table_find_target(ci->map, ci->sector);
1218 if (!dm_target_is_valid(ti))
1219 return -EIO;
2eb6e1e3 1220
5ae89a87 1221 /*
23508a96
MS
1222 * Even though the device advertised support for this type of
1223 * request, that does not mean every target supports it, and
936688d7 1224 * reconfiguration might also have changed that since the
a79245b3 1225 * check was performed.
5ae89a87 1226 */
55a62eef
AK
1227 num_bios = get_num_bios ? get_num_bios(ti) : 0;
1228 if (!num_bios)
a79245b3 1229 return -EOPNOTSUPP;
ba1cbad9 1230
23508a96 1231 if (is_split_required && !is_split_required(ti))
e0d6609a 1232 len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
7acf0277 1233 else
e0d6609a 1234 len = min((sector_t)ci->sector_count, max_io_len(ci->sector, ti));
de3ec86d 1235
1dd40c3e 1236 __send_duplicate_bios(ci, ti, num_bios, &len);
e262f347 1237
a79245b3
MS
1238 ci->sector += len;
1239 } while (ci->sector_count -= len);
5ae89a87
MS
1240
1241 return 0;
ba1cbad9
MS
1242}
1243
14fe594d 1244static int __send_discard(struct clone_info *ci)
23508a96 1245{
14fe594d
AK
1246 return __send_changing_extent_only(ci, get_num_discard_bios,
1247 is_split_required_for_discard);
23508a96 1248}
0ce65797 1249
14fe594d 1250static int __send_write_same(struct clone_info *ci)
0ce65797 1251{
14fe594d 1252 return __send_changing_extent_only(ci, get_num_write_same_bios, NULL);
0ce65797
MS
1253}
1254
e4c93811
AK
1255/*
1256 * Select the correct strategy for processing a non-flush bio.
1257 */
14fe594d 1258static int __split_and_process_non_flush(struct clone_info *ci)
0ce65797 1259{
dba14160 1260 struct bio *bio = ci->bio;
512875bd 1261 struct dm_target *ti;
1c3b13e6 1262 unsigned len;
c80914e8 1263 int r;
0ce65797 1264
e6047149 1265 if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
14fe594d 1266 return __send_discard(ci);
e6047149 1267 else if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
14fe594d 1268 return __send_write_same(ci);
0ce65797 1269
512875bd
JN
1270 ti = dm_table_find_target(ci->map, ci->sector);
1271 if (!dm_target_is_valid(ti))
1272 return -EIO;
1273
1c3b13e6 1274 len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count);
0ce65797 1275
c80914e8
MS
1276 r = __clone_and_map_data_bio(ci, ti, ci->sector, &len);
1277 if (r < 0)
1278 return r;
0ce65797 1279
1c3b13e6
KO
1280 ci->sector += len;
1281 ci->sector_count -= len;
0ce65797 1282
1c3b13e6 1283 return 0;
0ce65797
MS
1284}
1285
1da177e4 1286/*
14fe594d 1287 * Entry point to split a bio into clones and submit them to the targets.
1da177e4 1288 */
83d5e5b0
MP
1289static void __split_and_process_bio(struct mapped_device *md,
1290 struct dm_table *map, struct bio *bio)
0ce65797 1291{
1da177e4 1292 struct clone_info ci;
512875bd 1293 int error = 0;
1da177e4 1294
83d5e5b0 1295 if (unlikely(!map)) {
6a8736d1 1296 bio_io_error(bio);
f0b9a450
MP
1297 return;
1298 }
692d0eb9 1299
83d5e5b0 1300 ci.map = map;
1da177e4 1301 ci.md = md;
1da177e4
LT
1302 ci.io = alloc_io(md);
1303 ci.io->error = 0;
1304 atomic_set(&ci.io->io_count, 1);
1305 ci.io->bio = bio;
1306 ci.io->md = md;
f88fb981 1307 spin_lock_init(&ci.io->endio_lock);
4f024f37 1308 ci.sector = bio->bi_iter.bi_sector;
0ce65797 1309
3eaf840e 1310 start_io_acct(ci.io);
0ce65797 1311
1eff9d32 1312 if (bio->bi_opf & REQ_PREFLUSH) {
b372d360
MS
1313 ci.bio = &ci.md->flush_bio;
1314 ci.sector_count = 0;
14fe594d 1315 error = __send_empty_flush(&ci);
b372d360
MS
1316 /* dec_pending submits any data associated with flush */
1317 } else {
6a8736d1 1318 ci.bio = bio;
d87f4c14 1319 ci.sector_count = bio_sectors(bio);
b372d360 1320 while (ci.sector_count && !error)
14fe594d 1321 error = __split_and_process_non_flush(&ci);
d87f4c14 1322 }
0ce65797 1323
1da177e4 1324 /* drop the extra reference count */
512875bd 1325 dec_pending(ci.io, error);
0ce65797 1326}
1da177e4
LT
1327/*-----------------------------------------------------------------
1328 * CRUD END
1329 *---------------------------------------------------------------*/
0ce65797 1330
cec47e3d 1331/*
1da177e4
LT
1332 * The request function that just remaps the bio built up by
1333 * dm_merge_bvec.
cec47e3d 1334 */
dece1635 1335static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
cec47e3d 1336{
12f03a49 1337 int rw = bio_data_dir(bio);
cec47e3d 1338 struct mapped_device *md = q->queuedata;
83d5e5b0
MP
1339 int srcu_idx;
1340 struct dm_table *map;
cec47e3d 1341
83d5e5b0 1342 map = dm_get_live_table(md, &srcu_idx);
29e4013d 1343
18c0b223 1344 generic_start_io_acct(rw, bio_sectors(bio), &dm_disk(md)->part0);
d0bcb878 1345
6a8736d1
TH
1346 /* if we're suspended, we have to queue this io for later */
1347 if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
83d5e5b0 1348 dm_put_live_table(md, srcu_idx);
9eef87da 1349
1eff9d32 1350 if (!(bio->bi_opf & REQ_RAHEAD))
6a8736d1
TH
1351 queue_io(md, bio);
1352 else
54d9a1b4 1353 bio_io_error(bio);
dece1635 1354 return BLK_QC_T_NONE;
cec47e3d 1355 }
1da177e4 1356
83d5e5b0
MP
1357 __split_and_process_bio(md, map, bio);
1358 dm_put_live_table(md, srcu_idx);
dece1635 1359 return BLK_QC_T_NONE;
cec47e3d
KU
1360}
1361
1da177e4
LT
1362static int dm_any_congested(void *congested_data, int bdi_bits)
1363{
8a57dfc6
CS
1364 int r = bdi_bits;
1365 struct mapped_device *md = congested_data;
1366 struct dm_table *map;
1da177e4 1367
1eb787ec 1368 if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
e522c039 1369 if (dm_request_based(md)) {
cec47e3d 1370 /*
e522c039
MS
1371 * With request-based DM we only need to check the
1372 * top-level queue for congestion.
cec47e3d 1373 */
dc3b17cc 1374 r = md->queue->backing_dev_info->wb.state & bdi_bits;
e522c039
MS
1375 } else {
1376 map = dm_get_live_table_fast(md);
1377 if (map)
cec47e3d 1378 r = dm_table_any_congested(map, bdi_bits);
e522c039 1379 dm_put_live_table_fast(md);
8a57dfc6
CS
1380 }
1381 }
1382
1da177e4
LT
1383 return r;
1384}
1385
1386/*-----------------------------------------------------------------
1387 * An IDR is used to keep track of allocated minor numbers.
1388 *---------------------------------------------------------------*/
2b06cfff 1389static void free_minor(int minor)
1da177e4 1390{
f32c10b0 1391 spin_lock(&_minor_lock);
1da177e4 1392 idr_remove(&_minor_idr, minor);
f32c10b0 1393 spin_unlock(&_minor_lock);
1da177e4
LT
1394}
1395
1396/*
1397 * See if the device with a specific minor # is free.
1398 */
cf13ab8e 1399static int specific_minor(int minor)
1da177e4 1400{
c9d76be6 1401 int r;
1da177e4
LT
1402
1403 if (minor >= (1 << MINORBITS))
1404 return -EINVAL;
1405
c9d76be6 1406 idr_preload(GFP_KERNEL);
f32c10b0 1407 spin_lock(&_minor_lock);
1da177e4 1408
c9d76be6 1409 r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT);
1da177e4 1410
f32c10b0 1411 spin_unlock(&_minor_lock);
c9d76be6
TH
1412 idr_preload_end();
1413 if (r < 0)
1414 return r == -ENOSPC ? -EBUSY : r;
1415 return 0;
1da177e4
LT
1416}
1417
cf13ab8e 1418static int next_free_minor(int *minor)
1da177e4 1419{
c9d76be6 1420 int r;
62f75c2f 1421
c9d76be6 1422 idr_preload(GFP_KERNEL);
f32c10b0 1423 spin_lock(&_minor_lock);
1da177e4 1424
c9d76be6 1425 r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT);
1da177e4 1426
f32c10b0 1427 spin_unlock(&_minor_lock);
c9d76be6
TH
1428 idr_preload_end();
1429 if (r < 0)
1430 return r;
1431 *minor = r;
1432 return 0;
1da177e4
LT
1433}
1434
83d5cde4 1435static const struct block_device_operations dm_blk_dops;
1da177e4 1436
53d5914f
MP
1437static void dm_wq_work(struct work_struct *work);
1438
4cc96131 1439void dm_init_md_queue(struct mapped_device *md)
4a0b4ddf
MS
1440{
1441 /*
1442 * Request-based dm devices cannot be stacked on top of bio-based dm
bfebd1cd 1443 * devices. The type of this dm device may not have been decided yet.
4a0b4ddf
MS
1444 * The type is decided at the first table loading time.
1445 * To prevent problematic device stacking, clear the queue flag
1446 * for request stacking support until then.
1447 *
1448 * This queue is new, so no concurrency on the queue_flags.
1449 */
1450 queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
ad5f498f
MP
1451
1452 /*
1453 * Initialize data that will only be used by a non-blk-mq DM queue
1454 * - must do so here (in alloc_dev callchain) before queue is used
1455 */
1456 md->queue->queuedata = md;
dc3b17cc 1457 md->queue->backing_dev_info->congested_data = md;
bfebd1cd 1458}
4a0b4ddf 1459
4cc96131 1460void dm_init_normal_md_queue(struct mapped_device *md)
bfebd1cd 1461{
17e149b8 1462 md->use_blk_mq = false;
bfebd1cd
MS
1463 dm_init_md_queue(md);
1464
1465 /*
1466 * Initialize aspects of queue that aren't relevant for blk-mq
1467 */
dc3b17cc 1468 md->queue->backing_dev_info->congested_fn = dm_any_congested;
4a0b4ddf 1469 blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
4a0b4ddf
MS
1470}
1471
0f20972f
MS
1472static void cleanup_mapped_device(struct mapped_device *md)
1473{
0f20972f
MS
1474 if (md->wq)
1475 destroy_workqueue(md->wq);
1476 if (md->kworker_task)
1477 kthread_stop(md->kworker_task);
6f65985e 1478 mempool_destroy(md->io_pool);
0f20972f
MS
1479 if (md->bs)
1480 bioset_free(md->bs);
1481
1482 if (md->disk) {
1483 spin_lock(&_minor_lock);
1484 md->disk->private_data = NULL;
1485 spin_unlock(&_minor_lock);
0f20972f
MS
1486 del_gendisk(md->disk);
1487 put_disk(md->disk);
1488 }
1489
1490 if (md->queue)
1491 blk_cleanup_queue(md->queue);
1492
d09960b0
TE
1493 cleanup_srcu_struct(&md->io_barrier);
1494
0f20972f
MS
1495 if (md->bdev) {
1496 bdput(md->bdev);
1497 md->bdev = NULL;
1498 }
4cc96131
MS
1499
1500 dm_mq_cleanup_mapped_device(md);
0f20972f
MS
1501}
1502
1da177e4
LT
1503/*
1504 * Allocate and initialise a blank device with a given minor.
1505 */
2b06cfff 1506static struct mapped_device *alloc_dev(int minor)
1da177e4 1507{
115485e8
MS
1508 int r, numa_node_id = dm_get_numa_node();
1509 struct mapped_device *md;
ba61fdd1 1510 void *old_md;
1da177e4 1511
115485e8 1512 md = kzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
1da177e4
LT
1513 if (!md) {
1514 DMWARN("unable to allocate device, out of memory.");
1515 return NULL;
1516 }
1517
10da4f79 1518 if (!try_module_get(THIS_MODULE))
6ed7ade8 1519 goto bad_module_get;
10da4f79 1520
1da177e4 1521 /* get a minor number for the dev */
2b06cfff 1522 if (minor == DM_ANY_MINOR)
cf13ab8e 1523 r = next_free_minor(&minor);
2b06cfff 1524 else
cf13ab8e 1525 r = specific_minor(minor);
1da177e4 1526 if (r < 0)
6ed7ade8 1527 goto bad_minor;
1da177e4 1528
83d5e5b0
MP
1529 r = init_srcu_struct(&md->io_barrier);
1530 if (r < 0)
1531 goto bad_io_barrier;
1532
115485e8 1533 md->numa_node_id = numa_node_id;
4cc96131 1534 md->use_blk_mq = dm_use_blk_mq_default();
591ddcfc 1535 md->init_tio_pdu = false;
a5664dad 1536 md->type = DM_TYPE_NONE;
e61290a4 1537 mutex_init(&md->suspend_lock);
a5664dad 1538 mutex_init(&md->type_lock);
86f1152b 1539 mutex_init(&md->table_devices_lock);
022c2611 1540 spin_lock_init(&md->deferred_lock);
1da177e4 1541 atomic_set(&md->holders, 1);
5c6bd75d 1542 atomic_set(&md->open_count, 0);
1da177e4 1543 atomic_set(&md->event_nr, 0);
7a8c3d3b
MA
1544 atomic_set(&md->uevent_seq, 0);
1545 INIT_LIST_HEAD(&md->uevent_list);
86f1152b 1546 INIT_LIST_HEAD(&md->table_devices);
7a8c3d3b 1547 spin_lock_init(&md->uevent_lock);
1da177e4 1548
115485e8 1549 md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id);
1da177e4 1550 if (!md->queue)
0f20972f 1551 goto bad;
1da177e4 1552
4a0b4ddf 1553 dm_init_md_queue(md);
9faf400f 1554
115485e8 1555 md->disk = alloc_disk_node(1, numa_node_id);
1da177e4 1556 if (!md->disk)
0f20972f 1557 goto bad;
1da177e4 1558
316d315b
NK
1559 atomic_set(&md->pending[0], 0);
1560 atomic_set(&md->pending[1], 0);
f0b04115 1561 init_waitqueue_head(&md->wait);
53d5914f 1562 INIT_WORK(&md->work, dm_wq_work);
f0b04115 1563 init_waitqueue_head(&md->eventq);
2995fa78 1564 init_completion(&md->kobj_holder.completion);
2eb6e1e3 1565 md->kworker_task = NULL;
f0b04115 1566
1da177e4
LT
1567 md->disk->major = _major;
1568 md->disk->first_minor = minor;
1569 md->disk->fops = &dm_blk_dops;
1570 md->disk->queue = md->queue;
1571 md->disk->private_data = md;
1572 sprintf(md->disk->disk_name, "dm-%d", minor);
1573 add_disk(md->disk);
7e51f257 1574 format_dev_t(md->name, MKDEV(_major, minor));
1da177e4 1575
670368a8 1576 md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0);
304f3f6a 1577 if (!md->wq)
0f20972f 1578 goto bad;
304f3f6a 1579
32a926da
MP
1580 md->bdev = bdget_disk(md->disk, 0);
1581 if (!md->bdev)
0f20972f 1582 goto bad;
32a926da 1583
3a83f467 1584 bio_init(&md->flush_bio, NULL, 0);
6a8736d1 1585 md->flush_bio.bi_bdev = md->bdev;
70fd7614 1586 md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
6a8736d1 1587
fd2ed4d2
MP
1588 dm_stats_init(&md->stats);
1589
ba61fdd1 1590 /* Populate the mapping, nobody knows we exist yet */
f32c10b0 1591 spin_lock(&_minor_lock);
ba61fdd1 1592 old_md = idr_replace(&_minor_idr, md, minor);
f32c10b0 1593 spin_unlock(&_minor_lock);
ba61fdd1
JM
1594
1595 BUG_ON(old_md != MINOR_ALLOCED);
1596
1da177e4
LT
1597 return md;
1598
0f20972f
MS
1599bad:
1600 cleanup_mapped_device(md);
83d5e5b0 1601bad_io_barrier:
1da177e4 1602 free_minor(minor);
6ed7ade8 1603bad_minor:
10da4f79 1604 module_put(THIS_MODULE);
6ed7ade8 1605bad_module_get:
1da177e4
LT
1606 kfree(md);
1607 return NULL;
1608}
1609
ae9da83f
JN
1610static void unlock_fs(struct mapped_device *md);
1611
1da177e4
LT
1612static void free_dev(struct mapped_device *md)
1613{
f331c029 1614 int minor = MINOR(disk_devt(md->disk));
63d94e48 1615
32a926da 1616 unlock_fs(md);
2eb6e1e3 1617
0f20972f 1618 cleanup_mapped_device(md);
63a4f065 1619
86f1152b 1620 free_table_devices(&md->table_devices);
63a4f065 1621 dm_stats_cleanup(&md->stats);
63a4f065
MS
1622 free_minor(minor);
1623
10da4f79 1624 module_put(THIS_MODULE);
1da177e4
LT
1625 kfree(md);
1626}
1627
e6ee8c0b
KU
1628static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
1629{
c0820cf5 1630 struct dm_md_mempools *p = dm_table_get_md_mempools(t);
e6ee8c0b 1631
4e6e36c3
MS
1632 if (md->bs) {
1633 /* The md already has necessary mempools. */
545ed20e 1634 if (dm_table_bio_based(t)) {
16245bdc
JN
1635 /*
1636 * Reload bioset because front_pad may have changed
1637 * because a different table was loaded.
1638 */
1639 bioset_free(md->bs);
1640 md->bs = p->bs;
1641 p->bs = NULL;
16245bdc 1642 }
4e6e36c3
MS
1643 /*
1644 * There's no need to reload with request-based dm
1645 * because the size of front_pad doesn't change.
1646 * Note for future: If you are to reload bioset,
1647 * prep-ed requests in the queue may refer
1648 * to bio from the old bioset, so you must walk
1649 * through the queue to unprep.
1650 */
1651 goto out;
c0820cf5 1652 }
e6ee8c0b 1653
eb8db831 1654 BUG_ON(!p || md->io_pool || md->bs);
cbc4e3c1 1655
e6ee8c0b
KU
1656 md->io_pool = p->io_pool;
1657 p->io_pool = NULL;
e6ee8c0b
KU
1658 md->bs = p->bs;
1659 p->bs = NULL;
4e6e36c3 1660
e6ee8c0b 1661out:
02233342 1662 /* mempool bind completed, no longer need any mempools in the table */
e6ee8c0b
KU
1663 dm_table_free_md_mempools(t);
1664}
1665
1da177e4
LT
1666/*
1667 * Bind a table to the device.
1668 */
1669static void event_callback(void *context)
1670{
7a8c3d3b
MA
1671 unsigned long flags;
1672 LIST_HEAD(uevents);
1da177e4
LT
1673 struct mapped_device *md = (struct mapped_device *) context;
1674
7a8c3d3b
MA
1675 spin_lock_irqsave(&md->uevent_lock, flags);
1676 list_splice_init(&md->uevent_list, &uevents);
1677 spin_unlock_irqrestore(&md->uevent_lock, flags);
1678
ed9e1982 1679 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
7a8c3d3b 1680
1da177e4
LT
1681 atomic_inc(&md->event_nr);
1682 wake_up(&md->eventq);
1683}
1684
c217649b
MS
1685/*
1686 * Protected by md->suspend_lock obtained by dm_swap_table().
1687 */
4e90188b 1688static void __set_size(struct mapped_device *md, sector_t size)
1da177e4 1689{
4e90188b 1690 set_capacity(md->disk, size);
1da177e4 1691
db8fef4f 1692 i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
1da177e4
LT
1693}
1694
042d2a9b
AK
1695/*
1696 * Returns old map, which caller must destroy.
1697 */
1698static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
1699 struct queue_limits *limits)
1da177e4 1700{
042d2a9b 1701 struct dm_table *old_map;
165125e1 1702 struct request_queue *q = md->queue;
1da177e4
LT
1703 sector_t size;
1704
5a8f1f80
BVA
1705 lockdep_assert_held(&md->suspend_lock);
1706
1da177e4 1707 size = dm_table_get_size(t);
3ac51e74
DW
1708
1709 /*
1710 * Wipe any geometry if the size of the table changed.
1711 */
fd2ed4d2 1712 if (size != dm_get_size(md))
3ac51e74
DW
1713 memset(&md->geometry, 0, sizeof(md->geometry));
1714
32a926da 1715 __set_size(md, size);
d5816876 1716
2ca3310e
AK
1717 dm_table_event_callback(t, event_callback, md);
1718
e6ee8c0b
KU
1719 /*
1720 * The queue hasn't been stopped yet, if the old table type wasn't
1721 * for request-based during suspension. So stop it to prevent
1722 * I/O mapping before resume.
1723 * This must be done before setting the queue restrictions,
1724 * because request-based dm may be run just after the setting.
1725 */
16f12266 1726 if (dm_table_request_based(t)) {
eca7ee6d 1727 dm_stop_queue(q);
16f12266
MS
1728 /*
1729 * Leverage the fact that request-based DM targets are
1730 * immutable singletons and establish md->immutable_target
1731 * - used to optimize both dm_request_fn and dm_mq_queue_rq
1732 */
1733 md->immutable_target = dm_table_get_immutable_target(t);
1734 }
e6ee8c0b
KU
1735
1736 __bind_mempools(md, t);
1737
a12f5d48 1738 old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
1d3aa6f6 1739 rcu_assign_pointer(md->map, (void *)t);
36a0456f
AK
1740 md->immutable_target_type = dm_table_get_immutable_target_type(t);
1741
754c5fc7 1742 dm_table_set_restrictions(t, q, limits);
41abc4e1
HR
1743 if (old_map)
1744 dm_sync_table(md);
1da177e4 1745
042d2a9b 1746 return old_map;
1da177e4
LT
1747}
1748
a7940155
AK
1749/*
1750 * Returns unbound table for the caller to free.
1751 */
1752static struct dm_table *__unbind(struct mapped_device *md)
1da177e4 1753{
a12f5d48 1754 struct dm_table *map = rcu_dereference_protected(md->map, 1);
1da177e4
LT
1755
1756 if (!map)
a7940155 1757 return NULL;
1da177e4
LT
1758
1759 dm_table_event_callback(map, NULL, NULL);
9cdb8520 1760 RCU_INIT_POINTER(md->map, NULL);
83d5e5b0 1761 dm_sync_table(md);
a7940155
AK
1762
1763 return map;
1da177e4
LT
1764}
1765
1766/*
1767 * Constructor for a new device.
1768 */
2b06cfff 1769int dm_create(int minor, struct mapped_device **result)
1da177e4
LT
1770{
1771 struct mapped_device *md;
1772
2b06cfff 1773 md = alloc_dev(minor);
1da177e4
LT
1774 if (!md)
1775 return -ENXIO;
1776
784aae73
MB
1777 dm_sysfs_init(md);
1778
1da177e4
LT
1779 *result = md;
1780 return 0;
1781}
1782
a5664dad
MS
1783/*
1784 * Functions to manage md->type.
1785 * All are required to hold md->type_lock.
1786 */
1787void dm_lock_md_type(struct mapped_device *md)
1788{
1789 mutex_lock(&md->type_lock);
1790}
1791
1792void dm_unlock_md_type(struct mapped_device *md)
1793{
1794 mutex_unlock(&md->type_lock);
1795}
1796
1797void dm_set_md_type(struct mapped_device *md, unsigned type)
1798{
00c4fc3b 1799 BUG_ON(!mutex_is_locked(&md->type_lock));
a5664dad
MS
1800 md->type = type;
1801}
1802
1803unsigned dm_get_md_type(struct mapped_device *md)
1804{
1805 return md->type;
1806}
1807
36a0456f
AK
1808struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
1809{
1810 return md->immutable_target_type;
1811}
1812
f84cb8a4
MS
1813/*
1814 * The queue_limits are only valid as long as you have a reference
1815 * count on 'md'.
1816 */
1817struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
1818{
1819 BUG_ON(!atomic_read(&md->holders));
1820 return &md->queue->limits;
1821}
1822EXPORT_SYMBOL_GPL(dm_get_queue_limits);
1823
4a0b4ddf
MS
1824/*
1825 * Setup the DM device's queue based on md's type
1826 */
591ddcfc 1827int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
4a0b4ddf 1828{
bfebd1cd 1829 int r;
545ed20e 1830 unsigned type = dm_get_md_type(md);
bfebd1cd 1831
545ed20e 1832 switch (type) {
bfebd1cd 1833 case DM_TYPE_REQUEST_BASED:
eb8db831 1834 r = dm_old_init_request_queue(md, t);
bfebd1cd 1835 if (r) {
eca7ee6d 1836 DMERR("Cannot initialize queue for request-based mapped device");
bfebd1cd 1837 return r;
ff36ab34 1838 }
bfebd1cd
MS
1839 break;
1840 case DM_TYPE_MQ_REQUEST_BASED:
e83068a5 1841 r = dm_mq_init_request_queue(md, t);
bfebd1cd 1842 if (r) {
eca7ee6d 1843 DMERR("Cannot initialize queue for request-based dm-mq mapped device");
bfebd1cd
MS
1844 return r;
1845 }
1846 break;
1847 case DM_TYPE_BIO_BASED:
545ed20e 1848 case DM_TYPE_DAX_BIO_BASED:
eca7ee6d 1849 dm_init_normal_md_queue(md);
ff36ab34 1850 blk_queue_make_request(md->queue, dm_make_request);
dbba42d8
MP
1851 /*
1852 * DM handles splitting bios as needed. Free the bio_split bioset
1853 * since it won't be used (saves 1 process per bio-based DM device).
1854 */
1855 bioset_free(md->queue->bio_split);
1856 md->queue->bio_split = NULL;
545ed20e
TK
1857
1858 if (type == DM_TYPE_DAX_BIO_BASED)
1859 queue_flag_set_unlocked(QUEUE_FLAG_DAX, md->queue);
bfebd1cd 1860 break;
4a0b4ddf
MS
1861 }
1862
1863 return 0;
1864}
1865
2bec1f4a 1866struct mapped_device *dm_get_md(dev_t dev)
1da177e4
LT
1867{
1868 struct mapped_device *md;
1da177e4
LT
1869 unsigned minor = MINOR(dev);
1870
1871 if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
1872 return NULL;
1873
f32c10b0 1874 spin_lock(&_minor_lock);
1da177e4
LT
1875
1876 md = idr_find(&_minor_idr, minor);
2bec1f4a
MP
1877 if (md) {
1878 if ((md == MINOR_ALLOCED ||
1879 (MINOR(disk_devt(dm_disk(md))) != minor) ||
1880 dm_deleting_md(md) ||
1881 test_bit(DMF_FREEING, &md->flags))) {
1882 md = NULL;
1883 goto out;
1884 }
1885 dm_get(md);
fba9f90e 1886 }
1da177e4 1887
fba9f90e 1888out:
f32c10b0 1889 spin_unlock(&_minor_lock);
1da177e4 1890
637842cf
DT
1891 return md;
1892}
3cf2e4ba 1893EXPORT_SYMBOL_GPL(dm_get_md);
d229a958 1894
9ade92a9 1895void *dm_get_mdptr(struct mapped_device *md)
637842cf 1896{
9ade92a9 1897 return md->interface_ptr;
1da177e4
LT
1898}
1899
1900void dm_set_mdptr(struct mapped_device *md, void *ptr)
1901{
1902 md->interface_ptr = ptr;
1903}
1904
1905void dm_get(struct mapped_device *md)
1906{
1907 atomic_inc(&md->holders);
3f77316d 1908 BUG_ON(test_bit(DMF_FREEING, &md->flags));
1da177e4
LT
1909}
1910
09ee96b2
MP
1911int dm_hold(struct mapped_device *md)
1912{
1913 spin_lock(&_minor_lock);
1914 if (test_bit(DMF_FREEING, &md->flags)) {
1915 spin_unlock(&_minor_lock);
1916 return -EBUSY;
1917 }
1918 dm_get(md);
1919 spin_unlock(&_minor_lock);
1920 return 0;
1921}
1922EXPORT_SYMBOL_GPL(dm_hold);
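/*
 * Illustrative sketch only (assumed usage, not code from this file):
 * dm_get() expects the caller to already hold a valid reference and
 * BUG()s on a dying device, whereas dm_hold() lets a caller probe
 * safely:
 *
 *	if (dm_hold(md))
 *		return -EBUSY;
 *	...
 *	dm_put(md);
 */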
1923
72d94861
AK
1924const char *dm_device_name(struct mapped_device *md)
1925{
1926 return md->name;
1927}
1928EXPORT_SYMBOL_GPL(dm_device_name);
1929
3f77316d 1930static void __dm_destroy(struct mapped_device *md, bool wait)
1da177e4 1931{
3b785fbc 1932 struct request_queue *q = dm_get_md_queue(md);
1134e5ae 1933 struct dm_table *map;
83d5e5b0 1934 int srcu_idx;
1da177e4 1935
3f77316d 1936 might_sleep();
fba9f90e 1937
63a4f065 1938 spin_lock(&_minor_lock);
3f77316d
KU
1939 idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
1940 set_bit(DMF_FREEING, &md->flags);
1941 spin_unlock(&_minor_lock);
3b785fbc 1942
2e91c369 1943 blk_set_queue_dying(q);
3f77316d 1944
02233342 1945 if (dm_request_based(md) && md->kworker_task)
3989144f 1946 kthread_flush_worker(&md->kworker);
2eb6e1e3 1947
ab7c7bb6
MP
1948 /*
1949 * Take suspend_lock so that presuspend and postsuspend methods
1950 * do not race with internal suspend.
1951 */
1952 mutex_lock(&md->suspend_lock);
2a708cff 1953 map = dm_get_live_table(md, &srcu_idx);
3f77316d
KU
1954 if (!dm_suspended_md(md)) {
1955 dm_table_presuspend_targets(map);
1956 dm_table_postsuspend_targets(map);
1da177e4 1957 }
83d5e5b0
MP
1958 /* dm_put_live_table must be before msleep, otherwise deadlock is possible */
1959 dm_put_live_table(md, srcu_idx);
2a708cff 1960 mutex_unlock(&md->suspend_lock);
83d5e5b0 1961
3f77316d
KU
1962 /*
1963 * Rare, but there may still be I/O requests in flight that need to
1964 * complete.  Wait for all references to disappear.
1965 * No one should increment the reference count of the mapped_device
1966 * after its state becomes DMF_FREEING.
1967 */
1968 if (wait)
1969 while (atomic_read(&md->holders))
1970 msleep(1);
1971 else if (atomic_read(&md->holders))
1972 DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
1973 dm_device_name(md), atomic_read(&md->holders));
1974
1975 dm_sysfs_exit(md);
3f77316d
KU
1976 dm_table_destroy(__unbind(md));
1977 free_dev(md);
1978}
1979
1980void dm_destroy(struct mapped_device *md)
1981{
1982 __dm_destroy(md, true);
1983}
1984
1985void dm_destroy_immediate(struct mapped_device *md)
1986{
1987 __dm_destroy(md, false);
1988}
1989
1990void dm_put(struct mapped_device *md)
1991{
1992 atomic_dec(&md->holders);
1da177e4 1993}
79eb885c 1994EXPORT_SYMBOL_GPL(dm_put);
1da177e4 1995
b48633f8 1996static int dm_wait_for_completion(struct mapped_device *md, long task_state)
46125c1c
MB
1997{
1998 int r = 0;
9f4c3f87 1999 DEFINE_WAIT(wait);
46125c1c
MB
2000
2001 while (1) {
9f4c3f87 2002 prepare_to_wait(&md->wait, &wait, task_state);
46125c1c 2003
b4324fee 2004 if (!md_in_flight(md))
46125c1c
MB
2005 break;
2006
e3fabdfd 2007 if (signal_pending_state(task_state, current)) {
46125c1c
MB
2008 r = -EINTR;
2009 break;
2010 }
2011
2012 io_schedule();
2013 }
9f4c3f87 2014 finish_wait(&md->wait, &wait);
b44ebeb0 2015
46125c1c
MB
2016 return r;
2017}
2018
1da177e4
LT
2019/*
2020 * Process the deferred bios
2021 */
ef208587 2022static void dm_wq_work(struct work_struct *work)
1da177e4 2023{
ef208587
MP
2024 struct mapped_device *md = container_of(work, struct mapped_device,
2025 work);
6d6f10df 2026 struct bio *c;
83d5e5b0
MP
2027 int srcu_idx;
2028 struct dm_table *map;
1da177e4 2029
83d5e5b0 2030 map = dm_get_live_table(md, &srcu_idx);
ef208587 2031
3b00b203 2032 while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
df12ee99
AK
2033 spin_lock_irq(&md->deferred_lock);
2034 c = bio_list_pop(&md->deferred);
2035 spin_unlock_irq(&md->deferred_lock);
2036
6a8736d1 2037 if (!c)
df12ee99 2038 break;
022c2611 2039
e6ee8c0b
KU
2040 if (dm_request_based(md))
2041 generic_make_request(c);
6a8736d1 2042 else
83d5e5b0 2043 __split_and_process_bio(md, map, c);
022c2611 2044 }
73d410c0 2045
83d5e5b0 2046 dm_put_live_table(md, srcu_idx);
1da177e4
LT
2047}
2048
9a1fb464 2049static void dm_queue_flush(struct mapped_device *md)
304f3f6a 2050{
3b00b203 2051 clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
4e857c58 2052 smp_mb__after_atomic();
53d5914f 2053 queue_work(md->wq, &md->work);
304f3f6a
MB
2054}
2055
1da177e4 2056/*
042d2a9b 2057 * Swap in a new table, returning the old one for the caller to destroy.
1da177e4 2058 */
042d2a9b 2059struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
1da177e4 2060{
87eb5b21 2061 struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
754c5fc7 2062 struct queue_limits limits;
042d2a9b 2063 int r;
1da177e4 2064
e61290a4 2065 mutex_lock(&md->suspend_lock);
1da177e4
LT
2066
2067 /* device must be suspended */
4f186f8b 2068 if (!dm_suspended_md(md))
93c534ae 2069 goto out;
1da177e4 2070
3ae70656
MS
2071 /*
2072 * If the new table has no data devices, retain the existing limits.
2073 * This helps multipath with queue_if_no_path: if all paths disappear,
2074 * new I/O is queued based on these limits, and then some paths
2075 * reappear.
2076 */
2077 if (dm_table_has_no_data_devices(table)) {
83d5e5b0 2078 live_map = dm_get_live_table_fast(md);
3ae70656
MS
2079 if (live_map)
2080 limits = md->queue->limits;
83d5e5b0 2081 dm_put_live_table_fast(md);
3ae70656
MS
2082 }
2083
87eb5b21
MC
2084 if (!live_map) {
2085 r = dm_calculate_queue_limits(table, &limits);
2086 if (r) {
2087 map = ERR_PTR(r);
2088 goto out;
2089 }
042d2a9b 2090 }
754c5fc7 2091
042d2a9b 2092 map = __bind(md, table, &limits);
1da177e4 2093
93c534ae 2094out:
e61290a4 2095 mutex_unlock(&md->suspend_lock);
042d2a9b 2096 return map;
1da177e4
LT
2097}
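/*
 * Illustrative sketch only: the suspend / swap / resume sequence a table
 * loader might follow (error handling abbreviated; this is an assumption
 * based on the constraints above, not a copy of the real caller).
 *
 *	dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
 *	old_map = dm_swap_table(md, new_table);
 *	if (!IS_ERR_OR_NULL(old_map))
 *		dm_table_destroy(old_map);
 *	dm_resume(md);
 */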
2098
2099/*
2100 * Functions to lock and unlock any filesystem running on the
2101 * device.
2102 */
2ca3310e 2103static int lock_fs(struct mapped_device *md)
1da177e4 2104{
e39e2e95 2105 int r;
1da177e4
LT
2106
2107 WARN_ON(md->frozen_sb);
dfbe03f6 2108
db8fef4f 2109 md->frozen_sb = freeze_bdev(md->bdev);
dfbe03f6 2110 if (IS_ERR(md->frozen_sb)) {
cf222b37 2111 r = PTR_ERR(md->frozen_sb);
e39e2e95
AK
2112 md->frozen_sb = NULL;
2113 return r;
dfbe03f6
AK
2114 }
2115
aa8d7c2f
AK
2116 set_bit(DMF_FROZEN, &md->flags);
2117
1da177e4
LT
2118 return 0;
2119}
2120
2ca3310e 2121static void unlock_fs(struct mapped_device *md)
1da177e4 2122{
aa8d7c2f
AK
2123 if (!test_bit(DMF_FROZEN, &md->flags))
2124 return;
2125
db8fef4f 2126 thaw_bdev(md->bdev, md->frozen_sb);
1da177e4 2127 md->frozen_sb = NULL;
aa8d7c2f 2128 clear_bit(DMF_FROZEN, &md->flags);
1da177e4
LT
2129}
2130
2131/*
b48633f8
BVA
2132 * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG
2133 * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE
2134 * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY
2135 *
ffcc3936
MS
2136 * If __dm_suspend returns 0, the device is completely quiescent
2137 * now. There is no request-processing activity. All new requests
2138 * are being added to md->deferred list.
cec47e3d 2139 *
ffcc3936 2140 * Caller must hold md->suspend_lock
cec47e3d 2141 */
ffcc3936 2142static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
b48633f8 2143 unsigned suspend_flags, long task_state,
eaf9a736 2144 int dmf_suspended_flag)
1da177e4 2145{
ffcc3936
MS
2146 bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
2147 bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
2148 int r;
1da177e4 2149
5a8f1f80
BVA
2150 lockdep_assert_held(&md->suspend_lock);
2151
2e93ccc1
KU
2152 /*
2153 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
2154 * This flag is cleared before dm_suspend returns.
2155 */
2156 if (noflush)
2157 set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
2158
d67ee213
MS
2159 /*
2160 * This gets reverted if there's an error later and the targets
2161 * provide the .presuspend_undo hook.
2162 */
cf222b37
AK
2163 dm_table_presuspend_targets(map);
2164
32a926da 2165 /*
9f518b27
KU
2166 * Flush I/O to the device.
2167 * Any I/O submitted after lock_fs() may not be flushed.
2168 * noflush takes precedence over do_lockfs.
2169 * (lock_fs() flushes I/Os and waits for them to complete.)
32a926da
MP
2170 */
2171 if (!noflush && do_lockfs) {
2172 r = lock_fs(md);
d67ee213
MS
2173 if (r) {
2174 dm_table_presuspend_undo_targets(map);
ffcc3936 2175 return r;
d67ee213 2176 }
aa8d7c2f 2177 }
1da177e4
LT
2178
2179 /*
3b00b203
MP
2180 * Here we must make sure that no processes are submitting requests
2181 * to target drivers i.e. no one may be executing
2182 * __split_and_process_bio. This is called from dm_request and
2183 * dm_wq_work.
2184 *
2185 * To get all processes out of __split_and_process_bio in dm_request,
2186 * we take the write lock. To prevent any process from reentering
6a8736d1
TH
2187 * __split_and_process_bio from dm_request and quiesce the thread
2188 * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
2189 * flush_workqueue(md->wq).
1da177e4 2190 */
1eb787ec 2191 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
41abc4e1
HR
2192 if (map)
2193 synchronize_srcu(&md->io_barrier);
1da177e4 2194
d0bcb878 2195 /*
29e4013d
TH
2196 * Stop md->queue before flushing md->wq in case request-based
2197 * dm defers requests to md->wq from md->queue.
d0bcb878 2198 */
2eb6e1e3 2199 if (dm_request_based(md)) {
eca7ee6d 2200 dm_stop_queue(md->queue);
02233342 2201 if (md->kworker_task)
3989144f 2202 kthread_flush_worker(&md->kworker);
2eb6e1e3 2203 }
cec47e3d 2204
d0bcb878
KU
2205 flush_workqueue(md->wq);
2206
1da177e4 2207 /*
3b00b203
MP
2208 * At this point no more requests are entering target request routines.
2209 * We call dm_wait_for_completion to wait for all existing requests
2210 * to finish.
1da177e4 2211 */
b48633f8 2212 r = dm_wait_for_completion(md, task_state);
eaf9a736
MS
2213 if (!r)
2214 set_bit(dmf_suspended_flag, &md->flags);
1da177e4 2215
6d6f10df 2216 if (noflush)
022c2611 2217 clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
41abc4e1
HR
2218 if (map)
2219 synchronize_srcu(&md->io_barrier);
2e93ccc1 2220
1da177e4 2221 /* were we interrupted ? */
46125c1c 2222 if (r < 0) {
9a1fb464 2223 dm_queue_flush(md);
73d410c0 2224
cec47e3d 2225 if (dm_request_based(md))
eca7ee6d 2226 dm_start_queue(md->queue);
cec47e3d 2227
2ca3310e 2228 unlock_fs(md);
d67ee213 2229 dm_table_presuspend_undo_targets(map);
ffcc3936 2230 /* pushback list is already flushed, so skip flush */
2ca3310e 2231 }
1da177e4 2232
ffcc3936
MS
2233 return r;
2234}
2235
2236/*
2237 * We need to be able to change a mapping table under a mounted
2238 * filesystem. For example, we might want to move some data in
2239 * the background. Before the table can be swapped with
2240 * dm_bind_table, dm_suspend must be called to flush any in-flight
2241 * bios and ensure that any further I/O gets deferred.
2242 */
2243/*
2244 * Suspend mechanism in request-based dm.
2245 *
2246 * 1. Flush all I/Os by lock_fs() if needed.
2247 * 2. Stop dispatching any I/O by stopping the request_queue.
2248 * 3. Wait for all in-flight I/Os to be completed or requeued.
2249 *
2250 * To abort suspend, start the request_queue.
2251 */
2252int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
2253{
2254 struct dm_table *map = NULL;
2255 int r = 0;
2256
2257retry:
2258 mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
2259
2260 if (dm_suspended_md(md)) {
2261 r = -EINVAL;
2262 goto out_unlock;
2263 }
2264
2265 if (dm_suspended_internally_md(md)) {
2266 /* already internally suspended, wait for internal resume */
2267 mutex_unlock(&md->suspend_lock);
2268 r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
2269 if (r)
2270 return r;
2271 goto retry;
2272 }
2273
a12f5d48 2274 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
ffcc3936 2275
eaf9a736 2276 r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED);
ffcc3936
MS
2277 if (r)
2278 goto out_unlock;
3b00b203 2279
4d4471cb
KU
2280 dm_table_postsuspend_targets(map);
2281
d287483d 2282out_unlock:
e61290a4 2283 mutex_unlock(&md->suspend_lock);
cf222b37 2284 return r;
1da177e4
LT
2285}
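/*
 * Illustrative sketch only (assumed usage, not code from this file):
 * how the suspend flags combine.  LOCKFS freezes the filesystem so
 * pending data is flushed before the device quiesces; NOFLUSH takes
 * precedence and leaves pending bios on the pushback list instead.
 *
 *	dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);	(flush via lock_fs)
 *	dm_suspend(md, DM_SUSPEND_NOFLUSH_FLAG);	(defer pending bios)
 */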
2286
ffcc3936
MS
2287static int __dm_resume(struct mapped_device *md, struct dm_table *map)
2288{
2289 if (map) {
2290 int r = dm_table_resume_targets(map);
2291 if (r)
2292 return r;
2293 }
2294
2295 dm_queue_flush(md);
2296
2297 /*
2298 * Flushing deferred I/Os must be done after targets are resumed
2299 * so that mapping of targets can work correctly.
2300 * Request-based dm is queueing the deferred I/Os in its request_queue.
2301 */
2302 if (dm_request_based(md))
eca7ee6d 2303 dm_start_queue(md->queue);
ffcc3936
MS
2304
2305 unlock_fs(md);
2306
2307 return 0;
2308}
2309
1da177e4
LT
2310int dm_resume(struct mapped_device *md)
2311{
8dc23658 2312 int r;
cf222b37 2313 struct dm_table *map = NULL;
1da177e4 2314
ffcc3936 2315retry:
8dc23658 2316 r = -EINVAL;
ffcc3936
MS
2317 mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
2318
4f186f8b 2319 if (!dm_suspended_md(md))
cf222b37 2320 goto out;
cf222b37 2321
ffcc3936
MS
2322 if (dm_suspended_internally_md(md)) {
2323 /* already internally suspended, wait for internal resume */
2324 mutex_unlock(&md->suspend_lock);
2325 r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
2326 if (r)
2327 return r;
2328 goto retry;
2329 }
2330
a12f5d48 2331 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2ca3310e 2332 if (!map || !dm_table_get_size(map))
cf222b37 2333 goto out;
1da177e4 2334
ffcc3936 2335 r = __dm_resume(md, map);
8757b776
MB
2336 if (r)
2337 goto out;
2ca3310e 2338
2ca3310e 2339 clear_bit(DMF_SUSPENDED, &md->flags);
cf222b37 2340out:
e61290a4 2341 mutex_unlock(&md->suspend_lock);
2ca3310e 2342
cf222b37 2343 return r;
1da177e4
LT
2344}
2345
fd2ed4d2
MP
2346/*
2347 * Internal suspend/resume works like userspace-driven suspend. It waits
2348 * until all bios finish and prevents issuing new bios to the target drivers.
2349 * It may be used only from the kernel.
fd2ed4d2
MP
2350 */
2351
ffcc3936 2352static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags)
fd2ed4d2 2353{
ffcc3936
MS
2354 struct dm_table *map = NULL;
2355
96b26c8c 2356 if (md->internal_suspend_count++)
ffcc3936
MS
2357 return; /* nested internal suspend */
2358
2359 if (dm_suspended_md(md)) {
2360 set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
2361 return; /* nest suspend */
2362 }
2363
a12f5d48 2364 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
ffcc3936
MS
2365
2366 /*
2367 * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is
2368 * supported. Properly supporting a TASK_INTERRUPTIBLE internal suspend
2369 * would require changing .presuspend to return an error -- avoid this
2370 * until there is a need for more elaborate variants of internal suspend.
2371 */
eaf9a736
MS
2372 (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE,
2373 DMF_SUSPENDED_INTERNALLY);
ffcc3936
MS
2374
2375 dm_table_postsuspend_targets(map);
2376}
2377
2378static void __dm_internal_resume(struct mapped_device *md)
2379{
96b26c8c
MP
2380 BUG_ON(!md->internal_suspend_count);
2381
2382 if (--md->internal_suspend_count)
ffcc3936
MS
2383 return; /* resume from nested internal suspend */
2384
fd2ed4d2 2385 if (dm_suspended_md(md))
ffcc3936
MS
2386 goto done; /* resume from nested suspend */
2387
2388 /*
2389 * NOTE: existing callers don't need to call dm_table_resume_targets
2390 * (which may fail -- so best to avoid it for now by passing NULL map)
2391 */
2392 (void) __dm_resume(md, NULL);
2393
2394done:
2395 clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
2396 smp_mb__after_atomic();
2397 wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY);
2398}
2399
2400void dm_internal_suspend_noflush(struct mapped_device *md)
2401{
2402 mutex_lock(&md->suspend_lock);
2403 __dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG);
2404 mutex_unlock(&md->suspend_lock);
2405}
2406EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush);
2407
2408void dm_internal_resume(struct mapped_device *md)
2409{
2410 mutex_lock(&md->suspend_lock);
2411 __dm_internal_resume(md);
2412 mutex_unlock(&md->suspend_lock);
2413}
2414EXPORT_SYMBOL_GPL(dm_internal_resume);
2415
2416/*
2417 * Fast variants of internal suspend/resume hold md->suspend_lock,
2418 * which prevents interaction with userspace-driven suspend.
2419 */
2420
2421void dm_internal_suspend_fast(struct mapped_device *md)
2422{
2423 mutex_lock(&md->suspend_lock);
2424 if (dm_suspended_md(md) || dm_suspended_internally_md(md))
fd2ed4d2
MP
2425 return;
2426
2427 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2428 synchronize_srcu(&md->io_barrier);
2429 flush_workqueue(md->wq);
2430 dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
2431}
b735fede 2432EXPORT_SYMBOL_GPL(dm_internal_suspend_fast);
fd2ed4d2 2433
ffcc3936 2434void dm_internal_resume_fast(struct mapped_device *md)
fd2ed4d2 2435{
ffcc3936 2436 if (dm_suspended_md(md) || dm_suspended_internally_md(md))
fd2ed4d2
MP
2437 goto done;
2438
2439 dm_queue_flush(md);
2440
2441done:
2442 mutex_unlock(&md->suspend_lock);
2443}
b735fede 2444EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
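/*
 * Illustrative sketch only: the fast variants are meant to be used as a
 * strict pair, because dm_internal_suspend_fast() returns with
 * md->suspend_lock still held and dm_internal_resume_fast() drops it
 * (the surrounding caller is an assumption):
 *
 *	dm_internal_suspend_fast(md);
 *	...briefly inspect or quiesce device state...
 *	dm_internal_resume_fast(md);
 */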
fd2ed4d2 2445
1da177e4
LT
2446/*-----------------------------------------------------------------
2447 * Event notification.
2448 *---------------------------------------------------------------*/
3abf85b5 2449int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
60935eb2 2450 unsigned cookie)
69267a30 2451{
60935eb2
MB
2452 char udev_cookie[DM_COOKIE_LENGTH];
2453 char *envp[] = { udev_cookie, NULL };
2454
2455 if (!cookie)
3abf85b5 2456 return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
60935eb2
MB
2457 else {
2458 snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
2459 DM_COOKIE_ENV_VAR_NAME, cookie);
3abf85b5
PR
2460 return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
2461 action, envp);
60935eb2 2462 }
69267a30
AK
2463}
2464
7a8c3d3b
MA
2465uint32_t dm_next_uevent_seq(struct mapped_device *md)
2466{
2467 return atomic_add_return(1, &md->uevent_seq);
2468}
2469
1da177e4
LT
2470uint32_t dm_get_event_nr(struct mapped_device *md)
2471{
2472 return atomic_read(&md->event_nr);
2473}
2474
2475int dm_wait_event(struct mapped_device *md, int event_nr)
2476{
2477 return wait_event_interruptible(md->eventq,
2478 (event_nr != atomic_read(&md->event_nr)));
2479}
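/*
 * Illustrative sketch only (assumed usage, not code from this file):
 * an event consumer samples the counter, reports status, then sleeps
 * until the counter changes.
 *
 *	int nr = dm_get_event_nr(md);
 *	...report current status to userspace...
 *	if (dm_wait_event(md, nr))
 *		return -ERESTARTSYS;
 */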
2480
7a8c3d3b
MA
2481void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
2482{
2483 unsigned long flags;
2484
2485 spin_lock_irqsave(&md->uevent_lock, flags);
2486 list_add(elist, &md->uevent_list);
2487 spin_unlock_irqrestore(&md->uevent_lock, flags);
2488}
2489
1da177e4
LT
2490/*
2491 * The gendisk is only valid as long as you have a reference
2492 * count on 'md'.
2493 */
2494struct gendisk *dm_disk(struct mapped_device *md)
2495{
2496 return md->disk;
2497}
65ff5b7d 2498EXPORT_SYMBOL_GPL(dm_disk);
1da177e4 2499
784aae73
MB
2500struct kobject *dm_kobject(struct mapped_device *md)
2501{
2995fa78 2502 return &md->kobj_holder.kobj;
784aae73
MB
2503}
2504
784aae73
MB
2505struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
2506{
2507 struct mapped_device *md;
2508
2995fa78 2509 md = container_of(kobj, struct mapped_device, kobj_holder.kobj);
784aae73 2510
4d89b7b4 2511 if (test_bit(DMF_FREEING, &md->flags) ||
432a212c 2512 dm_deleting_md(md))
4d89b7b4
MB
2513 return NULL;
2514
784aae73
MB
2515 dm_get(md);
2516 return md;
2517}
2518
4f186f8b 2519int dm_suspended_md(struct mapped_device *md)
1da177e4
LT
2520{
2521 return test_bit(DMF_SUSPENDED, &md->flags);
2522}
2523
ffcc3936
MS
2524int dm_suspended_internally_md(struct mapped_device *md)
2525{
2526 return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
2527}
2528
2c140a24
MP
2529int dm_test_deferred_remove_flag(struct mapped_device *md)
2530{
2531 return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
2532}
2533
64dbce58
KU
2534int dm_suspended(struct dm_target *ti)
2535{
ecdb2e25 2536 return dm_suspended_md(dm_table_get_md(ti->table));
64dbce58
KU
2537}
2538EXPORT_SYMBOL_GPL(dm_suspended);
2539
2e93ccc1
KU
2540int dm_noflush_suspending(struct dm_target *ti)
2541{
ecdb2e25 2542 return __noflush_suspending(dm_table_get_md(ti->table));
2e93ccc1
KU
2543}
2544EXPORT_SYMBOL_GPL(dm_noflush_suspending);
2545
78d8e58a 2546struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned type,
30187e1d 2547 unsigned integrity, unsigned per_io_data_size)
e6ee8c0b 2548{
115485e8 2549 struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
78d8e58a 2550 unsigned int pool_size = 0;
5f015204 2551 unsigned int front_pad;
e6ee8c0b
KU
2552
2553 if (!pools)
4e6e36c3 2554 return NULL;
e6ee8c0b 2555
78d8e58a
MS
2556 switch (type) {
2557 case DM_TYPE_BIO_BASED:
545ed20e 2558 case DM_TYPE_DAX_BIO_BASED:
78d8e58a 2559 pool_size = dm_get_reserved_bio_based_ios();
30187e1d 2560 front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
eb8db831
CH
2561
2562 pools->io_pool = mempool_create_slab_pool(pool_size, _io_cache);
2563 if (!pools->io_pool)
2564 goto out;
78d8e58a
MS
2565 break;
2566 case DM_TYPE_REQUEST_BASED:
78d8e58a 2567 case DM_TYPE_MQ_REQUEST_BASED:
eb8db831 2568 pool_size = dm_get_reserved_rq_based_ios();
78d8e58a 2569 front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
591ddcfc 2570 /* per_io_data_size is used for blk-mq pdu at queue allocation */
78d8e58a
MS
2571 break;
2572 default:
2573 BUG();
2574 }
2575
3d8aab2d 2576 pools->bs = bioset_create_nobvec(pool_size, front_pad);
e6ee8c0b 2577 if (!pools->bs)
5f015204 2578 goto out;
e6ee8c0b 2579
a91a2785 2580 if (integrity && bioset_integrity_create(pools->bs, pool_size))
5f015204 2581 goto out;
a91a2785 2582
e6ee8c0b 2583 return pools;
5f1b670d 2584
5f1b670d
CH
2585out:
2586 dm_free_md_mempools(pools);
78d8e58a 2587
4e6e36c3 2588 return NULL;
e6ee8c0b
KU
2589}
2590
2591void dm_free_md_mempools(struct dm_md_mempools *pools)
2592{
2593 if (!pools)
2594 return;
2595
6f65985e 2596 mempool_destroy(pools->io_pool);
1ae49ea2 2597
e6ee8c0b
KU
2598 if (pools->bs)
2599 bioset_free(pools->bs);
2600
2601 kfree(pools);
2602}
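/*
 * Illustrative sketch only (the caller shown is an assumption): pools are
 * sized from the device type and the per-bio data size the targets ask
 * for, and must be released with dm_free_md_mempools() on every error
 * path as well as on teardown.
 *
 *	pools = dm_alloc_md_mempools(md, type, integrity, per_io_data_size);
 *	if (!pools)
 *		return -ENOMEM;
 *	...attach pools to the table being loaded...
 *	dm_free_md_mempools(pools);
 */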
2603
9c72bad1
CH
2604struct dm_pr {
2605 u64 old_key;
2606 u64 new_key;
2607 u32 flags;
2608 bool fail_early;
2609};
2610
2611static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn,
2612 void *data)
71cdb697
CH
2613{
2614 struct mapped_device *md = bdev->bd_disk->private_data;
9c72bad1
CH
2615 struct dm_table *table;
2616 struct dm_target *ti;
2617 int ret = -ENOTTY, srcu_idx;
71cdb697 2618
9c72bad1
CH
2619 table = dm_get_live_table(md, &srcu_idx);
2620 if (!table || !dm_table_get_size(table))
2621 goto out;
71cdb697 2622
9c72bad1
CH
2623 /* We only support devices that have a single target */
2624 if (dm_table_get_num_targets(table) != 1)
2625 goto out;
2626 ti = dm_table_get_target(table, 0);
71cdb697 2627
9c72bad1
CH
2628 ret = -EINVAL;
2629 if (!ti->type->iterate_devices)
2630 goto out;
2631
2632 ret = ti->type->iterate_devices(ti, fn, data);
2633out:
2634 dm_put_live_table(md, srcu_idx);
2635 return ret;
2636}
2637
2638/*
2639 * For register / unregister we need to manually call out to every path.
2640 */
2641static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev,
2642 sector_t start, sector_t len, void *data)
2643{
2644 struct dm_pr *pr = data;
2645 const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
2646
2647 if (!ops || !ops->pr_register)
2648 return -EOPNOTSUPP;
2649 return ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags);
2650}
2651
2652static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
2653 u32 flags)
2654{
2655 struct dm_pr pr = {
2656 .old_key = old_key,
2657 .new_key = new_key,
2658 .flags = flags,
2659 .fail_early = true,
2660 };
2661 int ret;
2662
2663 ret = dm_call_pr(bdev, __dm_pr_register, &pr);
2664 if (ret && new_key) {
2665 /* unregister all paths if we failed to register any path */
2666 pr.old_key = new_key;
2667 pr.new_key = 0;
2668 pr.flags = 0;
2669 pr.fail_early = false;
2670 dm_call_pr(bdev, __dm_pr_register, &pr);
2671 }
2672
2673 return ret;
71cdb697
CH
2674}
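/*
 * Illustrative sketch only: on partial failure the register above is
 * rolled back by re-issuing it as an unregister (new_key == 0) against
 * the key that was just registered, so no path is left half-registered.
 * A hedged sketch of that rollback step:
 *
 *	struct dm_pr undo = { .old_key = new_key, .new_key = 0,
 *			      .flags = 0, .fail_early = false };
 *	(void) dm_call_pr(bdev, __dm_pr_register, &undo);
 */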
2675
2676static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
956a4025 2677 u32 flags)
71cdb697
CH
2678{
2679 struct mapped_device *md = bdev->bd_disk->private_data;
2680 const struct pr_ops *ops;
71cdb697 2681 fmode_t mode;
956a4025 2682 int r;
71cdb697 2683
956a4025 2684 r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
71cdb697
CH
2685 if (r < 0)
2686 return r;
2687
2688 ops = bdev->bd_disk->fops->pr_ops;
2689 if (ops && ops->pr_reserve)
2690 r = ops->pr_reserve(bdev, key, type, flags);
2691 else
2692 r = -EOPNOTSUPP;
2693
956a4025 2694 bdput(bdev);
71cdb697
CH
2695 return r;
2696}
2697
2698static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
2699{
2700 struct mapped_device *md = bdev->bd_disk->private_data;
2701 const struct pr_ops *ops;
71cdb697 2702 fmode_t mode;
956a4025 2703 int r;
71cdb697 2704
956a4025 2705 r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
71cdb697
CH
2706 if (r < 0)
2707 return r;
2708
2709 ops = bdev->bd_disk->fops->pr_ops;
2710 if (ops && ops->pr_release)
2711 r = ops->pr_release(bdev, key, type);
2712 else
2713 r = -EOPNOTSUPP;
2714
956a4025 2715 bdput(bdev);
71cdb697
CH
2716 return r;
2717}
2718
2719static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
956a4025 2720 enum pr_type type, bool abort)
71cdb697
CH
2721{
2722 struct mapped_device *md = bdev->bd_disk->private_data;
2723 const struct pr_ops *ops;
71cdb697 2724 fmode_t mode;
956a4025 2725 int r;
71cdb697 2726
956a4025 2727 r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
71cdb697
CH
2728 if (r < 0)
2729 return r;
2730
2731 ops = bdev->bd_disk->fops->pr_ops;
2732 if (ops && ops->pr_preempt)
2733 r = ops->pr_preempt(bdev, old_key, new_key, type, abort);
2734 else
2735 r = -EOPNOTSUPP;
2736
956a4025 2737 bdput(bdev);
71cdb697
CH
2738 return r;
2739}
2740
2741static int dm_pr_clear(struct block_device *bdev, u64 key)
2742{
2743 struct mapped_device *md = bdev->bd_disk->private_data;
2744 const struct pr_ops *ops;
71cdb697 2745 fmode_t mode;
956a4025 2746 int r;
71cdb697 2747
956a4025 2748 r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
71cdb697
CH
2749 if (r < 0)
2750 return r;
2751
2752 ops = bdev->bd_disk->fops->pr_ops;
2753 if (ops && ops->pr_clear)
2754 r = ops->pr_clear(bdev, key);
2755 else
2756 r = -EOPNOTSUPP;
2757
956a4025 2758 bdput(bdev);
71cdb697
CH
2759 return r;
2760}
2761
2762static const struct pr_ops dm_pr_ops = {
2763 .pr_register = dm_pr_register,
2764 .pr_reserve = dm_pr_reserve,
2765 .pr_release = dm_pr_release,
2766 .pr_preempt = dm_pr_preempt,
2767 .pr_clear = dm_pr_clear,
2768};
2769
83d5cde4 2770static const struct block_device_operations dm_blk_dops = {
1da177e4
LT
2771 .open = dm_blk_open,
2772 .release = dm_blk_close,
aa129a22 2773 .ioctl = dm_blk_ioctl,
545ed20e 2774 .direct_access = dm_blk_direct_access,
3ac51e74 2775 .getgeo = dm_blk_getgeo,
71cdb697 2776 .pr_ops = &dm_pr_ops,
1da177e4
LT
2777 .owner = THIS_MODULE
2778};
2779
1da177e4
LT
2780/*
2781 * module hooks
2782 */
2783module_init(dm_init);
2784module_exit(dm_exit);
2785
2786module_param(major, uint, 0);
2787MODULE_PARM_DESC(major, "The major number of the device mapper");
f4790826 2788
e8603136
MS
2789module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
2790MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
2791
115485e8
MS
2792module_param(dm_numa_node, int, S_IRUGO | S_IWUSR);
2793MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations");
2794
1da177e4
LT
2795MODULE_DESCRIPTION(DM_NAME " driver");
2796MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
2797MODULE_LICENSE("GPL");