/*
 * Copyright (C) 2011-2012 Red Hat UK.
 *
 * This file is released under the GPL.
 */

#include "dm-thin-metadata.h"
#include "dm-bio-prison.h"
#include "dm.h"

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/jiffies.h>
#include <linux/log2.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sort.h>
#include <linux/rbtree.h>

#define DM_MSG_PREFIX "thin"

/*
 * Tunable constants
 */
#define ENDIO_HOOK_POOL_SIZE 1024
#define MAPPING_POOL_SIZE 1024
#define COMMIT_PERIOD HZ
#define NO_SPACE_TIMEOUT_SECS 60

static unsigned no_space_timeout_secs = NO_SPACE_TIMEOUT_SECS;

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
		"A percentage of time allocated for copy on write");

/*
 * The block size of the device holding pool data must be
 * between 64KB and 1GB.
 */
#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (64 * 1024 >> SECTOR_SHIFT)
#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)

/*
 * Device id is restricted to 24 bits.
 */
#define MAX_DEV_ID ((1 << 24) - 1)

/*
 * How do we handle breaking sharing of data blocks?
 * =================================================
 *
 * We use a standard copy-on-write btree to store the mappings for the
 * devices (note I'm talking about copy-on-write of the metadata here, not
 * the data).  When you take an internal snapshot you clone the root node
 * of the origin btree.  After this there is no concept of an origin or a
 * snapshot.  They are just two device trees that happen to point to the
 * same data blocks.
 *
 * When we get a write in we decide if it's to a shared data block using
 * some timestamp magic.  If it is, we have to break sharing.
 *
 * Let's say we write to a shared block in what was the origin.  The
 * steps are:
 *
 * i) plug io further to this physical block. (see bio_prison code).
 *
 * ii) quiesce any read io to that shared data block.  Obviously
 * including all devices that share this block.  (see dm_deferred_set code)
 *
 * iii) copy the data block to a newly allocated block.  This step can be
 * missed out if the io covers the block. (schedule_copy).
 *
 * iv) insert the new mapping into the origin's btree
 * (process_prepared_mapping).  This act of inserting breaks some
 * sharing of btree nodes between the two devices.  Breaking sharing only
 * affects the btree of that specific device.  Btrees for the other
 * devices that share the block never change.  The btree for the origin
 * device as it was after the last commit is untouched, ie. we're using
 * persistent data structures in the functional programming sense.
 *
 * v) unplug io to this physical block, including the io that triggered
 * the breaking of sharing.
 *
 * Steps (ii) and (iii) occur in parallel.
 *
 * The metadata _doesn't_ need to be committed before the io continues.  We
 * get away with this because the io is always written to a _new_ block.
 * If there's a crash, then:
 *
 * - The origin mapping will point to the old origin block (the shared
 * one).  This will contain the data as it was before the io that triggered
 * the breaking of sharing came in.
 *
 * - The snap mapping still points to the old block.  As it would after
 * the commit.
 *
 * The downside of this scheme is the timestamp magic isn't perfect, and
 * will continue to think that data block in the snapshot device is shared
 * even after the write to the origin has broken sharing.  I suspect data
 * blocks will typically be shared by many different devices, so we're
 * breaking sharing n + 1 times, rather than n, where n is the number of
 * devices that reference this data block.  At the moment I think the
 * benefits far, far outweigh the disadvantages.
 */
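
/*
 * Worked example of the n + 1 behaviour above: a block referenced by an
 * origin and two snapshots (n = 3) can end up being copied four times
 * rather than three when all three devices are written, because the
 * imperfect timestamp check may still see the block as shared when the
 * final device writes to it.
 */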

/*----------------------------------------------------------------*/

/*
 * Key building.
 */
enum lock_space {
	VIRTUAL,
	PHYSICAL
};

static void build_key(struct dm_thin_device *td, enum lock_space ls,
		      dm_block_t b, dm_block_t e, struct dm_cell_key *key)
{
	key->virtual = (ls == VIRTUAL);
	key->dev = dm_thin_dev_id(td);
	key->block_begin = b;
	key->block_end = e;
}

static void build_data_key(struct dm_thin_device *td, dm_block_t b,
			   struct dm_cell_key *key)
{
	build_key(td, PHYSICAL, b, b + 1llu, key);
}

static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
			      struct dm_cell_key *key)
{
	build_key(td, VIRTUAL, b, b + 1llu, key);
}
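
/*
 * Illustration: a VIRTUAL key locks a range of blocks within one thin
 * device's address space, while a PHYSICAL key locks a range of blocks
 * on the shared data device.  So build_data_key(td, 5, &key) yields a
 * PHYSICAL key with block_begin = 5 and block_end = 6, locking just
 * data block 5.
 */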

/*----------------------------------------------------------------*/

#define THROTTLE_THRESHOLD (1 * HZ)

struct throttle {
	struct rw_semaphore lock;
	unsigned long threshold;
	bool throttle_applied;
};

static void throttle_init(struct throttle *t)
{
	init_rwsem(&t->lock);
	t->throttle_applied = false;
}

static void throttle_work_start(struct throttle *t)
{
	t->threshold = jiffies + THROTTLE_THRESHOLD;
}

static void throttle_work_update(struct throttle *t)
{
	if (!t->throttle_applied && jiffies > t->threshold) {
		down_write(&t->lock);
		t->throttle_applied = true;
	}
}

static void throttle_work_complete(struct throttle *t)
{
	if (t->throttle_applied) {
		t->throttle_applied = false;
		up_write(&t->lock);
	}
}

static void throttle_lock(struct throttle *t)
{
	down_read(&t->lock);
}

static void throttle_unlock(struct throttle *t)
{
	up_read(&t->lock);
}
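
/*
 * Usage sketch: the worker thread brackets each pass with
 * throttle_work_start() / throttle_work_complete() and calls
 * throttle_work_update() as it goes; IO submission paths take the read
 * side via throttle_lock()/throttle_unlock().  Once a worker pass has
 * run for longer than THROTTLE_THRESHOLD, throttle_work_update() takes
 * the write lock, holding off new submitters until the pass completes.
 */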

/*----------------------------------------------------------------*/

/*
 * A pool device ties together a metadata device and a data device.  It
 * also provides the interface for creating and destroying internal
 * devices.
 */
struct dm_thin_new_mapping;

/*
 * The pool runs in 4 modes, ordered from least to most degraded so that
 * modes can be compared numerically (e.g. mode >= PM_READ_ONLY).
 */
enum pool_mode {
	PM_WRITE,		/* metadata may be changed */
	PM_OUT_OF_DATA_SPACE,	/* metadata may be changed, though data may not be allocated */
	PM_READ_ONLY,		/* metadata may not be changed */
	PM_FAIL,		/* all I/O fails */
};

struct pool_features {
	enum pool_mode mode;

	bool zero_new_blocks:1;
	bool discard_enabled:1;
	bool discard_passdown:1;
	bool error_if_no_space:1;
};

struct thin_c;
typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
typedef void (*process_cell_fn)(struct thin_c *tc, struct dm_bio_prison_cell *cell);
typedef void (*process_mapping_fn)(struct dm_thin_new_mapping *m);

#define CELL_SORT_ARRAY_SIZE 8192

struct pool {
	struct list_head list;
	struct dm_target *ti;	/* Only set if a pool target is bound */

	struct mapped_device *pool_md;
	struct block_device *md_dev;
	struct dm_pool_metadata *pmd;

	dm_block_t low_water_blocks;
	uint32_t sectors_per_block;
	int sectors_per_block_shift;

	struct pool_features pf;
	bool low_water_triggered:1;	/* A dm event has been sent */
	bool suspended:1;

	struct dm_bio_prison *prison;
	struct dm_kcopyd_client *copier;

	struct workqueue_struct *wq;
	struct throttle throttle;
	struct work_struct worker;
	struct delayed_work waker;
	struct delayed_work no_space_timeout;

	unsigned long last_commit_jiffies;
	unsigned ref_count;

	spinlock_t lock;
	struct bio_list deferred_flush_bios;
	struct list_head prepared_mappings;
	struct list_head prepared_discards;
	struct list_head active_thins;

	struct dm_deferred_set *shared_read_ds;
	struct dm_deferred_set *all_io_ds;

	struct dm_thin_new_mapping *next_mapping;
	mempool_t *mapping_pool;

	process_bio_fn process_bio;
	process_bio_fn process_discard;

	process_cell_fn process_cell;
	process_cell_fn process_discard_cell;

	process_mapping_fn process_prepared_mapping;
	process_mapping_fn process_prepared_discard;

	struct dm_bio_prison_cell **cell_sort_array;
};

static enum pool_mode get_pool_mode(struct pool *pool);
static void metadata_operation_failed(struct pool *pool, const char *op, int r);

/*
 * Target context for a pool.
 */
struct pool_c {
	struct dm_target *ti;
	struct pool *pool;
	struct dm_dev *data_dev;
	struct dm_dev *metadata_dev;
	struct dm_target_callbacks callbacks;

	dm_block_t low_water_blocks;
	struct pool_features requested_pf; /* Features requested during table load */
	struct pool_features adjusted_pf;  /* Features used after adjusting for constituent devices */
};

/*
 * Target context for a thin.
 */
struct thin_c {
	struct list_head list;
	struct dm_dev *pool_dev;
	struct dm_dev *origin_dev;
	sector_t origin_size;
	dm_thin_id dev_id;

	struct pool *pool;
	struct dm_thin_device *td;
	struct mapped_device *thin_md;

	bool requeue_mode:1;
	spinlock_t lock;
	struct list_head deferred_cells;
	struct bio_list deferred_bio_list;
	struct bio_list retry_on_resume_list;
	struct rb_root sort_bio_list; /* sorted list of deferred bios */

	/*
	 * Ensures the thin is not destroyed until the worker has finished
	 * iterating the active_thins list.
	 */
	atomic_t refcount;
	struct completion can_destroy;
};

/*----------------------------------------------------------------*/

/**
 * __blkdev_issue_discard_async - queue a discard with async completion
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_IFL_* flags to control behaviour
 * @parent_bio: parent discard bio that all sub discards get chained to
 *
 * Description:
 *    Asynchronously issue a discard request for the sectors in question.
 *    NOTE: this variant of blk-core's blkdev_issue_discard() is a stop-gap
 *    that is being kept local to DM thinp until the block-layer changes
 *    that allow late bio splitting land upstream.
 */
static int __blkdev_issue_discard_async(struct block_device *bdev, sector_t sector,
					sector_t nr_sects, gfp_t gfp_mask, unsigned long flags,
					struct bio *parent_bio)
{
	struct request_queue *q = bdev_get_queue(bdev);
	int type = REQ_WRITE | REQ_DISCARD;
	unsigned int max_discard_sectors, granularity;
	int alignment;
	struct bio *bio;
	int ret = 0;
	struct blk_plug plug;

	if (!q)
		return -ENXIO;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);
	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;

	/*
	 * Ensure that max_discard_sectors is of the proper
	 * granularity, so that requests stay aligned after a split.
	 */
	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
	max_discard_sectors -= max_discard_sectors % granularity;
	if (unlikely(!max_discard_sectors)) {
		/* Avoid infinite loop below. Being cautious never hurts. */
		return -EOPNOTSUPP;
	}

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secdiscard(q))
			return -EOPNOTSUPP;
		type |= REQ_SECURE;
	}

	blk_start_plug(&plug);
	while (nr_sects) {
		unsigned int req_sects;
		sector_t end_sect, tmp;

		/*
		 * Required bio_put occurs in bio_endio thanks to bio_chain below
		 */
		bio = bio_alloc(gfp_mask, 1);
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		req_sects = min_t(sector_t, nr_sects, max_discard_sectors);

		/*
		 * If splitting a request, and the next starting sector would be
		 * misaligned, stop the discard at the previous aligned sector.
		 */
		end_sect = sector + req_sects;
		tmp = end_sect;
		if (req_sects < nr_sects &&
		    sector_div(tmp, granularity) != alignment) {
			end_sect = end_sect - alignment;
			sector_div(end_sect, granularity);
			end_sect = end_sect * granularity + alignment;
			req_sects = end_sect - sector;
		}

		bio_chain(bio, parent_bio);

		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev = bdev;

		bio->bi_iter.bi_size = req_sects << 9;
		nr_sects -= req_sects;
		sector = end_sect;

		submit_bio(type, bio);

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs).  Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}
	blk_finish_plug(&plug);

	return ret;
}

static bool block_size_is_power_of_two(struct pool *pool)
{
	return pool->sectors_per_block_shift >= 0;
}

static sector_t block_to_sectors(struct pool *pool, dm_block_t b)
{
	return block_size_is_power_of_two(pool) ?
		(b << pool->sectors_per_block_shift) :
		(b * pool->sectors_per_block);
}

static int issue_discard(struct thin_c *tc, dm_block_t data_b, dm_block_t data_e,
			 struct bio *parent_bio)
{
	sector_t s = block_to_sectors(tc->pool, data_b);
	sector_t len = block_to_sectors(tc->pool, data_e - data_b);

	return __blkdev_issue_discard_async(tc->pool_dev->bdev, s, len,
					    GFP_NOWAIT, 0, parent_bio);
}

/*----------------------------------------------------------------*/

/*
 * wake_worker() is used when new work is queued and when pool_resume is
 * ready to continue deferred IO processing.
 */
static void wake_worker(struct pool *pool)
{
	queue_work(pool->wq, &pool->worker);
}

/*----------------------------------------------------------------*/

static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio,
		      struct dm_bio_prison_cell **cell_result)
{
	int r;
	struct dm_bio_prison_cell *cell_prealloc;

	/*
	 * Allocate a cell from the prison's mempool.
	 * This might block but it can't fail.
	 */
	cell_prealloc = dm_bio_prison_alloc_cell(pool->prison, GFP_NOIO);

	r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result);
	if (r)
		/*
		 * We reused an old cell; we can get rid of
		 * the new one.
		 */
		dm_bio_prison_free_cell(pool->prison, cell_prealloc);

	return r;
}

static void cell_release(struct pool *pool,
			 struct dm_bio_prison_cell *cell,
			 struct bio_list *bios)
{
	dm_cell_release(pool->prison, cell, bios);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_visit_release(struct pool *pool,
			       void (*fn)(void *, struct dm_bio_prison_cell *),
			       void *context,
			       struct dm_bio_prison_cell *cell)
{
	dm_cell_visit_release(pool->prison, fn, context, cell);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_release_no_holder(struct pool *pool,
				   struct dm_bio_prison_cell *cell,
				   struct bio_list *bios)
{
	dm_cell_release_no_holder(pool->prison, cell, bios);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_error_with_code(struct pool *pool,
				 struct dm_bio_prison_cell *cell, int error_code)
{
	dm_cell_error(pool->prison, cell, error_code);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_error(struct pool *pool, struct dm_bio_prison_cell *cell)
{
	cell_error_with_code(pool, cell, -EIO);
}

static void cell_success(struct pool *pool, struct dm_bio_prison_cell *cell)
{
	cell_error_with_code(pool, cell, 0);
}

static void cell_requeue(struct pool *pool, struct dm_bio_prison_cell *cell)
{
	cell_error_with_code(pool, cell, DM_ENDIO_REQUEUE);
}

/*----------------------------------------------------------------*/

/*
 * A global list of pools that uses a struct mapped_device as a key.
 */
static struct dm_thin_pool_table {
	struct mutex mutex;
	struct list_head pools;
} dm_thin_pool_table;

static void pool_table_init(void)
{
	mutex_init(&dm_thin_pool_table.mutex);
	INIT_LIST_HEAD(&dm_thin_pool_table.pools);
}

static void __pool_table_insert(struct pool *pool)
{
	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
	list_add(&pool->list, &dm_thin_pool_table.pools);
}

static void __pool_table_remove(struct pool *pool)
{
	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
	list_del(&pool->list);
}

static struct pool *__pool_table_lookup(struct mapped_device *md)
{
	struct pool *pool = NULL, *tmp;

	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
		if (tmp->pool_md == md) {
			pool = tmp;
			break;
		}
	}

	return pool;
}

static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev)
{
	struct pool *pool = NULL, *tmp;

	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
		if (tmp->md_dev == md_dev) {
			pool = tmp;
			break;
		}
	}

	return pool;
}

/*----------------------------------------------------------------*/

struct dm_thin_endio_hook {
	struct thin_c *tc;
	struct dm_deferred_entry *shared_read_entry;
	struct dm_deferred_entry *all_io_entry;
	struct dm_thin_new_mapping *overwrite_mapping;
	struct rb_node rb_node;
	struct dm_bio_prison_cell *cell;
};

static void __merge_bio_list(struct bio_list *bios, struct bio_list *master)
{
	bio_list_merge(bios, master);
	bio_list_init(master);
}

static void error_bio_list(struct bio_list *bios, int error)
{
	struct bio *bio;

	while ((bio = bio_list_pop(bios)))
		bio_endio(bio, error);
}

static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master, int error)
{
	struct bio_list bios;
	unsigned long flags;

	bio_list_init(&bios);

	spin_lock_irqsave(&tc->lock, flags);
	__merge_bio_list(&bios, master);
	spin_unlock_irqrestore(&tc->lock, flags);

	error_bio_list(&bios, error);
}

static void requeue_deferred_cells(struct thin_c *tc)
{
	struct pool *pool = tc->pool;
	unsigned long flags;
	struct list_head cells;
	struct dm_bio_prison_cell *cell, *tmp;

	INIT_LIST_HEAD(&cells);

	spin_lock_irqsave(&tc->lock, flags);
	list_splice_init(&tc->deferred_cells, &cells);
	spin_unlock_irqrestore(&tc->lock, flags);

	list_for_each_entry_safe(cell, tmp, &cells, user_list)
		cell_requeue(pool, cell);
}

static void requeue_io(struct thin_c *tc)
{
	struct bio_list bios;
	unsigned long flags;

	bio_list_init(&bios);

	spin_lock_irqsave(&tc->lock, flags);
	__merge_bio_list(&bios, &tc->deferred_bio_list);
	__merge_bio_list(&bios, &tc->retry_on_resume_list);
	spin_unlock_irqrestore(&tc->lock, flags);

	error_bio_list(&bios, DM_ENDIO_REQUEUE);
	requeue_deferred_cells(tc);
}

static void error_retry_list_with_code(struct pool *pool, int error)
{
	struct thin_c *tc;

	rcu_read_lock();
	list_for_each_entry_rcu(tc, &pool->active_thins, list)
		error_thin_bio_list(tc, &tc->retry_on_resume_list, error);
	rcu_read_unlock();
}

static void error_retry_list(struct pool *pool)
{
	error_retry_list_with_code(pool, -EIO);
}

/*
 * This section of code contains the logic for processing a thin device's IO.
 * Much of the code depends on pool object resources (lists, workqueues, etc)
 * but most is exclusively called from the thin target rather than the thin-pool
 * target.
 */

static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
{
	struct pool *pool = tc->pool;
	sector_t block_nr = bio->bi_iter.bi_sector;

	if (block_size_is_power_of_two(pool))
		block_nr >>= pool->sectors_per_block_shift;
	else
		(void) sector_div(block_nr, pool->sectors_per_block);

	return block_nr;
}
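
/*
 * Example: with a 64KiB block size, sectors_per_block = 128 and
 * sectors_per_block_shift = 7, so a bio starting at sector 300 maps to
 * thin block 300 >> 7 = 2.
 */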

/*
 * Returns the _complete_ blocks that this bio covers.
 */
static void get_bio_block_range(struct thin_c *tc, struct bio *bio,
				dm_block_t *begin, dm_block_t *end)
{
	struct pool *pool = tc->pool;
	sector_t b = bio->bi_iter.bi_sector;
	sector_t e = b + (bio->bi_iter.bi_size >> SECTOR_SHIFT);

	b += pool->sectors_per_block - 1ull; /* so we round up */

	if (block_size_is_power_of_two(pool)) {
		b >>= pool->sectors_per_block_shift;
		e >>= pool->sectors_per_block_shift;
	} else {
		(void) sector_div(b, pool->sectors_per_block);
		(void) sector_div(e, pool->sectors_per_block);
	}

	if (e < b)
		/* Can happen if the bio is within a single block. */
		e = b;

	*begin = b;
	*end = e;
}
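
/*
 * Worked example: with 128-sector blocks, a bio covering sectors
 * 100..399 rounds begin up to block 1 ((100 + 127) >> 7) and truncates
 * end down to block 3 (400 >> 7), i.e. only blocks 1 and 2 are covered
 * completely; the partially covered blocks 0 and 3 are excluded.
 */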

static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
{
	struct pool *pool = tc->pool;
	sector_t bi_sector = bio->bi_iter.bi_sector;

	bio->bi_bdev = tc->pool_dev->bdev;
	if (block_size_is_power_of_two(pool))
		bio->bi_iter.bi_sector =
			(block << pool->sectors_per_block_shift) |
			(bi_sector & (pool->sectors_per_block - 1));
	else
		bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +
				sector_div(bi_sector, pool->sectors_per_block);
}
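
/*
 * The offset within the block is preserved: e.g. a bio at virtual
 * sector 300 (offset 44 within its 128-sector block) remapped to data
 * block 9 lands at sector (9 << 7) | 44 = 1196 of the data device.
 */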

static void remap_to_origin(struct thin_c *tc, struct bio *bio)
{
	bio->bi_bdev = tc->origin_dev->bdev;
}

static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
{
	return (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
		dm_thin_changed_this_transaction(tc->td);
}

static void inc_all_io_entry(struct pool *pool, struct bio *bio)
{
	struct dm_thin_endio_hook *h;

	if (bio->bi_rw & REQ_DISCARD)
		return;

	h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	h->all_io_entry = dm_deferred_entry_inc(pool->all_io_ds);
}

static void issue(struct thin_c *tc, struct bio *bio)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	if (!bio_triggers_commit(tc, bio)) {
		generic_make_request(bio);
		return;
	}

	/*
	 * Complete bio with an error if earlier I/O caused changes to
	 * the metadata that can't be committed, e.g. due to I/O errors
	 * on the metadata device.
	 */
	if (dm_thin_aborted_changes(tc->td)) {
		bio_io_error(bio);
		return;
	}

	/*
	 * Batch together any bios that trigger commits and then issue a
	 * single commit for them in process_deferred_bios().
	 */
	spin_lock_irqsave(&pool->lock, flags);
	bio_list_add(&pool->deferred_flush_bios, bio);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
{
	remap_to_origin(tc, bio);
	issue(tc, bio);
}

static void remap_and_issue(struct thin_c *tc, struct bio *bio,
			    dm_block_t block)
{
	remap(tc, bio, block);
	issue(tc, bio);
}

/*----------------------------------------------------------------*/

/*
 * Bio endio functions.
 */
struct dm_thin_new_mapping {
	struct list_head list;

	bool pass_discard:1;
	bool maybe_shared:1;

	/*
	 * Track quiescing, copying and zeroing preparation actions.  When this
	 * counter hits zero the block is prepared and can be inserted into the
	 * btree.
	 */
	atomic_t prepare_actions;

	int err;
	struct thin_c *tc;
	dm_block_t virt_begin, virt_end;
	dm_block_t data_block;
	struct dm_bio_prison_cell *cell;

	/*
	 * If the bio covers the whole area of a block then we can avoid
	 * zeroing or copying.  Instead this bio is hooked.  The bio will
	 * still be in the cell, so care has to be taken to avoid issuing
	 * the bio twice.
	 */
	struct bio *bio;
	bio_end_io_t *saved_bi_end_io;
};

static void __complete_mapping_preparation(struct dm_thin_new_mapping *m)
{
	struct pool *pool = m->tc->pool;

	if (atomic_dec_and_test(&m->prepare_actions)) {
		list_add_tail(&m->list, &pool->prepared_mappings);
		wake_worker(pool);
	}
}

static void complete_mapping_preparation(struct dm_thin_new_mapping *m)
{
	unsigned long flags;
	struct pool *pool = m->tc->pool;

	spin_lock_irqsave(&pool->lock, flags);
	__complete_mapping_preparation(m);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static void copy_complete(int read_err, unsigned long write_err, void *context)
{
	struct dm_thin_new_mapping *m = context;

	m->err = read_err || write_err ? -EIO : 0;
	complete_mapping_preparation(m);
}

static void overwrite_endio(struct bio *bio, int err)
{
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	struct dm_thin_new_mapping *m = h->overwrite_mapping;

	bio->bi_end_io = m->saved_bi_end_io;

	m->err = err;
	complete_mapping_preparation(m);
}

/*----------------------------------------------------------------*/

/*
 * Workqueue.
 */

/*
 * Prepared mapping jobs.
 */

/*
 * This sends the bios in the cell, except the original holder, back
 * to the deferred_bios list.
 */
static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&tc->lock, flags);
	cell_release_no_holder(pool, cell, &tc->deferred_bio_list);
	spin_unlock_irqrestore(&tc->lock, flags);

	wake_worker(pool);
}

static void thin_defer_bio(struct thin_c *tc, struct bio *bio);

struct remap_info {
	struct thin_c *tc;
	struct bio_list defer_bios;
	struct bio_list issue_bios;
};

static void __inc_remap_and_issue_cell(void *context,
				       struct dm_bio_prison_cell *cell)
{
	struct remap_info *info = context;
	struct bio *bio;

	while ((bio = bio_list_pop(&cell->bios))) {
		if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA))
			bio_list_add(&info->defer_bios, bio);
		else {
			inc_all_io_entry(info->tc->pool, bio);

			/*
			 * We can't issue the bios with the bio prison lock
			 * held, so we add them to a list to issue on
			 * return from this function.
			 */
			bio_list_add(&info->issue_bios, bio);
		}
	}
}

static void inc_remap_and_issue_cell(struct thin_c *tc,
				     struct dm_bio_prison_cell *cell,
				     dm_block_t block)
{
	struct bio *bio;
	struct remap_info info;

	info.tc = tc;
	bio_list_init(&info.defer_bios);
	bio_list_init(&info.issue_bios);

	/*
	 * We have to be careful to inc any bios we're about to issue
	 * before the cell is released, and avoid a race with new bios
	 * being added to the cell.
	 */
	cell_visit_release(tc->pool, __inc_remap_and_issue_cell,
			   &info, cell);

	while ((bio = bio_list_pop(&info.defer_bios)))
		thin_defer_bio(tc, bio);

	while ((bio = bio_list_pop(&info.issue_bios)))
		remap_and_issue(info.tc, bio, block);
}

static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
{
	cell_error(m->tc->pool, m->cell);
	list_del(&m->list);
	mempool_free(m, m->tc->pool->mapping_pool);
}

static void process_prepared_mapping(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;
	struct pool *pool = tc->pool;
	struct bio *bio = m->bio;
	int r;

	if (m->err) {
		cell_error(pool, m->cell);
		goto out;
	}

	/*
	 * Commit the prepared block into the mapping btree.
	 * Any I/O for this block arriving after this point will get
	 * remapped to it directly.
	 */
	r = dm_thin_insert_block(tc->td, m->virt_begin, m->data_block);
	if (r) {
		metadata_operation_failed(pool, "dm_thin_insert_block", r);
		cell_error(pool, m->cell);
		goto out;
	}

	/*
	 * Release any bios held while the block was being provisioned.
	 * If we are processing a write bio that completely covers the block,
	 * we already processed it so can ignore it now when processing
	 * the bios in the cell.
	 */
	if (bio) {
		inc_remap_and_issue_cell(tc, m->cell, m->data_block);
		bio_endio(bio, 0);
	} else {
		inc_all_io_entry(tc->pool, m->cell->holder);
		remap_and_issue(tc, m->cell->holder, m->data_block);
		inc_remap_and_issue_cell(tc, m->cell, m->data_block);
	}

out:
	list_del(&m->list);
	mempool_free(m, pool->mapping_pool);
}

/*----------------------------------------------------------------*/

static void free_discard_mapping(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;
	if (m->cell)
		cell_defer_no_holder(tc, m->cell);
	mempool_free(m, tc->pool->mapping_pool);
}

static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
{
	bio_io_error(m->bio);
	free_discard_mapping(m);
}

static void process_prepared_discard_success(struct dm_thin_new_mapping *m)
{
	bio_endio(m->bio, 0);
	free_discard_mapping(m);
}

static void process_prepared_discard_no_passdown(struct dm_thin_new_mapping *m)
{
	int r;
	struct thin_c *tc = m->tc;

	r = dm_thin_remove_range(tc->td, m->cell->key.block_begin, m->cell->key.block_end);
	if (r) {
		metadata_operation_failed(tc->pool, "dm_thin_remove_range", r);
		bio_io_error(m->bio);
	} else
		bio_endio(m->bio, 0);

	cell_defer_no_holder(tc, m->cell);
	mempool_free(m, tc->pool->mapping_pool);
}

static int passdown_double_checking_shared_status(struct dm_thin_new_mapping *m)
{
	/*
	 * We've already unmapped this range of blocks, but before we
	 * passdown we have to check that these blocks are now unused.
	 */
	int r;
	bool used = true;
	struct thin_c *tc = m->tc;
	struct pool *pool = tc->pool;
	dm_block_t b = m->data_block, e, end = m->data_block + m->virt_end - m->virt_begin;

	while (b != end) {
		/* find start of unmapped run */
		for (; b < end; b++) {
			r = dm_pool_block_is_used(pool->pmd, b, &used);
			if (r)
				return r;

			if (!used)
				break;
		}

		if (b == end)
			break;

		/* find end of run */
		for (e = b + 1; e != end; e++) {
			r = dm_pool_block_is_used(pool->pmd, e, &used);
			if (r)
				return r;

			if (used)
				break;
		}

		r = issue_discard(tc, b, e, m->bio);
		if (r)
			return r;

		b = e;
	}

	return 0;
}
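
/*
 * E.g. if the four blocks of the range have usage [used, free, free,
 * used] after the unmap, only the middle run of two free blocks is
 * discarded; blocks still referenced by other devices are left alone.
 */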

static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
{
	int r;
	struct thin_c *tc = m->tc;
	struct pool *pool = tc->pool;

	r = dm_thin_remove_range(tc->td, m->virt_begin, m->virt_end);
	if (r)
		metadata_operation_failed(pool, "dm_thin_remove_range", r);

	else if (m->maybe_shared)
		r = passdown_double_checking_shared_status(m);
	else
		r = issue_discard(tc, m->data_block, m->data_block + (m->virt_end - m->virt_begin), m->bio);

	/*
	 * Even if r is set, there could be sub discards in flight that we
	 * need to wait for.
	 */
	bio_endio(m->bio, r);
	cell_defer_no_holder(tc, m->cell);
	mempool_free(m, pool->mapping_pool);
}

static void process_prepared(struct pool *pool, struct list_head *head,
			     process_mapping_fn *fn)
{
	unsigned long flags;
	struct list_head maps;
	struct dm_thin_new_mapping *m, *tmp;

	INIT_LIST_HEAD(&maps);
	spin_lock_irqsave(&pool->lock, flags);
	list_splice_init(head, &maps);
	spin_unlock_irqrestore(&pool->lock, flags);

	list_for_each_entry_safe(m, tmp, &maps, list)
		(*fn)(m);
}

/*
 * Deferred bio jobs.
 */
static int io_overlaps_block(struct pool *pool, struct bio *bio)
{
	return bio->bi_iter.bi_size ==
		(pool->sectors_per_block << SECTOR_SHIFT);
}

static int io_overwrites_block(struct pool *pool, struct bio *bio)
{
	return (bio_data_dir(bio) == WRITE) &&
		io_overlaps_block(pool, bio);
}

static void save_and_set_endio(struct bio *bio, bio_end_io_t **save,
			       bio_end_io_t *fn)
{
	*save = bio->bi_end_io;
	bio->bi_end_io = fn;
}

static int ensure_next_mapping(struct pool *pool)
{
	if (pool->next_mapping)
		return 0;

	pool->next_mapping = mempool_alloc(pool->mapping_pool, GFP_ATOMIC);

	return pool->next_mapping ? 0 : -ENOMEM;
}

static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
{
	struct dm_thin_new_mapping *m = pool->next_mapping;

	BUG_ON(!pool->next_mapping);

	memset(m, 0, sizeof(struct dm_thin_new_mapping));
	INIT_LIST_HEAD(&m->list);
	m->bio = NULL;

	pool->next_mapping = NULL;

	return m;
}

static void ll_zero(struct thin_c *tc, struct dm_thin_new_mapping *m,
		    sector_t begin, sector_t end)
{
	int r;
	struct dm_io_region to;

	to.bdev = tc->pool_dev->bdev;
	to.sector = begin;
	to.count = end - begin;

	r = dm_kcopyd_zero(tc->pool->copier, 1, &to, 0, copy_complete, m);
	if (r < 0) {
		DMERR_LIMIT("dm_kcopyd_zero() failed");
		copy_complete(1, 1, m);
	}
}

static void remap_and_issue_overwrite(struct thin_c *tc, struct bio *bio,
				      dm_block_t data_begin,
				      struct dm_thin_new_mapping *m)
{
	struct pool *pool = tc->pool;
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

	h->overwrite_mapping = m;
	m->bio = bio;
	save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
	inc_all_io_entry(pool, bio);
	remap_and_issue(tc, bio, data_begin);
}

/*
 * A partial copy also needs to zero the uncopied region.
 */
static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
			  struct dm_dev *origin, dm_block_t data_origin,
			  dm_block_t data_dest,
			  struct dm_bio_prison_cell *cell, struct bio *bio,
			  sector_t len)
{
	int r;
	struct pool *pool = tc->pool;
	struct dm_thin_new_mapping *m = get_next_mapping(pool);

	m->tc = tc;
	m->virt_begin = virt_block;
	m->virt_end = virt_block + 1u;
	m->data_block = data_dest;
	m->cell = cell;

	/*
	 * quiesce action + copy action + an extra reference held for the
	 * duration of this function (we may need to inc later for a
	 * partial zero).
	 */
	atomic_set(&m->prepare_actions, 3);

	if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
		complete_mapping_preparation(m); /* already quiesced */

	/*
	 * IO to pool_dev remaps to the pool target's data_dev.
	 *
	 * If the whole block of data is being overwritten, we can issue the
	 * bio immediately. Otherwise we use kcopyd to clone the data first.
	 */
	if (io_overwrites_block(pool, bio))
		remap_and_issue_overwrite(tc, bio, data_dest, m);
	else {
		struct dm_io_region from, to;

		from.bdev = origin->bdev;
		from.sector = data_origin * pool->sectors_per_block;
		from.count = len;

		to.bdev = tc->pool_dev->bdev;
		to.sector = data_dest * pool->sectors_per_block;
		to.count = len;

		r = dm_kcopyd_copy(pool->copier, &from, 1, &to,
				   0, copy_complete, m);
		if (r < 0) {
			DMERR_LIMIT("dm_kcopyd_copy() failed");
			copy_complete(1, 1, m);

			/*
			 * We allow the zero to be issued, to simplify the
			 * error path.  Otherwise we'd need to start
			 * worrying about decrementing the prepare_actions
			 * counter.
			 */
		}

		/*
		 * Do we need to zero a tail region?
		 */
		if (len < pool->sectors_per_block && pool->pf.zero_new_blocks) {
			atomic_inc(&m->prepare_actions);
			ll_zero(tc, m,
				data_dest * pool->sectors_per_block + len,
				(data_dest + 1) * pool->sectors_per_block);
		}
	}

	complete_mapping_preparation(m); /* drop our ref */
}

static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
				   dm_block_t data_origin, dm_block_t data_dest,
				   struct dm_bio_prison_cell *cell, struct bio *bio)
{
	schedule_copy(tc, virt_block, tc->pool_dev,
		      data_origin, data_dest, cell, bio,
		      tc->pool->sectors_per_block);
}

static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
			  dm_block_t data_block, struct dm_bio_prison_cell *cell,
			  struct bio *bio)
{
	struct pool *pool = tc->pool;
	struct dm_thin_new_mapping *m = get_next_mapping(pool);

	atomic_set(&m->prepare_actions, 1); /* no need to quiesce */
	m->tc = tc;
	m->virt_begin = virt_block;
	m->virt_end = virt_block + 1u;
	m->data_block = data_block;
	m->cell = cell;

	/*
	 * If the whole block of data is being overwritten or we are not
	 * zeroing pre-existing data, we can issue the bio immediately.
	 * Otherwise we use kcopyd to zero the data first.
	 */
	if (pool->pf.zero_new_blocks) {
		if (io_overwrites_block(pool, bio))
			remap_and_issue_overwrite(tc, bio, data_block, m);
		else
			ll_zero(tc, m, data_block * pool->sectors_per_block,
				(data_block + 1) * pool->sectors_per_block);
	} else
		process_prepared_mapping(m);
}

static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
				   dm_block_t data_dest,
				   struct dm_bio_prison_cell *cell, struct bio *bio)
{
	struct pool *pool = tc->pool;
	sector_t virt_block_begin = virt_block * pool->sectors_per_block;
	sector_t virt_block_end = (virt_block + 1) * pool->sectors_per_block;

	if (virt_block_end <= tc->origin_size)
		schedule_copy(tc, virt_block, tc->origin_dev,
			      virt_block, data_dest, cell, bio,
			      pool->sectors_per_block);

	else if (virt_block_begin < tc->origin_size)
		schedule_copy(tc, virt_block, tc->origin_dev,
			      virt_block, data_dest, cell, bio,
			      tc->origin_size - virt_block_begin);

	else
		schedule_zero(tc, virt_block, data_dest, cell, bio);
}
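
/*
 * E.g. with 128-sector blocks and origin_size = 300 sectors: blocks 0
 * and 1 are copied in full; block 2 straddles the end of the origin,
 * so only its first 300 - 256 = 44 sectors are copied (schedule_copy
 * then zeroes the tail when zero_new_blocks is set); blocks beyond the
 * origin are simply zeroed.
 */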

static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);

static void check_for_space(struct pool *pool)
{
	int r;
	dm_block_t nr_free;

	if (get_pool_mode(pool) != PM_OUT_OF_DATA_SPACE)
		return;

	r = dm_pool_get_free_block_count(pool->pmd, &nr_free);
	if (r)
		return;

	if (nr_free)
		set_pool_mode(pool, PM_WRITE);
}

/*
 * A non-zero return indicates read_only or fail_io mode.
 * Many callers don't care about the return value.
 */
static int commit(struct pool *pool)
{
	int r;

	if (get_pool_mode(pool) >= PM_READ_ONLY)
		return -EINVAL;

	r = dm_pool_commit_metadata(pool->pmd);
	if (r)
		metadata_operation_failed(pool, "dm_pool_commit_metadata", r);
	else
		check_for_space(pool);

	return r;
}
1392
88a6621b
JT
1393static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks)
1394{
1395 unsigned long flags;
1396
1397 if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
1398 DMWARN("%s: reached low water mark for data device: sending event.",
1399 dm_device_name(pool->pool_md));
1400 spin_lock_irqsave(&pool->lock, flags);
1401 pool->low_water_triggered = true;
1402 spin_unlock_irqrestore(&pool->lock, flags);
1403 dm_table_event(pool->ti->table);
1404 }
1405}

static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
{
	int r;
	dm_block_t free_blocks;
	struct pool *pool = tc->pool;

	if (WARN_ON(get_pool_mode(pool) != PM_WRITE))
		return -EINVAL;

	r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
	if (r) {
		metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
		return r;
	}

	check_low_water_mark(pool, free_blocks);

	if (!free_blocks) {
		/*
		 * Try to commit to see if that will free up some
		 * more space.
		 */
		r = commit(pool);
		if (r)
			return r;

		r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
		if (r) {
			metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
			return r;
		}

		if (!free_blocks) {
			set_pool_mode(pool, PM_OUT_OF_DATA_SPACE);
			return -ENOSPC;
		}
	}

	r = dm_pool_alloc_data_block(pool->pmd, result);
	if (r) {
		metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
		return r;
	}

	return 0;
}
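
/*
 * Note: a commit can make blocks available because data blocks freed
 * within the current transaction (e.g. by discards) are not reusable
 * until that transaction has been committed.
 */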

/*
 * If we have run out of space, queue bios until the device is
 * resumed, presumably after having been reloaded with more space.
 */
static void retry_on_resume(struct bio *bio)
{
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	struct thin_c *tc = h->tc;
	unsigned long flags;

	spin_lock_irqsave(&tc->lock, flags);
	bio_list_add(&tc->retry_on_resume_list, bio);
	spin_unlock_irqrestore(&tc->lock, flags);
}

static int should_error_unserviceable_bio(struct pool *pool)
{
	enum pool_mode m = get_pool_mode(pool);

	switch (m) {
	case PM_WRITE:
		/* Shouldn't get here */
		DMERR_LIMIT("bio unserviceable, yet pool is in PM_WRITE mode");
		return -EIO;

	case PM_OUT_OF_DATA_SPACE:
		return pool->pf.error_if_no_space ? -ENOSPC : 0;

	case PM_READ_ONLY:
	case PM_FAIL:
		return -EIO;
	default:
		/* Shouldn't get here */
		DMERR_LIMIT("bio unserviceable, yet pool has an unknown mode");
		return -EIO;
	}
}

static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
{
	int error = should_error_unserviceable_bio(pool);

	if (error)
		bio_endio(bio, error);
	else
		retry_on_resume(bio);
}

static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *cell)
{
	struct bio *bio;
	struct bio_list bios;
	int error;

	error = should_error_unserviceable_bio(pool);
	if (error) {
		cell_error_with_code(pool, cell, error);
		return;
	}

	bio_list_init(&bios);
	cell_release(pool, cell, &bios);

	while ((bio = bio_list_pop(&bios)))
		retry_on_resume(bio);
}

static void process_discard_cell_no_passdown(struct thin_c *tc,
					     struct dm_bio_prison_cell *virt_cell)
{
	struct pool *pool = tc->pool;
	struct dm_thin_new_mapping *m = get_next_mapping(pool);

	/*
	 * We don't need to lock the data blocks, since there's no
	 * passdown.  We only lock data blocks for allocation and breaking sharing.
	 */
	m->tc = tc;
	m->virt_begin = virt_cell->key.block_begin;
	m->virt_end = virt_cell->key.block_end;
	m->cell = virt_cell;
	m->bio = virt_cell->holder;

	if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list))
		pool->process_prepared_discard(m);
}

/*
 * FIXME: DM local hack to defer a parent bio's end_io until we
 * _know_ all chained sub range discard bios have completed.
 * Will go away once late bio splitting lands upstream!
 */
static inline void __bio_inc_remaining(struct bio *bio)
{
	bio->bi_flags |= (1 << BIO_CHAIN);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_remaining);
}

static void break_up_discard_bio(struct thin_c *tc, dm_block_t begin, dm_block_t end,
				 struct bio *bio)
{
	struct pool *pool = tc->pool;

	int r;
	bool maybe_shared;
	struct dm_cell_key data_key;
	struct dm_bio_prison_cell *data_cell;
	struct dm_thin_new_mapping *m;
	dm_block_t virt_begin, virt_end, data_begin;

	while (begin != end) {
		r = ensure_next_mapping(pool);
		if (r)
			/* we did our best */
			return;

		r = dm_thin_find_mapped_range(tc->td, begin, end, &virt_begin, &virt_end,
					      &data_begin, &maybe_shared);
		if (r)
			/*
			 * Silently fail, letting any mappings we've
			 * created complete.
			 */
			break;

		build_key(tc->td, PHYSICAL, data_begin, data_begin + (virt_end - virt_begin), &data_key);
		if (bio_detain(tc->pool, &data_key, NULL, &data_cell)) {
			/* contention, we'll give up with this range */
			begin = virt_end;
			continue;
		}

		/*
		 * IO may still be going to the destination block.  We must
		 * quiesce before we can do the removal.
		 */
		m = get_next_mapping(pool);
		m->tc = tc;
		m->maybe_shared = maybe_shared;
		m->virt_begin = virt_begin;
		m->virt_end = virt_end;
		m->data_block = data_begin;
		m->cell = data_cell;
		m->bio = bio;

		/*
		 * The parent bio must not complete before sub discard bios are
		 * chained to it (see __blkdev_issue_discard_async's bio_chain)!
		 *
		 * This per-mapping bi_remaining increment is paired with
		 * the implicit decrement that occurs via bio_endio() in
		 * process_prepared_discard_{passdown,no_passdown}.
		 */
		__bio_inc_remaining(bio);
		if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list))
			pool->process_prepared_discard(m);

		begin = virt_end;
	}
}
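
/*
 * E.g. a discard of virtual blocks [0, 10) that maps to two separate
 * runs on the data device is split into one mapping per contiguous
 * mapped run, each with its own data cell and chained sub discard;
 * unmapped gaps in the range are skipped.
 */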

static void process_discard_cell_passdown(struct thin_c *tc, struct dm_bio_prison_cell *virt_cell)
{
	struct bio *bio = virt_cell->holder;
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

	/*
	 * The virt_cell will only get freed once the origin bio completes.
	 * This means it will remain locked while all the individual
	 * passdown bios are in flight.
	 */
	h->cell = virt_cell;
	break_up_discard_bio(tc, virt_cell->key.block_begin, virt_cell->key.block_end, bio);

	/*
	 * We complete the bio now, knowing that the bi_remaining field
	 * will prevent completion until the sub range discards have
	 * completed.
	 */
	bio_endio(bio, 0);
}

static void process_discard_bio(struct thin_c *tc, struct bio *bio)
{
	dm_block_t begin, end;
	struct dm_cell_key virt_key;
	struct dm_bio_prison_cell *virt_cell;

	get_bio_block_range(tc, bio, &begin, &end);
	if (begin == end) {
		/*
		 * The discard covers less than a block.
		 */
		bio_endio(bio, 0);
		return;
	}

	build_key(tc->td, VIRTUAL, begin, end, &virt_key);
	if (bio_detain(tc->pool, &virt_key, bio, &virt_cell))
		/*
		 * Potential starvation issue: We're relying on the
		 * fs/application being well behaved, and not trying to
		 * send IO to a region at the same time as discarding it.
		 * If they do this persistently then it's possible this
		 * cell will never be granted.
		 */
		return;

	tc->pool->process_discard_cell(tc, virt_cell);
}

static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
			  struct dm_cell_key *key,
			  struct dm_thin_lookup_result *lookup_result,
			  struct dm_bio_prison_cell *cell)
{
	int r;
	dm_block_t data_block;
	struct pool *pool = tc->pool;

	r = alloc_data_block(tc, &data_block);
	switch (r) {
	case 0:
		schedule_internal_copy(tc, block, lookup_result->block,
				       data_block, cell, bio);
		break;

	case -ENOSPC:
		retry_bios_on_resume(pool, cell);
		break;

	default:
		DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
			    __func__, r);
		cell_error(pool, cell);
		break;
	}
}

static void __remap_and_issue_shared_cell(void *context,
					  struct dm_bio_prison_cell *cell)
{
	struct remap_info *info = context;
	struct bio *bio;

	while ((bio = bio_list_pop(&cell->bios))) {
		if ((bio_data_dir(bio) == WRITE) ||
		    (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)))
			bio_list_add(&info->defer_bios, bio);
		else {
			struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

			h->shared_read_entry = dm_deferred_entry_inc(info->tc->pool->shared_read_ds);
			inc_all_io_entry(info->tc->pool, bio);
			bio_list_add(&info->issue_bios, bio);
		}
	}
}

static void remap_and_issue_shared_cell(struct thin_c *tc,
					struct dm_bio_prison_cell *cell,
					dm_block_t block)
{
	struct bio *bio;
	struct remap_info info;

	info.tc = tc;
	bio_list_init(&info.defer_bios);
	bio_list_init(&info.issue_bios);

	cell_visit_release(tc->pool, __remap_and_issue_shared_cell,
			   &info, cell);

	while ((bio = bio_list_pop(&info.defer_bios)))
		thin_defer_bio(tc, bio);

	while ((bio = bio_list_pop(&info.issue_bios)))
		remap_and_issue(tc, bio, block);
}

static void process_shared_bio(struct thin_c *tc, struct bio *bio,
			       dm_block_t block,
			       struct dm_thin_lookup_result *lookup_result,
			       struct dm_bio_prison_cell *virt_cell)
{
	struct dm_bio_prison_cell *data_cell;
	struct pool *pool = tc->pool;
	struct dm_cell_key key;

	/*
	 * If cell is already occupied, then sharing is already in the process
	 * of being broken so we have nothing further to do here.
	 */
	build_data_key(tc->td, lookup_result->block, &key);
	if (bio_detain(pool, &key, bio, &data_cell)) {
		cell_defer_no_holder(tc, virt_cell);
		return;
	}

	if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size) {
		break_sharing(tc, bio, block, &key, lookup_result, data_cell);
		cell_defer_no_holder(tc, virt_cell);
	} else {
		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

		h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds);
		inc_all_io_entry(pool, bio);
		remap_and_issue(tc, bio, lookup_result->block);

		remap_and_issue_shared_cell(tc, data_cell, lookup_result->block);
		remap_and_issue_shared_cell(tc, virt_cell, lookup_result->block);
	}
}

static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
			    struct dm_bio_prison_cell *cell)
{
	int r;
	dm_block_t data_block;
	struct pool *pool = tc->pool;

	/*
	 * Remap empty bios (flushes) immediately, without provisioning.
	 */
	if (!bio->bi_iter.bi_size) {
		inc_all_io_entry(pool, bio);
		cell_defer_no_holder(tc, cell);

		remap_and_issue(tc, bio, 0);
		return;
	}

	/*
	 * Fill read bios with zeroes and complete them immediately.
	 */
	if (bio_data_dir(bio) == READ) {
		zero_fill_bio(bio);
		cell_defer_no_holder(tc, cell);
		bio_endio(bio, 0);
		return;
	}

	r = alloc_data_block(tc, &data_block);
	switch (r) {
	case 0:
		if (tc->origin_dev)
			schedule_external_copy(tc, block, data_block, cell, bio);
		else
			schedule_zero(tc, block, data_block, cell, bio);
		break;

	case -ENOSPC:
		retry_bios_on_resume(pool, cell);
		break;

	default:
		DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
			    __func__, r);
		cell_error(pool, cell);
		break;
	}
}
1817
a374bb21 1818static void process_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
991d9fa0
JT
1819{
1820 int r;
6beca5eb 1821 struct pool *pool = tc->pool;
a374bb21 1822 struct bio *bio = cell->holder;
991d9fa0 1823 dm_block_t block = get_bio_block(tc, bio);
991d9fa0
JT
1824 struct dm_thin_lookup_result lookup_result;
1825
a374bb21
JT
1826 if (tc->requeue_mode) {
1827 cell_requeue(pool, cell);
991d9fa0 1828 return;
a374bb21 1829 }
991d9fa0
JT
1830
1831 r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
1832 switch (r) {
1833 case 0:
23ca2bb6
JT
1834 if (lookup_result.shared)
1835 process_shared_bio(tc, bio, block, &lookup_result, cell);
1836 else {
6beca5eb 1837 inc_all_io_entry(pool, bio);
991d9fa0 1838 remap_and_issue(tc, bio, lookup_result.block);
a374bb21 1839 inc_remap_and_issue_cell(tc, cell, lookup_result.block);
e8088073 1840 }
991d9fa0
JT
1841 break;
1842
1843 case -ENODATA:
2dd9c257 1844 if (bio_data_dir(bio) == READ && tc->origin_dev) {
6beca5eb 1845 inc_all_io_entry(pool, bio);
f286ba0e 1846 cell_defer_no_holder(tc, cell);
e8088073 1847
e5aea7b4
JT
1848 if (bio_end_sector(bio) <= tc->origin_size)
1849 remap_to_origin_and_issue(tc, bio);
1850
1851 else if (bio->bi_iter.bi_sector < tc->origin_size) {
1852 zero_fill_bio(bio);
1853 bio->bi_iter.bi_size = (tc->origin_size - bio->bi_iter.bi_sector) << SECTOR_SHIFT;
1854 remap_to_origin_and_issue(tc, bio);
1855
1856 } else {
1857 zero_fill_bio(bio);
1858 bio_endio(bio, 0);
1859 }
2dd9c257
JT
1860 } else
1861 provision_block(tc, bio, block, cell);
991d9fa0
JT
1862 break;
1863
1864 default:
c397741c
MS
1865 DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
1866 __func__, r);
f286ba0e 1867 cell_defer_no_holder(tc, cell);
991d9fa0
JT
1868 bio_io_error(bio);
1869 break;
1870 }
1871}
1872
a374bb21
JT
1873static void process_bio(struct thin_c *tc, struct bio *bio)
1874{
1875 struct pool *pool = tc->pool;
1876 dm_block_t block = get_bio_block(tc, bio);
1877 struct dm_bio_prison_cell *cell;
1878 struct dm_cell_key key;
1879
1880 /*
1881 * If cell is already occupied, then the block is already
1882 * being provisioned so we have nothing further to do here.
1883 */
1884 build_virtual_key(tc->td, block, &key);
1885 if (bio_detain(pool, &key, bio, &cell))
1886 return;
1887
1888 process_cell(tc, cell);
1889}
1890
1891static void __process_bio_read_only(struct thin_c *tc, struct bio *bio,
1892 struct dm_bio_prison_cell *cell)
e49e5829
JT
1893{
1894 int r;
1895 int rw = bio_data_dir(bio);
1896 dm_block_t block = get_bio_block(tc, bio);
1897 struct dm_thin_lookup_result lookup_result;
1898
1899 r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
1900 switch (r) {
1901 case 0:
a374bb21 1902 if (lookup_result.shared && (rw == WRITE) && bio->bi_iter.bi_size) {
8c0f0e8c 1903 handle_unserviceable_bio(tc->pool, bio);
a374bb21
JT
1904 if (cell)
1905 cell_defer_no_holder(tc, cell);
1906 } else {
e8088073 1907 inc_all_io_entry(tc->pool, bio);
e49e5829 1908 remap_and_issue(tc, bio, lookup_result.block);
a374bb21
JT
1909 if (cell)
1910 inc_remap_and_issue_cell(tc, cell, lookup_result.block);
e8088073 1911 }
e49e5829
JT
1912 break;
1913
1914 case -ENODATA:
a374bb21
JT
1915 if (cell)
1916 cell_defer_no_holder(tc, cell);
e49e5829 1917 if (rw != READ) {
8c0f0e8c 1918 handle_unserviceable_bio(tc->pool, bio);
e49e5829
JT
1919 break;
1920 }
1921
1922 if (tc->origin_dev) {
e8088073 1923 inc_all_io_entry(tc->pool, bio);
e49e5829
JT
1924 remap_to_origin_and_issue(tc, bio);
1925 break;
1926 }
1927
1928 zero_fill_bio(bio);
1929 bio_endio(bio, 0);
1930 break;
1931
1932 default:
c397741c
MS
1933 DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
1934 __func__, r);
a374bb21
JT
1935 if (cell)
1936 cell_defer_no_holder(tc, cell);
e49e5829
JT
1937 bio_io_error(bio);
1938 break;
1939 }
1940}
1941
a374bb21
JT
1942static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
1943{
1944 __process_bio_read_only(tc, bio, NULL);
1945}
1946
1947static void process_cell_read_only(struct thin_c *tc, struct dm_bio_prison_cell *cell)
1948{
1949 __process_bio_read_only(tc, cell->holder, cell);
1950}
1951
3e1a0699
JT
1952static void process_bio_success(struct thin_c *tc, struct bio *bio)
1953{
1954 bio_endio(bio, 0);
1955}
1956
e49e5829
JT
1957static void process_bio_fail(struct thin_c *tc, struct bio *bio)
1958{
1959 bio_io_error(bio);
1960}
1961
a374bb21
JT
1962static void process_cell_success(struct thin_c *tc, struct dm_bio_prison_cell *cell)
1963{
1964 cell_success(tc->pool, cell);
1965}
1966
1967static void process_cell_fail(struct thin_c *tc, struct dm_bio_prison_cell *cell)
1968{
1969 cell_error(tc->pool, cell);
1970}
1971
ac8c3f3d
JT
1972/*
1973 * FIXME: should we also commit due to size of transaction, measured in
1974 * metadata blocks?
1975 */
905e51b3
JT
1976static int need_commit_due_to_time(struct pool *pool)
1977{
0f30af98
MS
1978 return !time_in_range(jiffies, pool->last_commit_jiffies,
1979 pool->last_commit_jiffies + COMMIT_PERIOD);
905e51b3
JT
1980}
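
need_commit_due_to_time() asks whether more than COMMIT_PERIOD jiffies have passed since the last commit; time_in_range() keeps the comparison correct even when the jiffies counter wraps. A minimal userspace sketch of the same wraparound-safe test (PERIOD and the sample values are invented):

#include <stdbool.h>
#include <stdio.h>

#define PERIOD 100UL	/* stand-in for COMMIT_PERIOD */

/*
 * Unsigned subtraction yields the elapsed ticks even after 'now' has
 * wrapped past ULONG_MAX, which is the property time_in_range() relies on.
 */
static bool commit_due(unsigned long now, unsigned long last_commit)
{
	return (now - last_commit) >= PERIOD;
}

int main(void)
{
	printf("%d\n", commit_due(50, 10));			/* 0: only 40 ticks elapsed */
	printf("%d\n", commit_due(50, (unsigned long)-60));	/* 1: wrapped, 110 ticks elapsed */
	return 0;
}
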
1981
67324ea1
MS
1982#define thin_pbd(node) rb_entry((node), struct dm_thin_endio_hook, rb_node)
1983#define thin_bio(pbd) dm_bio_from_per_bio_data((pbd), sizeof(struct dm_thin_endio_hook))
1984
1985static void __thin_bio_rb_add(struct thin_c *tc, struct bio *bio)
1986{
1987 struct rb_node **rbp, *parent;
1988 struct dm_thin_endio_hook *pbd;
1989 sector_t bi_sector = bio->bi_iter.bi_sector;
1990
1991 rbp = &tc->sort_bio_list.rb_node;
1992 parent = NULL;
1993 while (*rbp) {
1994 parent = *rbp;
1995 pbd = thin_pbd(parent);
1996
1997 if (bi_sector < thin_bio(pbd)->bi_iter.bi_sector)
1998 rbp = &(*rbp)->rb_left;
1999 else
2000 rbp = &(*rbp)->rb_right;
2001 }
2002
2003 pbd = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
2004 rb_link_node(&pbd->rb_node, parent, rbp);
2005 rb_insert_color(&pbd->rb_node, &tc->sort_bio_list);
2006}
2007
2008static void __extract_sorted_bios(struct thin_c *tc)
2009{
2010 struct rb_node *node;
2011 struct dm_thin_endio_hook *pbd;
2012 struct bio *bio;
2013
2014 for (node = rb_first(&tc->sort_bio_list); node; node = rb_next(node)) {
2015 pbd = thin_pbd(node);
2016 bio = thin_bio(pbd);
2017
2018 bio_list_add(&tc->deferred_bio_list, bio);
2019 rb_erase(&pbd->rb_node, &tc->sort_bio_list);
2020 }
2021
2022 WARN_ON(!RB_EMPTY_ROOT(&tc->sort_bio_list));
2023}
2024
2025static void __sort_thin_deferred_bios(struct thin_c *tc)
2026{
2027 struct bio *bio;
2028 struct bio_list bios;
2029
2030 bio_list_init(&bios);
2031 bio_list_merge(&bios, &tc->deferred_bio_list);
2032 bio_list_init(&tc->deferred_bio_list);
2033
2034 /* Sort deferred_bio_list using an rb-tree */
2035 while ((bio = bio_list_pop(&bios)))
2036 __thin_bio_rb_add(tc, bio);
2037
2038 /*
2039 * Transfer the sorted bios in sort_bio_list back to
2040 * deferred_bio_list to allow lockless submission of
2041 * all bios.
2042 */
2043 __extract_sorted_bios(tc);
2044}
2045
c140e1c4 2046static void process_thin_deferred_bios(struct thin_c *tc)
991d9fa0 2047{
c140e1c4 2048 struct pool *pool = tc->pool;
991d9fa0
JT
2049 unsigned long flags;
2050 struct bio *bio;
2051 struct bio_list bios;
67324ea1 2052 struct blk_plug plug;
8a01a6af 2053 unsigned count = 0;
991d9fa0 2054
c140e1c4 2055 if (tc->requeue_mode) {
42d6a8ce 2056 error_thin_bio_list(tc, &tc->deferred_bio_list, DM_ENDIO_REQUEUE);
c140e1c4
MS
2057 return;
2058 }
2059
991d9fa0
JT
2060 bio_list_init(&bios);
2061
c140e1c4 2062 spin_lock_irqsave(&tc->lock, flags);
67324ea1
MS
2063
2064 if (bio_list_empty(&tc->deferred_bio_list)) {
2065 spin_unlock_irqrestore(&tc->lock, flags);
2066 return;
2067 }
2068
2069 __sort_thin_deferred_bios(tc);
2070
c140e1c4
MS
2071 bio_list_merge(&bios, &tc->deferred_bio_list);
2072 bio_list_init(&tc->deferred_bio_list);
67324ea1 2073
c140e1c4 2074 spin_unlock_irqrestore(&tc->lock, flags);
991d9fa0 2075
67324ea1 2076 blk_start_plug(&plug);
991d9fa0 2077 while ((bio = bio_list_pop(&bios))) {
991d9fa0
JT
2078 /*
2079 * If we've got no free new_mapping structs, and processing
2080 * this bio might require one, we pause until there are some
2081 * prepared mappings to process.
2082 */
2083 if (ensure_next_mapping(pool)) {
c140e1c4
MS
2084 spin_lock_irqsave(&tc->lock, flags);
2085 bio_list_add(&tc->deferred_bio_list, bio);
2086 bio_list_merge(&tc->deferred_bio_list, &bios);
2087 spin_unlock_irqrestore(&tc->lock, flags);
991d9fa0
JT
2088 break;
2089 }
104655fd
JT
2090
2091 if (bio->bi_rw & REQ_DISCARD)
e49e5829 2092 pool->process_discard(tc, bio);
104655fd 2093 else
e49e5829 2094 pool->process_bio(tc, bio);
8a01a6af
JT
2095
2096 if ((count++ & 127) == 0) {
7d327fe0 2097 throttle_work_update(&pool->throttle);
8a01a6af
JT
2098 dm_pool_issue_prefetches(pool->pmd);
2099 }
991d9fa0 2100 }
67324ea1 2101 blk_finish_plug(&plug);
c140e1c4
MS
2102}
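
The (count++ & 127) test in the loop above performs housekeeping (a throttle update and a metadata prefetch kick) on every 128th bio: for a power-of-two N, (count & (N - 1)) equals count % N. A small standalone illustration:

#include <stdio.h>

int main(void)
{
	unsigned count;

	for (count = 0; count < 300; count++)
		if ((count & 127) == 0)
			printf("housekeeping at bio %u\n", count);	/* fires at 0, 128, 256 */
	return 0;
}
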
2103
ac4c3f34
JT
2104static int cmp_cells(const void *lhs, const void *rhs)
2105{
2106 struct dm_bio_prison_cell *lhs_cell = *((struct dm_bio_prison_cell **) lhs);
2107 struct dm_bio_prison_cell *rhs_cell = *((struct dm_bio_prison_cell **) rhs);
2108
2109 BUG_ON(!lhs_cell->holder);
2110 BUG_ON(!rhs_cell->holder);
2111
2112 if (lhs_cell->holder->bi_iter.bi_sector < rhs_cell->holder->bi_iter.bi_sector)
2113 return -1;
2114
2115 if (lhs_cell->holder->bi_iter.bi_sector > rhs_cell->holder->bi_iter.bi_sector)
2116 return 1;
2117
2118 return 0;
2119}
2120
2121static unsigned sort_cells(struct pool *pool, struct list_head *cells)
2122{
2123 unsigned count = 0;
2124 struct dm_bio_prison_cell *cell, *tmp;
2125
2126 list_for_each_entry_safe(cell, tmp, cells, user_list) {
2127 if (count >= CELL_SORT_ARRAY_SIZE)
2128 break;
2129
2130 pool->cell_sort_array[count++] = cell;
2131 list_del(&cell->user_list);
2132 }
2133
2134 sort(pool->cell_sort_array, count, sizeof(cell), cmp_cells, NULL);
2135
2136 return count;
2137}
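
sort_cells() drains up to CELL_SORT_ARRAY_SIZE cells into a flat array and orders them by the holder bio's start sector, so the caller processes IO in roughly ascending disk order. A userspace sketch of the comparator driven by qsort(); fake_cell and the sample sectors are invented:

#include <stdio.h>
#include <stdlib.h>

struct fake_cell {
	unsigned long long bi_sector;	/* stand-in for cell->holder->bi_iter.bi_sector */
};

static int cmp_fake_cells(const void *lhs, const void *rhs)
{
	const struct fake_cell *l = *(const struct fake_cell * const *)lhs;
	const struct fake_cell *r = *(const struct fake_cell * const *)rhs;

	if (l->bi_sector < r->bi_sector)
		return -1;
	if (l->bi_sector > r->bi_sector)
		return 1;
	return 0;
}

int main(void)
{
	struct fake_cell a = { 4096 }, b = { 128 }, c = { 1024 };
	struct fake_cell *cells[] = { &a, &b, &c };
	int i;

	/*
	 * sizeof(cells[0]) is the size of a pointer, matching the
	 * sizeof(cell) element size passed to sort() in the driver.
	 */
	qsort(cells, 3, sizeof(cells[0]), cmp_fake_cells);

	for (i = 0; i < 3; i++)
		printf("%llu\n", cells[i]->bi_sector);	/* 128, 1024, 4096 */
	return 0;
}
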
2138
a374bb21
JT
2139static void process_thin_deferred_cells(struct thin_c *tc)
2140{
2141 struct pool *pool = tc->pool;
2142 unsigned long flags;
2143 struct list_head cells;
ac4c3f34
JT
2144 struct dm_bio_prison_cell *cell;
2145 unsigned i, j, count;
a374bb21
JT
2146
2147 INIT_LIST_HEAD(&cells);
2148
2149 spin_lock_irqsave(&tc->lock, flags);
2150 list_splice_init(&tc->deferred_cells, &cells);
2151 spin_unlock_irqrestore(&tc->lock, flags);
2152
2153 if (list_empty(&cells))
2154 return;
2155
ac4c3f34
JT
2156 do {
2157 count = sort_cells(tc->pool, &cells);
a374bb21 2158
ac4c3f34
JT
2159 for (i = 0; i < count; i++) {
2160 cell = pool->cell_sort_array[i];
2161 BUG_ON(!cell->holder);
a374bb21 2162
ac4c3f34
JT
2163 /*
2164 * If we've got no free new_mapping structs, and processing
2165 * this bio might require one, we pause until there are some
2166 * prepared mappings to process.
2167 */
2168 if (ensure_next_mapping(pool)) {
2169 for (j = i; j < count; j++)
2170 list_add(&pool->cell_sort_array[j]->user_list, &cells);
2171
2172 spin_lock_irqsave(&tc->lock, flags);
2173 list_splice(&cells, &tc->deferred_cells);
2174 spin_unlock_irqrestore(&tc->lock, flags);
2175 return;
2176 }
2177
2178 if (cell->holder->bi_rw & REQ_DISCARD)
2179 pool->process_discard_cell(tc, cell);
2180 else
2181 pool->process_cell(tc, cell);
2182 }
2183 } while (!list_empty(&cells));
a374bb21
JT
2184}
2185
b10ebd34
JT
2186static void thin_get(struct thin_c *tc);
2187static void thin_put(struct thin_c *tc);
2188
2189/*
2190 * We can't hold rcu_read_lock() around code that can block. So we
2191 * find a thin with the rcu lock held; bump a refcount; then drop
2192 * the lock.
2193 */
2194static struct thin_c *get_first_thin(struct pool *pool)
2195{
2196 struct thin_c *tc = NULL;
2197
2198 rcu_read_lock();
2199 if (!list_empty(&pool->active_thins)) {
2200 tc = list_entry_rcu(pool->active_thins.next, struct thin_c, list);
2201 thin_get(tc);
2202 }
2203 rcu_read_unlock();
2204
2205 return tc;
2206}
2207
2208static struct thin_c *get_next_thin(struct pool *pool, struct thin_c *tc)
2209{
2210 struct thin_c *old_tc = tc;
2211
2212 rcu_read_lock();
2213 list_for_each_entry_continue_rcu(tc, &pool->active_thins, list) {
2214 thin_get(tc);
2215 thin_put(old_tc);
2216 rcu_read_unlock();
2217 return tc;
2218 }
2219 thin_put(old_tc);
2220 rcu_read_unlock();
2221
2222 return NULL;
2223}
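
get_first_thin()/get_next_thin() implement the walk described in the comment: take the list lock (RCU here), bump a reference on the next thin, drop the reference on the current one, then release the lock so the caller is free to block. A userspace sketch of the same hand-over-hand pattern, with a plain mutex standing in for RCU; all types and names are invented, and nothing is freed when a refcount hits zero in this sketch:

#include <pthread.h>
#include <stdio.h>

struct thin {
	struct thin *next;
	int refcount;		/* only touched under list_lock here */
	const char *name;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static struct thin *get_next(struct thin *head, struct thin *cur)
{
	struct thin *next;

	pthread_mutex_lock(&list_lock);
	next = cur ? cur->next : head;
	if (next)
		next->refcount++;	/* thin_get() stand-in */
	if (cur)
		cur->refcount--;	/* thin_put() stand-in */
	pthread_mutex_unlock(&list_lock);

	return next;
}

int main(void)
{
	struct thin c = { NULL, 0, "c" }, b = { &c, 0, "b" }, a = { &b, 0, "a" };
	struct thin *tc = NULL;

	while ((tc = get_next(&a, tc)))
		printf("visiting %s\n", tc->name);	/* safe to block here */
	return 0;
}
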
2224
c140e1c4
MS
2225static void process_deferred_bios(struct pool *pool)
2226{
2227 unsigned long flags;
2228 struct bio *bio;
2229 struct bio_list bios;
2230 struct thin_c *tc;
2231
b10ebd34
JT
2232 tc = get_first_thin(pool);
2233 while (tc) {
a374bb21 2234 process_thin_deferred_cells(tc);
c140e1c4 2235 process_thin_deferred_bios(tc);
b10ebd34
JT
2236 tc = get_next_thin(pool, tc);
2237 }
991d9fa0
JT
2238
2239 /*
2240 * If there are any deferred flush bios, we must commit
2241 * the metadata before issuing them.
2242 */
2243 bio_list_init(&bios);
2244 spin_lock_irqsave(&pool->lock, flags);
2245 bio_list_merge(&bios, &pool->deferred_flush_bios);
2246 bio_list_init(&pool->deferred_flush_bios);
2247 spin_unlock_irqrestore(&pool->lock, flags);
2248
4d1662a3
MS
2249 if (bio_list_empty(&bios) &&
2250 !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool)))
991d9fa0
JT
2251 return;
2252
020cc3b5 2253 if (commit(pool)) {
991d9fa0
JT
2254 while ((bio = bio_list_pop(&bios)))
2255 bio_io_error(bio);
2256 return;
2257 }
905e51b3 2258 pool->last_commit_jiffies = jiffies;
991d9fa0
JT
2259
2260 while ((bio = bio_list_pop(&bios)))
2261 generic_make_request(bio);
2262}
2263
2264static void do_worker(struct work_struct *ws)
2265{
2266 struct pool *pool = container_of(ws, struct pool, worker);
2267
7d327fe0 2268 throttle_work_start(&pool->throttle);
8a01a6af 2269 dm_pool_issue_prefetches(pool->pmd);
7d327fe0 2270 throttle_work_update(&pool->throttle);
e49e5829 2271 process_prepared(pool, &pool->prepared_mappings, &pool->process_prepared_mapping);
7d327fe0 2272 throttle_work_update(&pool->throttle);
e49e5829 2273 process_prepared(pool, &pool->prepared_discards, &pool->process_prepared_discard);
7d327fe0 2274 throttle_work_update(&pool->throttle);
991d9fa0 2275 process_deferred_bios(pool);
7d327fe0 2276 throttle_work_complete(&pool->throttle);
991d9fa0
JT
2277}
2278
905e51b3
JT
2279/*
2280 * We want to commit periodically so that not too much
2281 * unwritten data builds up.
2282 */
2283static void do_waker(struct work_struct *ws)
2284{
2285 struct pool *pool = container_of(to_delayed_work(ws), struct pool, waker);
2286 wake_worker(pool);
2287 queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
2288}
2289
bcc696fa
MS
2290static void notify_of_pool_mode_change_to_oods(struct pool *pool);
2291
85ad643b
JT
2292/*
2293 * We're holding onto IO to allow userland time to react. After the
2294 * timeout either the pool will have been resized (and thus back in
bcc696fa 2295 * PM_WRITE mode), or we degrade to PM_OUT_OF_DATA_SPACE w/ error_if_no_space.
85ad643b
JT
2296 */
2297static void do_no_space_timeout(struct work_struct *ws)
2298{
2299 struct pool *pool = container_of(to_delayed_work(ws), struct pool,
2300 no_space_timeout);
2301
bcc696fa
MS
2302 if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) {
2303 pool->pf.error_if_no_space = true;
2304 notify_of_pool_mode_change_to_oods(pool);
0a927c2f 2305 error_retry_list_with_code(pool, -ENOSPC);
bcc696fa 2306 }
85ad643b
JT
2307}
2308
991d9fa0
JT
2309/*----------------------------------------------------------------*/
2310
e7a3e871 2311struct pool_work {
738211f7 2312 struct work_struct worker;
e7a3e871
JT
2313 struct completion complete;
2314};
2315
2316static struct pool_work *to_pool_work(struct work_struct *ws)
2317{
2318 return container_of(ws, struct pool_work, worker);
2319}
2320
2321static void pool_work_complete(struct pool_work *pw)
2322{
2323 complete(&pw->complete);
2324}
738211f7 2325
e7a3e871
JT
2326static void pool_work_wait(struct pool_work *pw, struct pool *pool,
2327 void (*fn)(struct work_struct *))
2328{
2329 INIT_WORK_ONSTACK(&pw->worker, fn);
2330 init_completion(&pw->complete);
2331 queue_work(pool->wq, &pw->worker);
2332 wait_for_completion(&pw->complete);
2333}
2334
2335/*----------------------------------------------------------------*/
2336
2337struct noflush_work {
2338 struct pool_work pw;
2339 struct thin_c *tc;
738211f7
JT
2340};
2341
e7a3e871 2342static struct noflush_work *to_noflush(struct work_struct *ws)
738211f7 2343{
e7a3e871 2344 return container_of(to_pool_work(ws), struct noflush_work, pw);
738211f7
JT
2345}
2346
2347static void do_noflush_start(struct work_struct *ws)
2348{
e7a3e871 2349 struct noflush_work *w = to_noflush(ws);
738211f7
JT
2350 w->tc->requeue_mode = true;
2351 requeue_io(w->tc);
e7a3e871 2352 pool_work_complete(&w->pw);
738211f7
JT
2353}
2354
2355static void do_noflush_stop(struct work_struct *ws)
2356{
e7a3e871 2357 struct noflush_work *w = to_noflush(ws);
738211f7 2358 w->tc->requeue_mode = false;
e7a3e871 2359 pool_work_complete(&w->pw);
738211f7
JT
2360}
2361
2362static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *))
2363{
2364 struct noflush_work w;
2365
738211f7 2366 w.tc = tc;
e7a3e871 2367 pool_work_wait(&w.pw, tc->pool, fn);
738211f7
JT
2368}
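
pool_work_wait() above packages the classic "run this on the worker and block until it finishes" idiom: INIT_WORK_ONSTACK() plus a completion let noflush_work() drive do_noflush_start/stop synchronously from another context. A userspace sketch of the idiom built from pthreads; every name below is an invented stand-in for the kernel primitive it mirrors:

#include <pthread.h>
#include <stdio.h>

struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
};

struct pool_work {
	void (*fn)(struct pool_work *pw);
	struct completion complete;
};

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = 1;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

static void *worker(void *arg)
{
	struct pool_work *pw = arg;

	pw->fn(pw);			/* the driver runs do_noflush_start/stop here */
	complete(&pw->complete);
	return NULL;
}

static void say_hello(struct pool_work *pw)
{
	(void)pw;
	printf("running on the worker\n");
}

int main(void)
{
	struct pool_work pw = {
		.fn = say_hello,
		.complete = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0 },
	};
	pthread_t t;

	pthread_create(&t, NULL, worker, &pw);	/* queue_work() stand-in */
	wait_for_completion(&pw.complete);	/* pool_work_wait() blocks here */
	pthread_join(&t, NULL);
	return 0;
}
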
2369
2370/*----------------------------------------------------------------*/
2371
e49e5829
JT
2372static enum pool_mode get_pool_mode(struct pool *pool)
2373{
2374 return pool->pf.mode;
2375}
2376
3e1a0699
JT
2377static void notify_of_pool_mode_change(struct pool *pool, const char *new_mode)
2378{
2379 dm_table_event(pool->ti->table);
2380 DMINFO("%s: switching pool to %s mode",
2381 dm_device_name(pool->pool_md), new_mode);
2382}
2383
bcc696fa
MS
2384static void notify_of_pool_mode_change_to_oods(struct pool *pool)
2385{
2386 if (!pool->pf.error_if_no_space)
2387 notify_of_pool_mode_change(pool, "out-of-data-space (queue IO)");
2388 else
2389 notify_of_pool_mode_change(pool, "out-of-data-space (error IO)");
2390}
2391
34fbcf62
JT
2392static bool passdown_enabled(struct pool_c *pt)
2393{
2394 return pt->adjusted_pf.discard_passdown;
2395}
2396
2397static void set_discard_callbacks(struct pool *pool)
2398{
2399 struct pool_c *pt = pool->ti->private;
2400
2401 if (passdown_enabled(pt)) {
2402 pool->process_discard_cell = process_discard_cell_passdown;
2403 pool->process_prepared_discard = process_prepared_discard_passdown;
2404 } else {
2405 pool->process_discard_cell = process_discard_cell_no_passdown;
2406 pool->process_prepared_discard = process_prepared_discard_no_passdown;
2407 }
2408}
2409
8b64e881 2410static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
e49e5829 2411{
cdc2b415 2412 struct pool_c *pt = pool->ti->private;
07f2b6e0
MS
2413 bool needs_check = dm_pool_metadata_needs_check(pool->pmd);
2414 enum pool_mode old_mode = get_pool_mode(pool);
80c57893 2415 unsigned long no_space_timeout = ACCESS_ONCE(no_space_timeout_secs) * HZ;
07f2b6e0
MS
2416
2417 /*
2418 * Never allow the pool to transition to PM_WRITE mode if user
2419 * intervention is required to verify metadata and data consistency.
2420 */
2421 if (new_mode == PM_WRITE && needs_check) {
2422 DMERR("%s: unable to switch pool to write mode until repaired.",
2423 dm_device_name(pool->pool_md));
2424 if (old_mode != new_mode)
2425 new_mode = old_mode;
2426 else
2427 new_mode = PM_READ_ONLY;
2428 }
2429 /*
2430 * If we were in PM_FAIL mode, rollback of metadata failed. We're
2431 * not going to recover without a thin_repair. So we never let the
2432 * pool move out of the old mode.
2433 */
2434 if (old_mode == PM_FAIL)
2435 new_mode = old_mode;
e49e5829 2436
8b64e881 2437 switch (new_mode) {
e49e5829 2438 case PM_FAIL:
8b64e881 2439 if (old_mode != new_mode)
3e1a0699 2440 notify_of_pool_mode_change(pool, "failure");
5383ef3a 2441 dm_pool_metadata_read_only(pool->pmd);
e49e5829
JT
2442 pool->process_bio = process_bio_fail;
2443 pool->process_discard = process_bio_fail;
a374bb21
JT
2444 pool->process_cell = process_cell_fail;
2445 pool->process_discard_cell = process_cell_fail;
e49e5829
JT
2446 pool->process_prepared_mapping = process_prepared_mapping_fail;
2447 pool->process_prepared_discard = process_prepared_discard_fail;
3e1a0699
JT
2448
2449 error_retry_list(pool);
e49e5829
JT
2450 break;
2451
2452 case PM_READ_ONLY:
8b64e881 2453 if (old_mode != new_mode)
3e1a0699
JT
2454 notify_of_pool_mode_change(pool, "read-only");
2455 dm_pool_metadata_read_only(pool->pmd);
2456 pool->process_bio = process_bio_read_only;
2457 pool->process_discard = process_bio_success;
a374bb21
JT
2458 pool->process_cell = process_cell_read_only;
2459 pool->process_discard_cell = process_cell_success;
3e1a0699 2460 pool->process_prepared_mapping = process_prepared_mapping_fail;
34fbcf62 2461 pool->process_prepared_discard = process_prepared_discard_success;
3e1a0699
JT
2462
2463 error_retry_list(pool);
2464 break;
2465
2466 case PM_OUT_OF_DATA_SPACE:
2467 /*
2468 * Ideally we'd never hit this state; the low water mark
2469 * would trigger userland to extend the pool before we
2470 * completely run out of data space. However, many small
2471 * IOs to unprovisioned space can consume data space at an
2472 * alarming rate. Adjust your low water mark if you're
2473 * frequently seeing this mode.
2474 */
2475 if (old_mode != new_mode)
bcc696fa 2476 notify_of_pool_mode_change_to_oods(pool);
3e1a0699 2477 pool->process_bio = process_bio_read_only;
a374bb21
JT
2478 pool->process_discard = process_discard_bio;
2479 pool->process_cell = process_cell_read_only;
3e1a0699 2480 pool->process_prepared_mapping = process_prepared_mapping;
34fbcf62 2481 set_discard_callbacks(pool);
85ad643b 2482
80c57893
MS
2483 if (!pool->pf.error_if_no_space && no_space_timeout)
2484 queue_delayed_work(pool->wq, &pool->no_space_timeout, no_space_timeout);
e49e5829
JT
2485 break;
2486
2487 case PM_WRITE:
8b64e881 2488 if (old_mode != new_mode)
3e1a0699 2489 notify_of_pool_mode_change(pool, "write");
9b7aaa64 2490 dm_pool_metadata_read_write(pool->pmd);
e49e5829 2491 pool->process_bio = process_bio;
a374bb21
JT
2492 pool->process_discard = process_discard_bio;
2493 pool->process_cell = process_cell;
e49e5829 2494 pool->process_prepared_mapping = process_prepared_mapping;
34fbcf62 2495 set_discard_callbacks(pool);
e49e5829
JT
2496 break;
2497 }
8b64e881
MS
2498
2499 pool->pf.mode = new_mode;
cdc2b415
MS
2500 /*
2501 * The pool mode may have changed; sync it so bind_control_target()
2502 * doesn't cause an unexpected mode transition on resume.
2503 */
2504 pt->adjusted_pf.mode = new_mode;
e49e5829
JT
2505}
2506
07f2b6e0 2507static void abort_transaction(struct pool *pool)
b5330655 2508{
07f2b6e0
MS
2509 const char *dev_name = dm_device_name(pool->pool_md);
2510
2511 DMERR_LIMIT("%s: aborting current metadata transaction", dev_name);
2512 if (dm_pool_abort_metadata(pool->pmd)) {
2513 DMERR("%s: failed to abort metadata transaction", dev_name);
2514 set_pool_mode(pool, PM_FAIL);
2515 }
2516
2517 if (dm_pool_metadata_set_needs_check(pool->pmd)) {
2518 DMERR("%s: failed to set 'needs_check' flag in metadata", dev_name);
2519 set_pool_mode(pool, PM_FAIL);
2520 }
2521}
399caddf 2522
07f2b6e0
MS
2523static void metadata_operation_failed(struct pool *pool, const char *op, int r)
2524{
b5330655
JT
2525 DMERR_LIMIT("%s: metadata operation '%s' failed: error = %d",
2526 dm_device_name(pool->pool_md), op, r);
2527
07f2b6e0 2528 abort_transaction(pool);
b5330655
JT
2529 set_pool_mode(pool, PM_READ_ONLY);
2530}
2531
e49e5829
JT
2532/*----------------------------------------------------------------*/
2533
991d9fa0
JT
2534/*
2535 * Mapping functions.
2536 */
2537
2538/*
2539 * Called only while mapping a thin bio to hand it over to the workqueue.
2540 */
2541static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
2542{
2543 unsigned long flags;
2544 struct pool *pool = tc->pool;
2545
c140e1c4
MS
2546 spin_lock_irqsave(&tc->lock, flags);
2547 bio_list_add(&tc->deferred_bio_list, bio);
2548 spin_unlock_irqrestore(&tc->lock, flags);
991d9fa0
JT
2549
2550 wake_worker(pool);
2551}
2552
7d327fe0
JT
2553static void thin_defer_bio_with_throttle(struct thin_c *tc, struct bio *bio)
2554{
2555 struct pool *pool = tc->pool;
2556
2557 throttle_lock(&pool->throttle);
2558 thin_defer_bio(tc, bio);
2559 throttle_unlock(&pool->throttle);
2560}
2561
a374bb21
JT
2562static void thin_defer_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
2563{
2564 unsigned long flags;
2565 struct pool *pool = tc->pool;
2566
2567 throttle_lock(&pool->throttle);
2568 spin_lock_irqsave(&tc->lock, flags);
2569 list_add_tail(&cell->user_list, &tc->deferred_cells);
2570 spin_unlock_irqrestore(&tc->lock, flags);
2571 throttle_unlock(&pool->throttle);
2572
2573 wake_worker(pool);
2574}
2575
59c3d2c6 2576static void thin_hook_bio(struct thin_c *tc, struct bio *bio)
eb2aa48d 2577{
59c3d2c6 2578 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
eb2aa48d
JT
2579
2580 h->tc = tc;
2581 h->shared_read_entry = NULL;
e8088073 2582 h->all_io_entry = NULL;
eb2aa48d 2583 h->overwrite_mapping = NULL;
34fbcf62 2584 h->cell = NULL;
eb2aa48d
JT
2585}
2586
991d9fa0
JT
2587/*
2588 * Non-blocking function called from the thin target's map function.
2589 */
7de3ee57 2590static int thin_bio_map(struct dm_target *ti, struct bio *bio)
991d9fa0
JT
2591{
2592 int r;
2593 struct thin_c *tc = ti->private;
2594 dm_block_t block = get_bio_block(tc, bio);
2595 struct dm_thin_device *td = tc->td;
2596 struct dm_thin_lookup_result result;
a374bb21 2597 struct dm_bio_prison_cell *virt_cell, *data_cell;
e8088073 2598 struct dm_cell_key key;
991d9fa0 2599
59c3d2c6 2600 thin_hook_bio(tc, bio);
e49e5829 2601
738211f7
JT
2602 if (tc->requeue_mode) {
2603 bio_endio(bio, DM_ENDIO_REQUEUE);
2604 return DM_MAPIO_SUBMITTED;
2605 }
2606
e49e5829
JT
2607 if (get_pool_mode(tc->pool) == PM_FAIL) {
2608 bio_io_error(bio);
2609 return DM_MAPIO_SUBMITTED;
2610 }
2611
104655fd 2612 if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)) {
7d327fe0 2613 thin_defer_bio_with_throttle(tc, bio);
991d9fa0
JT
2614 return DM_MAPIO_SUBMITTED;
2615 }
2616
c822ed96
JT
2617 /*
2618 * We must hold the virtual cell before doing the lookup, otherwise
2619 * there's a race with discard.
2620 */
2621 build_virtual_key(tc->td, block, &key);
a374bb21 2622 if (bio_detain(tc->pool, &key, bio, &virt_cell))
c822ed96
JT
2623 return DM_MAPIO_SUBMITTED;
2624
991d9fa0
JT
2625 r = dm_thin_find_block(td, block, 0, &result);
2626
2627 /*
2628 * Note that we defer readahead too.
2629 */
2630 switch (r) {
2631 case 0:
2632 if (unlikely(result.shared)) {
2633 /*
2634 * We have a race condition here between the
2635 * result.shared value returned by the lookup and
2636 * snapshot creation, which may cause new
2637 * sharing.
2638 *
2639 * To avoid this, always quiesce the origin before
2640 * taking the snap. You want to do this anyway to
2641 * ensure a consistent application view
2642 * (i.e. lockfs).
2643 *
2644 * More distant ancestors are irrelevant. The
2645 * shared flag will be set in their case.
2646 */
a374bb21 2647 thin_defer_cell(tc, virt_cell);
e8088073 2648 return DM_MAPIO_SUBMITTED;
991d9fa0 2649 }
e8088073 2650
e8088073 2651 build_data_key(tc->td, result.block, &key);
a374bb21
JT
2652 if (bio_detain(tc->pool, &key, bio, &data_cell)) {
2653 cell_defer_no_holder(tc, virt_cell);
e8088073
JT
2654 return DM_MAPIO_SUBMITTED;
2655 }
2656
2657 inc_all_io_entry(tc->pool, bio);
a374bb21
JT
2658 cell_defer_no_holder(tc, data_cell);
2659 cell_defer_no_holder(tc, virt_cell);
e8088073
JT
2660
2661 remap(tc, bio, result.block);
2662 return DM_MAPIO_REMAPPED;
991d9fa0
JT
2663
2664 case -ENODATA:
e49e5829 2665 case -EWOULDBLOCK:
a374bb21 2666 thin_defer_cell(tc, virt_cell);
2aab3850 2667 return DM_MAPIO_SUBMITTED;
e49e5829
JT
2668
2669 default:
2670 /*
2671 * Must always call bio_io_error on failure.
2672 * dm_thin_find_block can fail with -EINVAL if the
2673 * pool is switched to fail-io mode.
2674 */
2675 bio_io_error(bio);
a374bb21 2676 cell_defer_no_holder(tc, virt_cell);
2aab3850 2677 return DM_MAPIO_SUBMITTED;
991d9fa0 2678 }
991d9fa0
JT
2679}
2680
2681static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
2682{
991d9fa0 2683 struct pool_c *pt = container_of(cb, struct pool_c, callbacks);
760fe67e 2684 struct request_queue *q;
991d9fa0 2685
760fe67e
MS
2686 if (get_pool_mode(pt->pool) == PM_OUT_OF_DATA_SPACE)
2687 return 1;
991d9fa0 2688
760fe67e
MS
2689 q = bdev_get_queue(pt->data_dev->bdev);
2690 return bdi_congested(&q->backing_dev_info, bdi_bits);
991d9fa0
JT
2691}
2692
c140e1c4 2693static void requeue_bios(struct pool *pool)
991d9fa0 2694{
c140e1c4
MS
2695 unsigned long flags;
2696 struct thin_c *tc;
2697
2698 rcu_read_lock();
2699 list_for_each_entry_rcu(tc, &pool->active_thins, list) {
2700 spin_lock_irqsave(&tc->lock, flags);
2701 bio_list_merge(&tc->deferred_bio_list, &tc->retry_on_resume_list);
2702 bio_list_init(&tc->retry_on_resume_list);
2703 spin_unlock_irqrestore(&tc->lock, flags);
2704 }
2705 rcu_read_unlock();
991d9fa0
JT
2706}
2707
2708/*----------------------------------------------------------------
2709 * Binding of control targets to a pool object
2710 *--------------------------------------------------------------*/
9bc142dd
MS
2711static bool data_dev_supports_discard(struct pool_c *pt)
2712{
2713 struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
2714
2715 return q && blk_queue_discard(q);
2716}
2717
58051b94
JT
2718static bool is_factor(sector_t block_size, uint32_t n)
2719{
2720 return !sector_div(block_size, n);
2721}
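
is_factor() leans on sector_div(), which divides its first argument in place and returns the remainder, so the negation reads as "n divides block_size exactly". A userspace sketch with invented names:

#include <stdio.h>

static unsigned long sector_div_sketch(unsigned long long *dividend, unsigned long divisor)
{
	unsigned long rem = *dividend % divisor;

	*dividend /= divisor;
	return rem;
}

static int is_factor_sketch(unsigned long long block_size, unsigned long n)
{
	return !sector_div_sketch(&block_size, n);
}

int main(void)
{
	printf("%d\n", is_factor_sketch(128, 64));	/* 1: 64 divides 128 */
	printf("%d\n", is_factor_sketch(96, 64));	/* 0: remainder is 32 */
	return 0;
}
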
2722
9bc142dd
MS
2723/*
2724 * If discard_passdown was enabled verify that the data device
0424caa1 2725 * supports discards. Disable discard_passdown if not.
9bc142dd 2726 */
0424caa1 2727static void disable_passdown_if_not_supported(struct pool_c *pt)
9bc142dd 2728{
0424caa1
MS
2729 struct pool *pool = pt->pool;
2730 struct block_device *data_bdev = pt->data_dev->bdev;
2731 struct queue_limits *data_limits = &bdev_get_queue(data_bdev)->limits;
0424caa1 2732 const char *reason = NULL;
9bc142dd
MS
2733 char buf[BDEVNAME_SIZE];
2734
0424caa1 2735 if (!pt->adjusted_pf.discard_passdown)
9bc142dd
MS
2736 return;
2737
0424caa1
MS
2738 if (!data_dev_supports_discard(pt))
2739 reason = "discard unsupported";
2740
2741 else if (data_limits->max_discard_sectors < pool->sectors_per_block)
2742 reason = "max discard sectors smaller than a block";
9bc142dd 2743
0424caa1
MS
2744 if (reason) {
2745 DMWARN("Data device (%s) %s: Disabling discard passdown.", bdevname(data_bdev, buf), reason);
2746 pt->adjusted_pf.discard_passdown = false;
2747 }
9bc142dd
MS
2748}
2749
991d9fa0
JT
2750static int bind_control_target(struct pool *pool, struct dm_target *ti)
2751{
2752 struct pool_c *pt = ti->private;
2753
e49e5829 2754 /*
9b7aaa64 2755 * We want to make sure that a pool in PM_FAIL mode is never upgraded.
e49e5829 2756 */
07f2b6e0 2757 enum pool_mode old_mode = get_pool_mode(pool);
0424caa1 2758 enum pool_mode new_mode = pt->adjusted_pf.mode;
e49e5829 2759
8b64e881
MS
2760 /*
2761 * Don't change the pool's mode until set_pool_mode() below.
2762 * Otherwise the pool's process_* function pointers may
2763 * not match the desired pool mode.
2764 */
2765 pt->adjusted_pf.mode = old_mode;
2766
2767 pool->ti = ti;
2768 pool->pf = pt->adjusted_pf;
2769 pool->low_water_blocks = pt->low_water_blocks;
2770
9bc142dd 2771 set_pool_mode(pool, new_mode);
f402693d 2772
991d9fa0
JT
2773 return 0;
2774}
2775
2776static void unbind_control_target(struct pool *pool, struct dm_target *ti)
2777{
2778 if (pool->ti == ti)
2779 pool->ti = NULL;
2780}
2781
2782/*----------------------------------------------------------------
2783 * Pool creation
2784 *--------------------------------------------------------------*/
67e2e2b2
JT
2785/* Initialize pool features. */
2786static void pool_features_init(struct pool_features *pf)
2787{
e49e5829 2788 pf->mode = PM_WRITE;
9bc142dd
MS
2789 pf->zero_new_blocks = true;
2790 pf->discard_enabled = true;
2791 pf->discard_passdown = true;
787a996c 2792 pf->error_if_no_space = false;
67e2e2b2
JT
2793}
2794
991d9fa0
JT
2795static void __pool_destroy(struct pool *pool)
2796{
2797 __pool_table_remove(pool);
2798
a822c83e 2799 vfree(pool->cell_sort_array);
991d9fa0
JT
2800 if (dm_pool_metadata_close(pool->pmd) < 0)
2801 DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
2802
44feb387 2803 dm_bio_prison_destroy(pool->prison);
991d9fa0
JT
2804 dm_kcopyd_client_destroy(pool->copier);
2805
2806 if (pool->wq)
2807 destroy_workqueue(pool->wq);
2808
2809 if (pool->next_mapping)
2810 mempool_free(pool->next_mapping, pool->mapping_pool);
2811 mempool_destroy(pool->mapping_pool);
44feb387
MS
2812 dm_deferred_set_destroy(pool->shared_read_ds);
2813 dm_deferred_set_destroy(pool->all_io_ds);
991d9fa0
JT
2814 kfree(pool);
2815}
2816
a24c2569 2817static struct kmem_cache *_new_mapping_cache;
a24c2569 2818
991d9fa0
JT
2819static struct pool *pool_create(struct mapped_device *pool_md,
2820 struct block_device *metadata_dev,
e49e5829
JT
2821 unsigned long block_size,
2822 int read_only, char **error)
991d9fa0
JT
2823{
2824 int r;
2825 void *err_p;
2826 struct pool *pool;
2827 struct dm_pool_metadata *pmd;
e49e5829 2828 bool format_device = read_only ? false : true;
991d9fa0 2829
e49e5829 2830 pmd = dm_pool_metadata_open(metadata_dev, block_size, format_device);
991d9fa0
JT
2831 if (IS_ERR(pmd)) {
2832 *error = "Error creating metadata object";
2833 return (struct pool *)pmd;
2834 }
2835
2836 pool = kmalloc(sizeof(*pool), GFP_KERNEL);
2837 if (!pool) {
2838 *error = "Error allocating memory for pool";
2839 err_p = ERR_PTR(-ENOMEM);
2840 goto bad_pool;
2841 }
2842
2843 pool->pmd = pmd;
2844 pool->sectors_per_block = block_size;
f9a8e0cd
MP
2845 if (block_size & (block_size - 1))
2846 pool->sectors_per_block_shift = -1;
2847 else
2848 pool->sectors_per_block_shift = __ffs(block_size);
991d9fa0 2849 pool->low_water_blocks = 0;
67e2e2b2 2850 pool_features_init(&pool->pf);
a195db2d 2851 pool->prison = dm_bio_prison_create();
991d9fa0
JT
2852 if (!pool->prison) {
2853 *error = "Error creating pool's bio prison";
2854 err_p = ERR_PTR(-ENOMEM);
2855 goto bad_prison;
2856 }
2857
df5d2e90 2858 pool->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
991d9fa0
JT
2859 if (IS_ERR(pool->copier)) {
2860 r = PTR_ERR(pool->copier);
2861 *error = "Error creating pool's kcopyd client";
2862 err_p = ERR_PTR(r);
2863 goto bad_kcopyd_client;
2864 }
2865
2866 /*
2867 * Create singlethreaded workqueue that will service all devices
2868 * that use this metadata.
2869 */
2870 pool->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
2871 if (!pool->wq) {
2872 *error = "Error creating pool's workqueue";
2873 err_p = ERR_PTR(-ENOMEM);
2874 goto bad_wq;
2875 }
2876
7d327fe0 2877 throttle_init(&pool->throttle);
991d9fa0 2878 INIT_WORK(&pool->worker, do_worker);
905e51b3 2879 INIT_DELAYED_WORK(&pool->waker, do_waker);
85ad643b 2880 INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout);
991d9fa0 2881 spin_lock_init(&pool->lock);
991d9fa0
JT
2882 bio_list_init(&pool->deferred_flush_bios);
2883 INIT_LIST_HEAD(&pool->prepared_mappings);
104655fd 2884 INIT_LIST_HEAD(&pool->prepared_discards);
c140e1c4 2885 INIT_LIST_HEAD(&pool->active_thins);
88a6621b 2886 pool->low_water_triggered = false;
80e96c54 2887 pool->suspended = true;
44feb387
MS
2888
2889 pool->shared_read_ds = dm_deferred_set_create();
2890 if (!pool->shared_read_ds) {
2891 *error = "Error creating pool's shared read deferred set";
2892 err_p = ERR_PTR(-ENOMEM);
2893 goto bad_shared_read_ds;
2894 }
2895
2896 pool->all_io_ds = dm_deferred_set_create();
2897 if (!pool->all_io_ds) {
2898 *error = "Error creating pool's all io deferred set";
2899 err_p = ERR_PTR(-ENOMEM);
2900 goto bad_all_io_ds;
2901 }
991d9fa0
JT
2902
2903 pool->next_mapping = NULL;
a24c2569
MS
2904 pool->mapping_pool = mempool_create_slab_pool(MAPPING_POOL_SIZE,
2905 _new_mapping_cache);
991d9fa0
JT
2906 if (!pool->mapping_pool) {
2907 *error = "Error creating pool's mapping mempool";
2908 err_p = ERR_PTR(-ENOMEM);
2909 goto bad_mapping_pool;
2910 }
2911
a822c83e
JT
2912 pool->cell_sort_array = vmalloc(sizeof(*pool->cell_sort_array) * CELL_SORT_ARRAY_SIZE);
2913 if (!pool->cell_sort_array) {
2914 *error = "Error allocating cell sort array";
2915 err_p = ERR_PTR(-ENOMEM);
2916 goto bad_sort_array;
2917 }
2918
991d9fa0 2919 pool->ref_count = 1;
905e51b3 2920 pool->last_commit_jiffies = jiffies;
991d9fa0
JT
2921 pool->pool_md = pool_md;
2922 pool->md_dev = metadata_dev;
2923 __pool_table_insert(pool);
2924
2925 return pool;
2926
a822c83e
JT
2927bad_sort_array:
2928 mempool_destroy(pool->mapping_pool);
991d9fa0 2929bad_mapping_pool:
44feb387
MS
2930 dm_deferred_set_destroy(pool->all_io_ds);
2931bad_all_io_ds:
2932 dm_deferred_set_destroy(pool->shared_read_ds);
2933bad_shared_read_ds:
991d9fa0
JT
2934 destroy_workqueue(pool->wq);
2935bad_wq:
2936 dm_kcopyd_client_destroy(pool->copier);
2937bad_kcopyd_client:
44feb387 2938 dm_bio_prison_destroy(pool->prison);
991d9fa0
JT
2939bad_prison:
2940 kfree(pool);
2941bad_pool:
2942 if (dm_pool_metadata_close(pmd))
2943 DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
2944
2945 return err_p;
2946}
2947
2948static void __pool_inc(struct pool *pool)
2949{
2950 BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
2951 pool->ref_count++;
2952}
2953
2954static void __pool_dec(struct pool *pool)
2955{
2956 BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
2957 BUG_ON(!pool->ref_count);
2958 if (!--pool->ref_count)
2959 __pool_destroy(pool);
2960}
2961
2962static struct pool *__pool_find(struct mapped_device *pool_md,
2963 struct block_device *metadata_dev,
e49e5829
JT
2964 unsigned long block_size, int read_only,
2965 char **error, int *created)
991d9fa0
JT
2966{
2967 struct pool *pool = __pool_table_lookup_metadata_dev(metadata_dev);
2968
2969 if (pool) {
f09996c9
MS
2970 if (pool->pool_md != pool_md) {
2971 *error = "metadata device already in use by a pool";
991d9fa0 2972 return ERR_PTR(-EBUSY);
f09996c9 2973 }
991d9fa0
JT
2974 __pool_inc(pool);
2975
2976 } else {
2977 pool = __pool_table_lookup(pool_md);
2978 if (pool) {
f09996c9
MS
2979 if (pool->md_dev != metadata_dev) {
2980 *error = "different pool cannot replace a pool";
991d9fa0 2981 return ERR_PTR(-EINVAL);
f09996c9 2982 }
991d9fa0
JT
2983 __pool_inc(pool);
2984
67e2e2b2 2985 } else {
e49e5829 2986 pool = pool_create(pool_md, metadata_dev, block_size, read_only, error);
67e2e2b2
JT
2987 *created = 1;
2988 }
991d9fa0
JT
2989 }
2990
2991 return pool;
2992}
2993
2994/*----------------------------------------------------------------
2995 * Pool target methods
2996 *--------------------------------------------------------------*/
2997static void pool_dtr(struct dm_target *ti)
2998{
2999 struct pool_c *pt = ti->private;
3000
3001 mutex_lock(&dm_thin_pool_table.mutex);
3002
3003 unbind_control_target(pt->pool, ti);
3004 __pool_dec(pt->pool);
3005 dm_put_device(ti, pt->metadata_dev);
3006 dm_put_device(ti, pt->data_dev);
3007 kfree(pt);
3008
3009 mutex_unlock(&dm_thin_pool_table.mutex);
3010}
3011
991d9fa0
JT
3012static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
3013 struct dm_target *ti)
3014{
3015 int r;
3016 unsigned argc;
3017 const char *arg_name;
3018
3019 static struct dm_arg _args[] = {
74aa45c3 3020 {0, 4, "Invalid number of pool feature arguments"},
991d9fa0
JT
3021 };
3022
3023 /*
3024 * No feature arguments supplied.
3025 */
3026 if (!as->argc)
3027 return 0;
3028
3029 r = dm_read_arg_group(_args, as, &argc, &ti->error);
3030 if (r)
3031 return -EINVAL;
3032
3033 while (argc && !r) {
3034 arg_name = dm_shift_arg(as);
3035 argc--;
3036
e49e5829 3037 if (!strcasecmp(arg_name, "skip_block_zeroing"))
9bc142dd 3038 pf->zero_new_blocks = false;
e49e5829
JT
3039
3040 else if (!strcasecmp(arg_name, "ignore_discard"))
9bc142dd 3041 pf->discard_enabled = false;
e49e5829
JT
3042
3043 else if (!strcasecmp(arg_name, "no_discard_passdown"))
9bc142dd 3044 pf->discard_passdown = false;
991d9fa0 3045
e49e5829
JT
3046 else if (!strcasecmp(arg_name, "read_only"))
3047 pf->mode = PM_READ_ONLY;
3048
787a996c
MS
3049 else if (!strcasecmp(arg_name, "error_if_no_space"))
3050 pf->error_if_no_space = true;
3051
e49e5829
JT
3052 else {
3053 ti->error = "Unrecognised pool feature requested";
3054 r = -EINVAL;
3055 break;
3056 }
991d9fa0
JT
3057 }
3058
3059 return r;
3060}
3061
ac8c3f3d
JT
3062static void metadata_low_callback(void *context)
3063{
3064 struct pool *pool = context;
3065
3066 DMWARN("%s: reached low water mark for metadata device: sending event.",
3067 dm_device_name(pool->pool_md));
3068
3069 dm_table_event(pool->ti->table);
3070}
3071
7d48935e
MS
3072static sector_t get_dev_size(struct block_device *bdev)
3073{
3074 return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
3075}
3076
3077static void warn_if_metadata_device_too_big(struct block_device *bdev)
b17446df 3078{
7d48935e 3079 sector_t metadata_dev_size = get_dev_size(bdev);
b17446df
JT
3080 char buffer[BDEVNAME_SIZE];
3081
7d48935e 3082 if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING)
b17446df
JT
3083 DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
3084 bdevname(bdev, buffer), THIN_METADATA_MAX_SECTORS);
7d48935e
MS
3085}
3086
3087static sector_t get_metadata_dev_size(struct block_device *bdev)
3088{
3089 sector_t metadata_dev_size = get_dev_size(bdev);
3090
3091 if (metadata_dev_size > THIN_METADATA_MAX_SECTORS)
3092 metadata_dev_size = THIN_METADATA_MAX_SECTORS;
b17446df
JT
3093
3094 return metadata_dev_size;
3095}
3096
24347e95
JT
3097static dm_block_t get_metadata_dev_size_in_blocks(struct block_device *bdev)
3098{
3099 sector_t metadata_dev_size = get_metadata_dev_size(bdev);
3100
7d48935e 3101 sector_div(metadata_dev_size, THIN_METADATA_BLOCK_SIZE);
24347e95
JT
3102
3103 return metadata_dev_size;
3104}
3105
ac8c3f3d
JT
3106/*
3107 * When a metadata threshold is crossed, a dm event is triggered, and
3108 * userland should respond by growing the metadata device. We could let
3109 * userland set the threshold, like we do with the data threshold, but I'm
3110 * not sure they know enough to do this well.
3111 */
3112static dm_block_t calc_metadata_threshold(struct pool_c *pt)
3113{
3114 /*
3115 * 4M is ample for all ops with the possible exception of thin
3116 * device deletion, which is harmless if it fails (just retry the
3117 * delete after you've grown the device).
3118 */
3119 dm_block_t quarter = get_metadata_dev_size_in_blocks(pt->metadata_dev->bdev) / 4;
3120 return min((dm_block_t)1024ULL /* 4M */, quarter);
3121}
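
To make the arithmetic concrete (assuming the 4KiB metadata block size used by thin-pool metadata): a 128MiB metadata device holds 32768 blocks, so the quarter is 8192 and the threshold clamps to 1024 blocks, i.e. the event fires once free metadata space falls to 4MiB. A tiny 2MiB device holds only 512 blocks, so its quarter, 128 blocks, becomes the threshold instead.
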
3122
991d9fa0
JT
3123/*
3124 * thin-pool <metadata dev> <data dev>
3125 * <data block size (sectors)>
3126 * <low water mark (blocks)>
3127 * [<#feature args> [<arg>]*]
3128 *
3129 * Optional feature arguments are:
3130 * skip_block_zeroing: skips the zeroing of newly-provisioned blocks.
67e2e2b2
JT
3131 * ignore_discard: disable discard
3132 * no_discard_passdown: don't pass discards down to the data device
787a996c
MS
3133 * read_only: Don't allow any changes to be made to the pool metadata.
3134 * error_if_no_space: error IOs, instead of queueing, if no space.
991d9fa0
JT
3135 */
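
An illustrative table line matching the format documented above (device names and sizes invented): "0 20971520 thin-pool /dev/mapper/meta /dev/mapper/data 128 32768" loads a 10GiB pool target using 64KiB (128-sector) data blocks that raises a dm event once fewer than 32768 free data blocks remain.
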
3136static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
3137{
67e2e2b2 3138 int r, pool_created = 0;
991d9fa0
JT
3139 struct pool_c *pt;
3140 struct pool *pool;
3141 struct pool_features pf;
3142 struct dm_arg_set as;
3143 struct dm_dev *data_dev;
3144 unsigned long block_size;
3145 dm_block_t low_water_blocks;
3146 struct dm_dev *metadata_dev;
5d0db96d 3147 fmode_t metadata_mode;
991d9fa0
JT
3148
3149 /*
3150 * FIXME Remove validation from scope of lock.
3151 */
3152 mutex_lock(&dm_thin_pool_table.mutex);
3153
3154 if (argc < 4) {
3155 ti->error = "Invalid argument count";
3156 r = -EINVAL;
3157 goto out_unlock;
3158 }
5d0db96d 3159
991d9fa0
JT
3160 as.argc = argc;
3161 as.argv = argv;
3162
5d0db96d
JT
3163 /*
3164 * Set default pool features.
3165 */
3166 pool_features_init(&pf);
3167
3168 dm_consume_args(&as, 4);
3169 r = parse_pool_features(&as, &pf, ti);
3170 if (r)
3171 goto out_unlock;
3172
3173 metadata_mode = FMODE_READ | ((pf.mode == PM_READ_ONLY) ? 0 : FMODE_WRITE);
3174 r = dm_get_device(ti, argv[0], metadata_mode, &metadata_dev);
991d9fa0
JT
3175 if (r) {
3176 ti->error = "Error opening metadata block device";
3177 goto out_unlock;
3178 }
7d48935e 3179 warn_if_metadata_device_too_big(metadata_dev->bdev);
991d9fa0
JT
3180
3181 r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev);
3182 if (r) {
3183 ti->error = "Error getting data device";
3184 goto out_metadata;
3185 }
3186
3187 if (kstrtoul(argv[2], 10, &block_size) || !block_size ||
3188 block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
3189 block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
55f2b8bd 3190 block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
991d9fa0
JT
3191 ti->error = "Invalid block size";
3192 r = -EINVAL;
3193 goto out;
3194 }
3195
3196 if (kstrtoull(argv[3], 10, (unsigned long long *)&low_water_blocks)) {
3197 ti->error = "Invalid low water mark";
3198 r = -EINVAL;
3199 goto out;
3200 }
3201
991d9fa0
JT
3202 pt = kzalloc(sizeof(*pt), GFP_KERNEL);
3203 if (!pt) {
3204 r = -ENOMEM;
3205 goto out;
3206 }
3207
3208 pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev,
e49e5829 3209 block_size, pf.mode == PM_READ_ONLY, &ti->error, &pool_created);
991d9fa0
JT
3210 if (IS_ERR(pool)) {
3211 r = PTR_ERR(pool);
3212 goto out_free_pt;
3213 }
3214
67e2e2b2
JT
3215 /*
3216 * 'pool_created' reflects whether this is the first table load.
3217 * Top level discard support is not allowed to be changed after
3218 * initial load. This would require a pool reload to trigger thin
3219 * device changes.
3220 */
3221 if (!pool_created && pf.discard_enabled != pool->pf.discard_enabled) {
3222 ti->error = "Discard support cannot be disabled once enabled";
3223 r = -EINVAL;
3224 goto out_flags_changed;
3225 }
3226
991d9fa0
JT
3227 pt->pool = pool;
3228 pt->ti = ti;
3229 pt->metadata_dev = metadata_dev;
3230 pt->data_dev = data_dev;
3231 pt->low_water_blocks = low_water_blocks;
0424caa1 3232 pt->adjusted_pf = pt->requested_pf = pf;
55a62eef 3233 ti->num_flush_bios = 1;
9bc142dd 3234
67e2e2b2
JT
3235 /*
3236 * Only need to enable discards if the pool should pass
3237 * them down to the data device. The thin device's discard
3238 * processing will cause mappings to be removed from the btree.
3239 */
b60ab990 3240 ti->discard_zeroes_data_unsupported = true;
67e2e2b2 3241 if (pf.discard_enabled && pf.discard_passdown) {
55a62eef 3242 ti->num_discard_bios = 1;
9bc142dd 3243
67e2e2b2
JT
3244 /*
3245 * Setting 'discards_supported' circumvents the normal
3246 * stacking of discard limits (this keeps the pool and
3247 * thin devices' discard limits consistent).
3248 */
0ac55489 3249 ti->discards_supported = true;
67e2e2b2 3250 }
991d9fa0
JT
3251 ti->private = pt;
3252
ac8c3f3d
JT
3253 r = dm_pool_register_metadata_threshold(pt->pool->pmd,
3254 calc_metadata_threshold(pt),
3255 metadata_low_callback,
3256 pool);
3257 if (r)
3258 goto out_free_pt;
3259
991d9fa0
JT
3260 pt->callbacks.congested_fn = pool_is_congested;
3261 dm_table_add_target_callbacks(ti->table, &pt->callbacks);
3262
3263 mutex_unlock(&dm_thin_pool_table.mutex);
3264
3265 return 0;
3266
67e2e2b2
JT
3267out_flags_changed:
3268 __pool_dec(pool);
991d9fa0
JT
3269out_free_pt:
3270 kfree(pt);
3271out:
3272 dm_put_device(ti, data_dev);
3273out_metadata:
3274 dm_put_device(ti, metadata_dev);
3275out_unlock:
3276 mutex_unlock(&dm_thin_pool_table.mutex);
3277
3278 return r;
3279}
3280
7de3ee57 3281static int pool_map(struct dm_target *ti, struct bio *bio)
991d9fa0
JT
3282{
3283 int r;
3284 struct pool_c *pt = ti->private;
3285 struct pool *pool = pt->pool;
3286 unsigned long flags;
3287
3288 /*
3289 * As this is a singleton target, ti->begin is always zero.
3290 */
3291 spin_lock_irqsave(&pool->lock, flags);
3292 bio->bi_bdev = pt->data_dev->bdev;
3293 r = DM_MAPIO_REMAPPED;
3294 spin_unlock_irqrestore(&pool->lock, flags);
3295
3296 return r;
3297}
3298
b17446df 3299static int maybe_resize_data_dev(struct dm_target *ti, bool *need_commit)
991d9fa0
JT
3300{
3301 int r;
3302 struct pool_c *pt = ti->private;
3303 struct pool *pool = pt->pool;
55f2b8bd
MS
3304 sector_t data_size = ti->len;
3305 dm_block_t sb_data_size;
991d9fa0 3306
b17446df 3307 *need_commit = false;
991d9fa0 3308
55f2b8bd
MS
3309 (void) sector_div(data_size, pool->sectors_per_block);
3310
991d9fa0
JT
3311 r = dm_pool_get_data_dev_size(pool->pmd, &sb_data_size);
3312 if (r) {
4fa5971a
MS
3313 DMERR("%s: failed to retrieve data device size",
3314 dm_device_name(pool->pool_md));
991d9fa0
JT
3315 return r;
3316 }
3317
3318 if (data_size < sb_data_size) {
4fa5971a
MS
3319 DMERR("%s: pool target (%llu blocks) too small: expected %llu",
3320 dm_device_name(pool->pool_md),
55f2b8bd 3321 (unsigned long long)data_size, sb_data_size);
991d9fa0
JT
3322 return -EINVAL;
3323
3324 } else if (data_size > sb_data_size) {
07f2b6e0
MS
3325 if (dm_pool_metadata_needs_check(pool->pmd)) {
3326 DMERR("%s: unable to grow the data device until repaired.",
3327 dm_device_name(pool->pool_md));
3328 return 0;
3329 }
3330
6f7f51d4
MS
3331 if (sb_data_size)
3332 DMINFO("%s: growing the data device from %llu to %llu blocks",
3333 dm_device_name(pool->pool_md),
3334 sb_data_size, (unsigned long long)data_size);
991d9fa0
JT
3335 r = dm_pool_resize_data_dev(pool->pmd, data_size);
3336 if (r) {
b5330655 3337 metadata_operation_failed(pool, "dm_pool_resize_data_dev", r);
991d9fa0
JT
3338 return r;
3339 }
3340
b17446df 3341 *need_commit = true;
991d9fa0
JT
3342 }
3343
3344 return 0;
3345}
3346
24347e95
JT
3347static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
3348{
3349 int r;
3350 struct pool_c *pt = ti->private;
3351 struct pool *pool = pt->pool;
3352 dm_block_t metadata_dev_size, sb_metadata_dev_size;
3353
3354 *need_commit = false;
3355
610bba8b 3356 metadata_dev_size = get_metadata_dev_size_in_blocks(pool->md_dev);
24347e95
JT
3357
3358 r = dm_pool_get_metadata_dev_size(pool->pmd, &sb_metadata_dev_size);
3359 if (r) {
4fa5971a
MS
3360 DMERR("%s: failed to retrieve metadata device size",
3361 dm_device_name(pool->pool_md));
24347e95
JT
3362 return r;
3363 }
3364
3365 if (metadata_dev_size < sb_metadata_dev_size) {
4fa5971a
MS
3366 DMERR("%s: metadata device (%llu blocks) too small: expected %llu",
3367 dm_device_name(pool->pool_md),
24347e95
JT
3368 metadata_dev_size, sb_metadata_dev_size);
3369 return -EINVAL;
3370
3371 } else if (metadata_dev_size > sb_metadata_dev_size) {
07f2b6e0
MS
3372 if (dm_pool_metadata_needs_check(pool->pmd)) {
3373 DMERR("%s: unable to grow the metadata device until repaired.",
3374 dm_device_name(pool->pool_md));
3375 return 0;
3376 }
3377
7d48935e 3378 warn_if_metadata_device_too_big(pool->md_dev);
6f7f51d4
MS
3379 DMINFO("%s: growing the metadata device from %llu to %llu blocks",
3380 dm_device_name(pool->pool_md),
3381 sb_metadata_dev_size, metadata_dev_size);
24347e95
JT
3382 r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size);
3383 if (r) {
b5330655 3384 metadata_operation_failed(pool, "dm_pool_resize_metadata_dev", r);
24347e95
JT
3385 return r;
3386 }
3387
3388 *need_commit = true;
3389 }
3390
3391 return 0;
3392}
3393
b17446df
JT
3394/*
3395 * Retrieves the number of blocks of the data device from
3396 * the superblock and compares it to the actual device size,
3397 * thus resizing the data device in case it has grown.
3398 *
3399 * This both copes with opening preallocated data devices in the ctr
3400 * being followed by a resume
3401 * -and-
3402 * calling the resume method individually after userspace has
3403 * grown the data device in reaction to a table event.
3404 */
3405static int pool_preresume(struct dm_target *ti)
3406{
3407 int r;
24347e95 3408 bool need_commit1, need_commit2;
b17446df
JT
3409 struct pool_c *pt = ti->private;
3410 struct pool *pool = pt->pool;
3411
3412 /*
3413 * Take control of the pool object.
3414 */
3415 r = bind_control_target(pool, ti);
3416 if (r)
3417 return r;
3418
3419 r = maybe_resize_data_dev(ti, &need_commit1);
3420 if (r)
3421 return r;
3422
24347e95
JT
3423 r = maybe_resize_metadata_dev(ti, &need_commit2);
3424 if (r)
3425 return r;
3426
3427 if (need_commit1 || need_commit2)
020cc3b5 3428 (void) commit(pool);
b17446df
JT
3429
3430 return 0;
3431}
3432
583024d2
MS
3433static void pool_suspend_active_thins(struct pool *pool)
3434{
3435 struct thin_c *tc;
3436
3437 /* Suspend all active thin devices */
3438 tc = get_first_thin(pool);
3439 while (tc) {
3440 dm_internal_suspend_noflush(tc->thin_md);
3441 tc = get_next_thin(pool, tc);
3442 }
3443}
3444
3445static void pool_resume_active_thins(struct pool *pool)
3446{
3447 struct thin_c *tc;
3448
3449 /* Resume all active thin devices */
3450 tc = get_first_thin(pool);
3451 while (tc) {
3452 dm_internal_resume(tc->thin_md);
3453 tc = get_next_thin(pool, tc);
3454 }
3455}
3456
991d9fa0
JT
3457static void pool_resume(struct dm_target *ti)
3458{
3459 struct pool_c *pt = ti->private;
3460 struct pool *pool = pt->pool;
3461 unsigned long flags;
3462
583024d2
MS
3463 /*
3464 * Must requeue active_thins' bios and then resume
3465 * active_thins _before_ clearing 'suspend' flag.
3466 */
3467 requeue_bios(pool);
3468 pool_resume_active_thins(pool);
3469
991d9fa0 3470 spin_lock_irqsave(&pool->lock, flags);
88a6621b 3471 pool->low_water_triggered = false;
80e96c54 3472 pool->suspended = false;
991d9fa0 3473 spin_unlock_irqrestore(&pool->lock, flags);
80e96c54 3474
905e51b3 3475 do_waker(&pool->waker.work);
991d9fa0
JT
3476}
3477
80e96c54
MS
3478static void pool_presuspend(struct dm_target *ti)
3479{
3480 struct pool_c *pt = ti->private;
3481 struct pool *pool = pt->pool;
3482 unsigned long flags;
3483
3484 spin_lock_irqsave(&pool->lock, flags);
3485 pool->suspended = true;
3486 spin_unlock_irqrestore(&pool->lock, flags);
583024d2
MS
3487
3488 pool_suspend_active_thins(pool);
80e96c54
MS
3489}
3490
3491static void pool_presuspend_undo(struct dm_target *ti)
3492{
3493 struct pool_c *pt = ti->private;
3494 struct pool *pool = pt->pool;
3495 unsigned long flags;
3496
583024d2
MS
3497 pool_resume_active_thins(pool);
3498
80e96c54
MS
3499 spin_lock_irqsave(&pool->lock, flags);
3500 pool->suspended = false;
3501 spin_unlock_irqrestore(&pool->lock, flags);
3502}
3503
991d9fa0
JT
3504static void pool_postsuspend(struct dm_target *ti)
3505{
991d9fa0
JT
3506 struct pool_c *pt = ti->private;
3507 struct pool *pool = pt->pool;
3508
905e51b3 3509 cancel_delayed_work(&pool->waker);
85ad643b 3510 cancel_delayed_work(&pool->no_space_timeout);
991d9fa0 3511 flush_workqueue(pool->wq);
020cc3b5 3512 (void) commit(pool);
991d9fa0
JT
3513}
3514
3515static int check_arg_count(unsigned argc, unsigned args_required)
3516{
3517 if (argc != args_required) {
3518 DMWARN("Message received with %u arguments instead of %u.",
3519 argc, args_required);
3520 return -EINVAL;
3521 }
3522
3523 return 0;
3524}
3525
3526static int read_dev_id(char *arg, dm_thin_id *dev_id, int warning)
3527{
3528 if (!kstrtoull(arg, 10, (unsigned long long *)dev_id) &&
3529 *dev_id <= MAX_DEV_ID)
3530 return 0;
3531
3532 if (warning)
3533 DMWARN("Message received with invalid device id: %s", arg);
3534
3535 return -EINVAL;
3536}
3537
3538static int process_create_thin_mesg(unsigned argc, char **argv, struct pool *pool)
3539{
3540 dm_thin_id dev_id;
3541 int r;
3542
3543 r = check_arg_count(argc, 2);
3544 if (r)
3545 return r;
3546
3547 r = read_dev_id(argv[1], &dev_id, 1);
3548 if (r)
3549 return r;
3550
3551 r = dm_pool_create_thin(pool->pmd, dev_id);
3552 if (r) {
3553 DMWARN("Creation of new thinly-provisioned device with id %s failed.",
3554 argv[1]);
3555 return r;
3556 }
3557
3558 return 0;
3559}
3560
static int process_create_snap_mesg(unsigned argc, char **argv, struct pool *pool)
{
	dm_thin_id dev_id;
	dm_thin_id origin_dev_id;
	int r;

	r = check_arg_count(argc, 3);
	if (r)
		return r;

	r = read_dev_id(argv[1], &dev_id, 1);
	if (r)
		return r;

	r = read_dev_id(argv[2], &origin_dev_id, 1);
	if (r)
		return r;

	r = dm_pool_create_snap(pool->pmd, dev_id, origin_dev_id);
	if (r) {
		DMWARN("Creation of new snapshot %s of device %s failed.",
		       argv[1], argv[2]);
		return r;
	}

	return 0;
}

static int process_delete_mesg(unsigned argc, char **argv, struct pool *pool)
{
	dm_thin_id dev_id;
	int r;

	r = check_arg_count(argc, 2);
	if (r)
		return r;

	r = read_dev_id(argv[1], &dev_id, 1);
	if (r)
		return r;

	r = dm_pool_delete_thin_device(pool->pmd, dev_id);
	if (r)
		DMWARN("Deletion of thin device %s failed.", argv[1]);

	return r;
}

static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct pool *pool)
{
	dm_thin_id old_id, new_id;
	int r;

	r = check_arg_count(argc, 3);
	if (r)
		return r;

	if (kstrtoull(argv[1], 10, (unsigned long long *)&old_id)) {
		DMWARN("set_transaction_id message: Unrecognised id %s.", argv[1]);
		return -EINVAL;
	}

	if (kstrtoull(argv[2], 10, (unsigned long long *)&new_id)) {
		DMWARN("set_transaction_id message: Unrecognised new id %s.", argv[2]);
		return -EINVAL;
	}

	r = dm_pool_set_metadata_transaction_id(pool->pmd, old_id, new_id);
	if (r) {
		DMWARN("Failed to change transaction id from %s to %s.",
		       argv[1], argv[2]);
		return r;
	}

	return 0;
}

static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
{
	int r;

	r = check_arg_count(argc, 1);
	if (r)
		return r;

	(void) commit(pool);

	r = dm_pool_reserve_metadata_snap(pool->pmd);
	if (r)
		DMWARN("reserve_metadata_snap message failed.");

	return r;
}

static int process_release_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
{
	int r;

	r = check_arg_count(argc, 1);
	if (r)
		return r;

	r = dm_pool_release_metadata_snap(pool->pmd);
	if (r)
		DMWARN("release_metadata_snap message failed.");

	return r;
}

/*
 * Messages supported:
 *   create_thin <dev_id>
 *   create_snap <dev_id> <origin_id>
 *   delete <dev_id>
 *   set_transaction_id <current_trans_id> <new_trans_id>
 *   reserve_metadata_snap
 *   release_metadata_snap
 */
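/*
 * Example usage (illustrative only; device names and ids below are
 * placeholders): these messages are normally sent with the userspace
 * dmsetup tool:
 *
 *   dmsetup message /dev/mapper/pool 0 "create_thin 0"
 *   dmsetup message /dev/mapper/pool 0 "create_snap 1 0"
 *   dmsetup message /dev/mapper/pool 0 "delete 1"
 *
 * The "0" after the pool device is the sector offset required by the
 * message ioctl; it is not used by this target.
 */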
static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
{
	int r = -EINVAL;
	struct pool_c *pt = ti->private;
	struct pool *pool = pt->pool;

	if (get_pool_mode(pool) >= PM_READ_ONLY) {
		DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode",
		      dm_device_name(pool->pool_md));
		return -EOPNOTSUPP;
	}

	if (!strcasecmp(argv[0], "create_thin"))
		r = process_create_thin_mesg(argc, argv, pool);

	else if (!strcasecmp(argv[0], "create_snap"))
		r = process_create_snap_mesg(argc, argv, pool);

	else if (!strcasecmp(argv[0], "delete"))
		r = process_delete_mesg(argc, argv, pool);

	else if (!strcasecmp(argv[0], "set_transaction_id"))
		r = process_set_transaction_id_mesg(argc, argv, pool);

	else if (!strcasecmp(argv[0], "reserve_metadata_snap"))
		r = process_reserve_metadata_snap_mesg(argc, argv, pool);

	else if (!strcasecmp(argv[0], "release_metadata_snap"))
		r = process_release_metadata_snap_mesg(argc, argv, pool);

	else
		DMWARN("Unrecognised thin pool target message received: %s", argv[0]);

	if (!r)
		(void) commit(pool);

	return r;
}

static void emit_flags(struct pool_features *pf, char *result,
		       unsigned sz, unsigned maxlen)
{
	unsigned count = !pf->zero_new_blocks + !pf->discard_enabled +
		!pf->discard_passdown + (pf->mode == PM_READ_ONLY) +
		pf->error_if_no_space;
	DMEMIT("%u ", count);

	if (!pf->zero_new_blocks)
		DMEMIT("skip_block_zeroing ");

	if (!pf->discard_enabled)
		DMEMIT("ignore_discard ");

	if (!pf->discard_passdown)
		DMEMIT("no_discard_passdown ");

	if (pf->mode == PM_READ_ONLY)
		DMEMIT("read_only ");

	if (pf->error_if_no_space)
		DMEMIT("error_if_no_space ");
}

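/*
 * Example (illustrative): a pool configured with all defaults except
 * error_if_no_space would emit "1 error_if_no_space ", i.e. the feature
 * count followed by each non-default feature name.
 */
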
/*
 * Status line is:
 *    <transaction id> <used metadata sectors>/<total metadata sectors>
 *    <used data sectors>/<total data sectors> <held metadata root>
 *    <pool mode> <discard config> <no space config> <needs_check>
 */
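/*
 * Illustrative STATUSTYPE_INFO output for a healthy, writeable pool
 * (all numbers made up):
 *
 *   0 913/24576 352256/2097152 - rw discard_passdown queue_if_no_space -
 *
 * i.e. transaction id 0, 913 of 24576 metadata blocks used, 352256 of
 * 2097152 data blocks used, no held metadata root, read-write mode,
 * discards passed down, IO queued when out of space, and no metadata
 * check needed.
 */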
static void pool_status(struct dm_target *ti, status_type_t type,
			unsigned status_flags, char *result, unsigned maxlen)
{
	int r;
	unsigned sz = 0;
	uint64_t transaction_id;
	dm_block_t nr_free_blocks_data;
	dm_block_t nr_free_blocks_metadata;
	dm_block_t nr_blocks_data;
	dm_block_t nr_blocks_metadata;
	dm_block_t held_root;
	char buf[BDEVNAME_SIZE];
	char buf2[BDEVNAME_SIZE];
	struct pool_c *pt = ti->private;
	struct pool *pool = pt->pool;

	switch (type) {
	case STATUSTYPE_INFO:
		if (get_pool_mode(pool) == PM_FAIL) {
			DMEMIT("Fail");
			break;
		}

		/* Commit to ensure statistics aren't out-of-date */
		if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
			(void) commit(pool);

		r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id);
		if (r) {
			DMERR("%s: dm_pool_get_metadata_transaction_id returned %d",
			      dm_device_name(pool->pool_md), r);
			goto err;
		}

		r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free_blocks_metadata);
		if (r) {
			DMERR("%s: dm_pool_get_free_metadata_block_count returned %d",
			      dm_device_name(pool->pool_md), r);
			goto err;
		}

		r = dm_pool_get_metadata_dev_size(pool->pmd, &nr_blocks_metadata);
		if (r) {
			DMERR("%s: dm_pool_get_metadata_dev_size returned %d",
			      dm_device_name(pool->pool_md), r);
			goto err;
		}

		r = dm_pool_get_free_block_count(pool->pmd, &nr_free_blocks_data);
		if (r) {
			DMERR("%s: dm_pool_get_free_block_count returned %d",
			      dm_device_name(pool->pool_md), r);
			goto err;
		}

		r = dm_pool_get_data_dev_size(pool->pmd, &nr_blocks_data);
		if (r) {
			DMERR("%s: dm_pool_get_data_dev_size returned %d",
			      dm_device_name(pool->pool_md), r);
			goto err;
		}

		r = dm_pool_get_metadata_snap(pool->pmd, &held_root);
		if (r) {
			DMERR("%s: dm_pool_get_metadata_snap returned %d",
			      dm_device_name(pool->pool_md), r);
			goto err;
		}

		DMEMIT("%llu %llu/%llu %llu/%llu ",
		       (unsigned long long)transaction_id,
		       (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
		       (unsigned long long)nr_blocks_metadata,
		       (unsigned long long)(nr_blocks_data - nr_free_blocks_data),
		       (unsigned long long)nr_blocks_data);

		if (held_root)
			DMEMIT("%llu ", held_root);
		else
			DMEMIT("- ");

		if (pool->pf.mode == PM_OUT_OF_DATA_SPACE)
			DMEMIT("out_of_data_space ");
		else if (pool->pf.mode == PM_READ_ONLY)
			DMEMIT("ro ");
		else
			DMEMIT("rw ");

		if (!pool->pf.discard_enabled)
			DMEMIT("ignore_discard ");
		else if (pool->pf.discard_passdown)
			DMEMIT("discard_passdown ");
		else
			DMEMIT("no_discard_passdown ");

		if (pool->pf.error_if_no_space)
			DMEMIT("error_if_no_space ");
		else
			DMEMIT("queue_if_no_space ");

		if (dm_pool_metadata_needs_check(pool->pmd))
			DMEMIT("needs_check ");
		else
			DMEMIT("- ");

		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s %s %lu %llu ",
		       format_dev_t(buf, pt->metadata_dev->bdev->bd_dev),
		       format_dev_t(buf2, pt->data_dev->bdev->bd_dev),
		       (unsigned long)pool->sectors_per_block,
		       (unsigned long long)pt->low_water_blocks);
		emit_flags(&pt->requested_pf, result, sz, maxlen);
		break;
	}
	return;

err:
	DMEMIT("Error");
}

static int pool_iterate_devices(struct dm_target *ti,
				iterate_devices_callout_fn fn, void *data)
{
	struct pool_c *pt = ti->private;

	return fn(ti, pt->data_dev, 0, ti->len, data);
}

static int pool_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
		      struct bio_vec *biovec, int max_size)
{
	struct pool_c *pt = ti->private;
	struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);

	if (!q->merge_bvec_fn)
		return max_size;

	bvm->bi_bdev = pt->data_dev->bdev;

	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}

static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct pool_c *pt = ti->private;
	struct pool *pool = pt->pool;
	sector_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;

	/*
	 * If max_sectors is smaller than pool->sectors_per_block adjust it
	 * to the highest possible power-of-2 factor of pool->sectors_per_block.
	 * This is especially beneficial when the pool's data device is a RAID
	 * device that has a full stripe width that matches pool->sectors_per_block
	 * -- because even though partial RAID stripe-sized IOs will be issued
	 * to a single RAID stripe, when aggregated they will end on a full RAID
	 * stripe boundary, which avoids additional partial RAID stripe writes
	 * cascading.
	 */
	if (limits->max_sectors < pool->sectors_per_block) {
		while (!is_factor(pool->sectors_per_block, limits->max_sectors)) {
			if ((limits->max_sectors & (limits->max_sectors - 1)) == 0)
				limits->max_sectors--;
			limits->max_sectors = rounddown_pow_of_two(limits->max_sectors);
		}
	}

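	/*
	 * Worked example (illustrative numbers): with sectors_per_block = 768
	 * and an initial max_sectors = 512, 512 does not divide 768; it is a
	 * power of two, so it is first decremented to 511 and then rounded
	 * down to 256, which divides 768 evenly and terminates the loop.
	 */
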
	/*
	 * If the system-determined stacked limits are compatible with the
	 * pool's blocksize (io_opt is a factor) do not override them.
	 */
	if (io_opt_sectors < pool->sectors_per_block ||
	    !is_factor(io_opt_sectors, pool->sectors_per_block)) {
		if (is_factor(pool->sectors_per_block, limits->max_sectors))
			blk_limits_io_min(limits, limits->max_sectors << SECTOR_SHIFT);
		else
			blk_limits_io_min(limits, pool->sectors_per_block << SECTOR_SHIFT);
		blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
	}

	/*
	 * pt->adjusted_pf is a staging area for the actual features to use.
	 * They get transferred to the live pool in bind_control_target()
	 * called from pool_preresume().
	 */
	if (!pt->adjusted_pf.discard_enabled) {
		/*
		 * Must explicitly disallow stacking discard limits otherwise the
		 * block layer will stack them if pool's data device has support.
		 * QUEUE_FLAG_DISCARD wouldn't be set but there is no way for the
		 * user to see that, so make sure to set all discard limits to 0.
		 */
		limits->discard_granularity = 0;
		return;
	}

	disable_passdown_if_not_supported(pt);

	/*
	 * The pool uses the same discard limits as the underlying data
	 * device. DM core has already set this up.
	 */
}

static struct target_type pool_target = {
	.name = "thin-pool",
	.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
		    DM_TARGET_IMMUTABLE,
	.version = {1, 16, 0},
	.module = THIS_MODULE,
	.ctr = pool_ctr,
	.dtr = pool_dtr,
	.map = pool_map,
	.presuspend = pool_presuspend,
	.presuspend_undo = pool_presuspend_undo,
	.postsuspend = pool_postsuspend,
	.preresume = pool_preresume,
	.resume = pool_resume,
	.message = pool_message,
	.status = pool_status,
	.merge = pool_merge,
	.iterate_devices = pool_iterate_devices,
	.io_hints = pool_io_hints,
};

/*----------------------------------------------------------------
 * Thin target methods
 *--------------------------------------------------------------*/
static void thin_get(struct thin_c *tc)
{
	atomic_inc(&tc->refcount);
}

static void thin_put(struct thin_c *tc)
{
	if (atomic_dec_and_test(&tc->refcount))
		complete(&tc->can_destroy);
}

static void thin_dtr(struct dm_target *ti)
{
	struct thin_c *tc = ti->private;
	unsigned long flags;

	spin_lock_irqsave(&tc->pool->lock, flags);
	list_del_rcu(&tc->list);
	spin_unlock_irqrestore(&tc->pool->lock, flags);
	synchronize_rcu();

	thin_put(tc);
	wait_for_completion(&tc->can_destroy);

	mutex_lock(&dm_thin_pool_table.mutex);

	__pool_dec(tc->pool);
	dm_pool_close_thin_device(tc->td);
	dm_put_device(ti, tc->pool_dev);
	if (tc->origin_dev)
		dm_put_device(ti, tc->origin_dev);
	kfree(tc);

	mutex_unlock(&dm_thin_pool_table.mutex);
}

/*
 * Thin target parameters:
 *
 * <pool_dev> <dev_id> [origin_dev]
 *
 * pool_dev: the path to the pool (e.g., /dev/mapper/my_pool)
 * dev_id: the internal device identifier
 * origin_dev: a device external to the pool that should act as the origin
 *
 * If the pool device has discards disabled, they get disabled for the thin
 * device as well.
 */
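/*
 * Example table lines (illustrative; device names, lengths and ids are
 * made up), as loaded with e.g. "dmsetup create thin1 --table ...":
 *
 *   0 2097152 thin /dev/mapper/pool 0
 *   0 2097152 thin /dev/mapper/pool 1 /dev/sdb
 *
 * The second form supplies an external origin device.
 */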
static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	int r;
	struct thin_c *tc;
	struct dm_dev *pool_dev, *origin_dev;
	struct mapped_device *pool_md;
	unsigned long flags;

	mutex_lock(&dm_thin_pool_table.mutex);

	if (argc != 2 && argc != 3) {
		ti->error = "Invalid argument count";
		r = -EINVAL;
		goto out_unlock;
	}

	tc = ti->private = kzalloc(sizeof(*tc), GFP_KERNEL);
	if (!tc) {
		ti->error = "Out of memory";
		r = -ENOMEM;
		goto out_unlock;
	}
	tc->thin_md = dm_table_get_md(ti->table);
	spin_lock_init(&tc->lock);
	INIT_LIST_HEAD(&tc->deferred_cells);
	bio_list_init(&tc->deferred_bio_list);
	bio_list_init(&tc->retry_on_resume_list);
	tc->sort_bio_list = RB_ROOT;

	if (argc == 3) {
		r = dm_get_device(ti, argv[2], FMODE_READ, &origin_dev);
		if (r) {
			ti->error = "Error opening origin device";
			goto bad_origin_dev;
		}
		tc->origin_dev = origin_dev;
	}

	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &pool_dev);
	if (r) {
		ti->error = "Error opening pool device";
		goto bad_pool_dev;
	}
	tc->pool_dev = pool_dev;

	if (read_dev_id(argv[1], (unsigned long long *)&tc->dev_id, 0)) {
		ti->error = "Invalid device id";
		r = -EINVAL;
		goto bad_common;
	}

	pool_md = dm_get_md(tc->pool_dev->bdev->bd_dev);
	if (!pool_md) {
		ti->error = "Couldn't get pool mapped device";
		r = -EINVAL;
		goto bad_common;
	}

	tc->pool = __pool_table_lookup(pool_md);
	if (!tc->pool) {
		ti->error = "Couldn't find pool object";
		r = -EINVAL;
		goto bad_pool_lookup;
	}
	__pool_inc(tc->pool);

	if (get_pool_mode(tc->pool) == PM_FAIL) {
		ti->error = "Couldn't open thin device, Pool is in fail mode";
		r = -EINVAL;
		goto bad_pool;
	}

	r = dm_pool_open_thin_device(tc->pool->pmd, tc->dev_id, &tc->td);
	if (r) {
		ti->error = "Couldn't open thin internal device";
		goto bad_pool;
	}

	r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block);
	if (r)
		goto bad;

	ti->num_flush_bios = 1;
	ti->flush_supported = true;
	ti->per_bio_data_size = sizeof(struct dm_thin_endio_hook);

	/* In case the pool supports discards, pass them on. */
	ti->discard_zeroes_data_unsupported = true;
	if (tc->pool->pf.discard_enabled) {
		ti->discards_supported = true;
		ti->num_discard_bios = 1;
		ti->split_discard_bios = false;
	}

	mutex_unlock(&dm_thin_pool_table.mutex);

	spin_lock_irqsave(&tc->pool->lock, flags);
	if (tc->pool->suspended) {
		spin_unlock_irqrestore(&tc->pool->lock, flags);
		mutex_lock(&dm_thin_pool_table.mutex); /* reacquire for __pool_dec */
		ti->error = "Unable to activate thin device while pool is suspended";
		r = -EINVAL;
		goto bad;
	}
	atomic_set(&tc->refcount, 1);
	init_completion(&tc->can_destroy);
	list_add_tail_rcu(&tc->list, &tc->pool->active_thins);
	spin_unlock_irqrestore(&tc->pool->lock, flags);
	/*
	 * This synchronize_rcu() call is needed here otherwise we risk a
	 * wake_worker() call finding no bios to process (because the newly
	 * added tc isn't yet visible).  So this reduces latency since we
	 * aren't then dependent on the periodic commit to wake_worker().
	 */
	synchronize_rcu();

	dm_put(pool_md);

	return 0;

bad:
	dm_pool_close_thin_device(tc->td);
bad_pool:
	__pool_dec(tc->pool);
bad_pool_lookup:
	dm_put(pool_md);
bad_common:
	dm_put_device(ti, tc->pool_dev);
bad_pool_dev:
	if (tc->origin_dev)
		dm_put_device(ti, tc->origin_dev);
bad_origin_dev:
	kfree(tc);
out_unlock:
	mutex_unlock(&dm_thin_pool_table.mutex);

	return r;
}

static int thin_map(struct dm_target *ti, struct bio *bio)
{
	bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);

	return thin_bio_map(ti, bio);
}

static int thin_endio(struct dm_target *ti, struct bio *bio, int err)
{
	unsigned long flags;
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	struct list_head work;
	struct dm_thin_new_mapping *m, *tmp;
	struct pool *pool = h->tc->pool;

	if (h->shared_read_entry) {
		INIT_LIST_HEAD(&work);
		dm_deferred_entry_dec(h->shared_read_entry, &work);

		spin_lock_irqsave(&pool->lock, flags);
		list_for_each_entry_safe(m, tmp, &work, list) {
			list_del(&m->list);
			__complete_mapping_preparation(m);
		}
		spin_unlock_irqrestore(&pool->lock, flags);
	}

	if (h->all_io_entry) {
		INIT_LIST_HEAD(&work);
		dm_deferred_entry_dec(h->all_io_entry, &work);
		if (!list_empty(&work)) {
			spin_lock_irqsave(&pool->lock, flags);
			list_for_each_entry_safe(m, tmp, &work, list)
				list_add_tail(&m->list, &pool->prepared_discards);
			spin_unlock_irqrestore(&pool->lock, flags);
			wake_worker(pool);
		}
	}

	if (h->cell)
		cell_defer_no_holder(h->tc, h->cell);

	return 0;
}

static void thin_presuspend(struct dm_target *ti)
{
	struct thin_c *tc = ti->private;

	if (dm_noflush_suspending(ti))
		noflush_work(tc, do_noflush_start);
}

static void thin_postsuspend(struct dm_target *ti)
{
	struct thin_c *tc = ti->private;

	/*
	 * The dm_noflush_suspending flag has been cleared by now, so
	 * unfortunately we must always run this.
	 */
	noflush_work(tc, do_noflush_stop);
}

static int thin_preresume(struct dm_target *ti)
{
	struct thin_c *tc = ti->private;

	if (tc->origin_dev)
		tc->origin_size = get_dev_size(tc->origin_dev->bdev);

	return 0;
}

/*
 * <nr mapped sectors> <highest mapped sector>
 */
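/*
 * Illustrative output (numbers made up): a thin device with 2048 mapped
 * blocks of 1024 sectors each, mapped contiguously from block 0, would
 * report
 *
 *   2097152 2097151
 */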
static void thin_status(struct dm_target *ti, status_type_t type,
			unsigned status_flags, char *result, unsigned maxlen)
{
	int r;
	ssize_t sz = 0;
	dm_block_t mapped, highest;
	char buf[BDEVNAME_SIZE];
	struct thin_c *tc = ti->private;

	if (get_pool_mode(tc->pool) == PM_FAIL) {
		DMEMIT("Fail");
		return;
	}

	if (!tc->td)
		DMEMIT("-");
	else {
		switch (type) {
		case STATUSTYPE_INFO:
			r = dm_thin_get_mapped_count(tc->td, &mapped);
			if (r) {
				DMERR("dm_thin_get_mapped_count returned %d", r);
				goto err;
			}

			r = dm_thin_get_highest_mapped_block(tc->td, &highest);
			if (r < 0) {
				DMERR("dm_thin_get_highest_mapped_block returned %d", r);
				goto err;
			}

			DMEMIT("%llu ", mapped * tc->pool->sectors_per_block);
			if (r)
				DMEMIT("%llu", ((highest + 1) *
						tc->pool->sectors_per_block) - 1);
			else
				DMEMIT("-");
			break;

		case STATUSTYPE_TABLE:
			DMEMIT("%s %lu",
			       format_dev_t(buf, tc->pool_dev->bdev->bd_dev),
			       (unsigned long) tc->dev_id);
			if (tc->origin_dev)
				DMEMIT(" %s", format_dev_t(buf, tc->origin_dev->bdev->bd_dev));
			break;
		}
	}

	return;

err:
	DMEMIT("Error");
}

static int thin_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
		      struct bio_vec *biovec, int max_size)
{
	struct thin_c *tc = ti->private;
	struct request_queue *q = bdev_get_queue(tc->pool_dev->bdev);

	if (!q->merge_bvec_fn)
		return max_size;

	bvm->bi_bdev = tc->pool_dev->bdev;
	bvm->bi_sector = dm_target_offset(ti, bvm->bi_sector);

	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}

static int thin_iterate_devices(struct dm_target *ti,
				iterate_devices_callout_fn fn, void *data)
{
	sector_t blocks;
	struct thin_c *tc = ti->private;
	struct pool *pool = tc->pool;

	/*
	 * We can't call dm_pool_get_data_dev_size() since that blocks. So
	 * we follow a more convoluted path through to the pool's target.
	 */
	if (!pool->ti)
		return 0; /* nothing is bound */

	blocks = pool->ti->len;
	(void) sector_div(blocks, pool->sectors_per_block);
	if (blocks)
		return fn(ti, tc->pool_dev, 0, pool->sectors_per_block * blocks, data);

	return 0;
}

static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct thin_c *tc = ti->private;
	struct pool *pool = tc->pool;

	limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
	limits->max_discard_sectors = 2048 * 1024 * 16; /* 16G */
}
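
/*
 * Example (illustrative): for a pool block size of 2048 sectors (1MiB),
 * discards are aligned to a discard_granularity of 1MiB and capped at
 * 33554432 sectors (16GiB) per discard bio.
 */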

static struct target_type thin_target = {
	.name = "thin",
	.version = {1, 16, 0},
	.module = THIS_MODULE,
	.ctr = thin_ctr,
	.dtr = thin_dtr,
	.map = thin_map,
	.end_io = thin_endio,
	.preresume = thin_preresume,
	.presuspend = thin_presuspend,
	.postsuspend = thin_postsuspend,
	.status = thin_status,
	.merge = thin_merge,
	.iterate_devices = thin_iterate_devices,
	.io_hints = thin_io_hints,
};

/*----------------------------------------------------------------*/

static int __init dm_thin_init(void)
{
	int r;

	pool_table_init();

	r = dm_register_target(&thin_target);
	if (r)
		return r;

	r = dm_register_target(&pool_target);
	if (r)
		goto bad_pool_target;

	r = -ENOMEM;

	_new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
	if (!_new_mapping_cache)
		goto bad_new_mapping_cache;

	return 0;

bad_new_mapping_cache:
	dm_unregister_target(&pool_target);
bad_pool_target:
	dm_unregister_target(&thin_target);

	return r;
}

static void dm_thin_exit(void)
{
	dm_unregister_target(&thin_target);
	dm_unregister_target(&pool_target);

	kmem_cache_destroy(_new_mapping_cache);
}

module_init(dm_thin_init);
module_exit(dm_thin_exit);

module_param_named(no_space_timeout, no_space_timeout_secs, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(no_space_timeout, "Out of data space queue IO timeout in seconds");

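/*
 * Example (illustrative; assumes this code is built as the dm_thin_pool
 * module): the timeout can be set at load time or, since the parameter
 * is writeable, changed at runtime:
 *
 *   modprobe dm_thin_pool no_space_timeout=120
 *   echo 120 > /sys/module/dm_thin_pool/parameters/no_space_timeout
 */
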
MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");