/*
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include <linux/blkdev.h>
#include <linux/device-mapper.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/list_bl.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>

#include "dm-exception-store.h"

#define DM_MSG_PREFIX "snapshots"

static const char dm_snapshot_merge_target_name[] = "snapshot-merge";

#define dm_target_is_snapshot_merge(ti) \
	((ti)->type->name == dm_snapshot_merge_target_name)

/*
 * The size of the mempool used to track chunks in use.
 */

#define DM_TRACKED_CHUNK_HASH_SIZE	16
#define DM_TRACKED_CHUNK_HASH(x)	((unsigned long)(x) & \
					 (DM_TRACKED_CHUNK_HASH_SIZE - 1))
struct dm_exception_table {
	uint32_t hash_mask;
	unsigned hash_shift;
	struct hlist_bl_head *table;
};

struct dm_snapshot {
	struct rw_semaphore lock;

	struct dm_dev *origin;
	struct dm_dev *cow;

	struct dm_target *ti;

	/* List of snapshots per Origin */
	struct list_head list;
	/*
	 * You can't use a snapshot if this is 0 (e.g. if full).
	 * A snapshot-merge target never clears this.
	 */
	int valid;

	/*
	 * The snapshot overflowed because of a write to the snapshot device.
	 * We don't have to invalidate the snapshot in this case, but we need
	 * to prevent further writes.
	 */
	int snapshot_overflowed;

	/* Origin writes don't trigger exceptions until this is set */
	int active;

	atomic_t pending_exceptions_count;
	spinlock_t pe_allocation_lock;

	/* Protected by "pe_allocation_lock" */
	sector_t exception_start_sequence;

	/* Protected by kcopyd single-threaded callback */
	sector_t exception_complete_sequence;

	/*
	 * A list of pending exceptions that completed out of order.
	 * Protected by kcopyd single-threaded callback.
	 */
	struct rb_root out_of_order_tree;

	mempool_t pending_pool;

	struct dm_exception_table pending;
	struct dm_exception_table complete;
	/*
	 * pe_lock protects all pending_exception operations and access
	 * as well as the snapshot_bios list.
	 */
	spinlock_t pe_lock;

	/* Chunks with outstanding reads */
	spinlock_t tracked_chunk_lock;
	struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];

	/* The on disk metadata handler */
	struct dm_exception_store *store;

	unsigned in_progress;
	struct wait_queue_head in_progress_wait;

	struct dm_kcopyd_client *kcopyd_client;

	/* Wait for events based on state_bits */
	unsigned long state_bits;
	/* Range of chunks currently being merged. */
	chunk_t first_merging_chunk;
	int num_merging_chunks;

	/*
	 * The merge operation failed if this flag is set.
	 * Failure modes are handled as follows:
	 * - I/O error reading the header
	 *	=> don't load the target; abort.
	 * - Header does not have "valid" flag set
	 *	=> use the origin; forget about the snapshot.
	 * - I/O error when reading exceptions
	 *	=> don't load the target; abort.
	 *	(We can't use the intermediate origin state.)
	 * - I/O error while merging
	 *	=> stop merging; set merge_failed; process I/O normally.
	 */
	bool merge_failed:1;

	bool discard_zeroes_cow:1;
	bool discard_passdown_origin:1;

	/*
	 * Incoming bios that overlap with chunks being merged must wait
	 * for them to be committed.
	 */
	struct bio_list bios_queued_during_merge;

	/*
	 * Flush data after merge.
	 */
	struct bio flush_bio;
};
/*
 * RUNNING_MERGE  - Merge operation is in progress.
 * SHUTDOWN_MERGE - Set to signal that merge needs to be stopped;
 *                  cleared afterwards.
 */
#define RUNNING_MERGE          0
#define SHUTDOWN_MERGE         1
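
/*
 * Added note (not in the original source): start_merge() below claims the
 * merge with test_and_set_bit(RUNNING_MERGE); stop_merge() sets
 * SHUTDOWN_MERGE and sleeps in wait_on_bit() until merge_shutdown() clears
 * RUNNING_MERGE and wakes the waiter, after which SHUTDOWN_MERGE is cleared
 * again.
 */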
/*
 * Maximum number of chunks being copied on write.
 *
 * The value was decided experimentally as a trade-off between memory
 * consumption, stalling the kernel's workqueues and maintaining a high enough
 * throughput.
 */
#define DEFAULT_COW_THRESHOLD 2048

static unsigned cow_threshold = DEFAULT_COW_THRESHOLD;
module_param_named(snapshot_cow_threshold, cow_threshold, uint, 0644);
MODULE_PARM_DESC(snapshot_cow_threshold, "Maximum number of chunks being copied on write");

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
		"A percentage of time allocated for copy on write");
struct dm_dev *dm_snap_origin(struct dm_snapshot *s)
{
	return s->origin;
}
EXPORT_SYMBOL(dm_snap_origin);

struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
{
	return s->cow;
}
EXPORT_SYMBOL(dm_snap_cow);
static sector_t chunk_to_sector(struct dm_exception_store *store,
				chunk_t chunk)
{
	return chunk << store->chunk_shift;
}
static int bdev_equal(struct block_device *lhs, struct block_device *rhs)
{
	/*
	 * There is only ever one instance of a particular block
	 * device so we can compare pointers safely.
	 */
	return lhs == rhs;
}
struct dm_snap_pending_exception {
	struct dm_exception e;

	/*
	 * Origin buffers waiting for this to complete are held
	 * in a bio list
	 */
	struct bio_list origin_bios;
	struct bio_list snapshot_bios;

	/* Pointer back to snapshot context */
	struct dm_snapshot *snap;

	/*
	 * 1 indicates the exception has already been sent to
	 * kcopyd.
	 */
	int started;

	/* There was copying error. */
	int copy_error;

	/* A sequence number, it is used for in-order completion. */
	sector_t exception_sequence;

	struct rb_node out_of_order_node;

	/*
	 * For writing a complete chunk, bypassing the copy.
	 */
	struct bio *full_bio;
	bio_end_io_t *full_bio_end_io;
};
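
/*
 * Added summary (derived from the code below): a pending exception is
 * allocated on the first write to a chunk, inserted into s->pending, and a
 * kcopyd copy (or a full-chunk bio) is started.  When the copy completes,
 * pending_complete() moves a completed exception into s->complete, resubmits
 * the bios queued on origin_bios/snapshot_bios and frees the pending entry.
 */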
/*
 * Hash table mapping origin volumes to lists of snapshots and
 * a lock to protect it
 */
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;
struct dm_snap_tracked_chunk {
	struct hlist_node node;
	chunk_t chunk;
};
static void init_tracked_chunk(struct bio *bio)
{
	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));

	INIT_HLIST_NODE(&c->node);
}
static bool is_bio_tracked(struct bio *bio)
{
	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));

	return !hlist_unhashed(&c->node);
}
static void track_chunk(struct dm_snapshot *s, struct bio *bio, chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));

	c->chunk = chunk;

	spin_lock_irq(&s->tracked_chunk_lock);
	hlist_add_head(&c->node,
		       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
	spin_unlock_irq(&s->tracked_chunk_lock);
}
static void stop_tracking_chunk(struct dm_snapshot *s, struct bio *bio)
{
	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
	unsigned long flags;

	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
	hlist_del(&c->node);
	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);
}
static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c;
	int found = 0;

	spin_lock_irq(&s->tracked_chunk_lock);

	hlist_for_each_entry(c,
	    &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
		if (c->chunk == chunk) {
			found = 1;
			break;
		}
	}

	spin_unlock_irq(&s->tracked_chunk_lock);

	return found;
}
/*
 * This conflicting I/O is extremely improbable in the caller,
 * so msleep(1) is sufficient and there is no need for a wait queue.
 */
static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk)
{
	while (__chunk_is_tracked(s, chunk))
		msleep(1);
}
/*
 * One of these per registered origin, held in the snapshot_origins hash
 */
struct origin {
	/* The origin device */
	struct block_device *bdev;

	struct list_head hash_list;

	/* List of snapshots for this origin */
	struct list_head snapshots;
};
/*
 * This structure is allocated for each origin target
 */
struct dm_origin {
	struct dm_dev *dev;
	struct dm_target *ti;
	unsigned split_boundary;
	struct list_head hash_list;
};
/*
 * Size of the hash table for origin volumes. If we make this
 * the size of the minors list then it should be nearly perfect
 */
#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK      0xFF
static struct list_head *_origins;
static struct list_head *_dm_origins;
static struct rw_semaphore _origins_lock;

static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done);
static DEFINE_SPINLOCK(_pending_exceptions_done_spinlock);
static uint64_t _pending_exceptions_done_count;
static int init_origin_hash(void)
{
	int i;

	_origins = kmalloc_array(ORIGIN_HASH_SIZE, sizeof(struct list_head),
				 GFP_KERNEL);
	if (!_origins) {
		DMERR("unable to allocate memory for _origins");
		return -ENOMEM;
	}
	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
		INIT_LIST_HEAD(_origins + i);

	_dm_origins = kmalloc_array(ORIGIN_HASH_SIZE,
				    sizeof(struct list_head),
				    GFP_KERNEL);
	if (!_dm_origins) {
		DMERR("unable to allocate memory for _dm_origins");
		kfree(_origins);
		return -ENOMEM;
	}
	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
		INIT_LIST_HEAD(_dm_origins + i);

	init_rwsem(&_origins_lock);

	return 0;
}
static void exit_origin_hash(void)
{
	kfree(_origins);
	kfree(_dm_origins);
}
static unsigned origin_hash(struct block_device *bdev)
{
	return bdev->bd_dev & ORIGIN_MASK;
}
static struct origin *__lookup_origin(struct block_device *origin)
{
	struct list_head *ol;
	struct origin *o;

	ol = &_origins[origin_hash(origin)];
	list_for_each_entry (o, ol, hash_list)
		if (bdev_equal(o->bdev, origin))
			return o;

	return NULL;
}
static void __insert_origin(struct origin *o)
{
	struct list_head *sl = &_origins[origin_hash(o->bdev)];

	list_add_tail(&o->hash_list, sl);
}
static struct dm_origin *__lookup_dm_origin(struct block_device *origin)
{
	struct list_head *ol;
	struct dm_origin *o;

	ol = &_dm_origins[origin_hash(origin)];
	list_for_each_entry (o, ol, hash_list)
		if (bdev_equal(o->dev->bdev, origin))
			return o;

	return NULL;
}
static void __insert_dm_origin(struct dm_origin *o)
{
	struct list_head *sl = &_dm_origins[origin_hash(o->dev->bdev)];

	list_add_tail(&o->hash_list, sl);
}
static void __remove_dm_origin(struct dm_origin *o)
{
	list_del(&o->hash_list);
}
/*
 * _origins_lock must be held when calling this function.
 * Returns number of snapshots registered using the supplied cow device, plus:
 * snap_src - a snapshot suitable for use as a source of exception handover
 * snap_dest - a snapshot capable of receiving exception handover.
 * snap_merge - an existing snapshot-merge target linked to the same origin.
 * There can be at most one snapshot-merge target. The parameter is optional.
 *
 * Possible return values and states of snap_src and snap_dest.
 *   0: NULL, NULL  - first new snapshot
 *   1: snap_src, NULL - normal snapshot
 *   2: snap_src, snap_dest  - waiting for handover
 *   2: snap_src, NULL - handed over, waiting for old to be deleted
 *   1: NULL, snap_dest - source got destroyed without handover
 */
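/*
 * Illustrative walk-through (not in the original source): when a snapshot
 * table is reloaded, the new target shares the COW device with the old one.
 * The loading target becomes snap_dest (state "2: snap_src, snap_dest") and,
 * once the old table is suspended, snapshot_resume() calls
 * __handover_exceptions() to move the exception table across, leaving
 * "2: snap_src, NULL" until the old target is destroyed.
 */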
static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
					struct dm_snapshot **snap_src,
					struct dm_snapshot **snap_dest,
					struct dm_snapshot **snap_merge)
{
	struct dm_snapshot *s;
	struct origin *o;
	int count = 0;
	int active;

	o = __lookup_origin(snap->origin->bdev);
	if (!o)
		return count;

	list_for_each_entry(s, &o->snapshots, list) {
		if (dm_target_is_snapshot_merge(s->ti) && snap_merge)
			*snap_merge = s;
		if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
			continue;

		down_read(&s->lock);
		active = s->active;
		up_read(&s->lock);

		if (active) {
			if (snap_src)
				*snap_src = s;
		} else if (snap_dest)
			*snap_dest = s;

		count++;
	}

	return count;
}
/*
 * On success, returns 1 if this snapshot is a handover destination,
 * otherwise returns 0.
 */
static int __validate_exception_handover(struct dm_snapshot *snap)
{
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
	struct dm_snapshot *snap_merge = NULL;

	/* Does snapshot need exceptions handed over to it? */
	if ((__find_snapshots_sharing_cow(snap, &snap_src, &snap_dest,
					  &snap_merge) == 2) ||
	    snap_dest) {
		snap->ti->error = "Snapshot cow pairing for exception table handover failed";
		return -EINVAL;
	}

	/*
	 * If no snap_src was found, snap cannot become a handover
	 * destination.
	 */
	if (!snap_src)
		return 0;

	/*
	 * Non-snapshot-merge handover?
	 */
	if (!dm_target_is_snapshot_merge(snap->ti))
		return 1;

	/*
	 * Do not allow more than one merging snapshot.
	 */
	if (snap_merge) {
		snap->ti->error = "A snapshot is already merging.";
		return -EINVAL;
	}

	if (!snap_src->store->type->prepare_merge ||
	    !snap_src->store->type->commit_merge) {
		snap->ti->error = "Snapshot exception store does not support snapshot-merge.";
		return -EINVAL;
	}

	return 1;
}
static void __insert_snapshot(struct origin *o, struct dm_snapshot *s)
{
	struct dm_snapshot *l;

	/* Sort the list according to chunk size, largest-first smallest-last */
	list_for_each_entry(l, &o->snapshots, list)
		if (l->store->chunk_size < s->store->chunk_size)
			break;
	list_add_tail(&s->list, &l->list);
}
/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
 *
 * Also validate snapshot exception store handovers.
 * On success, returns 1 if this registration is a handover destination,
 * otherwise returns 0.
 */
static int register_snapshot(struct dm_snapshot *snap)
{
	struct origin *o, *new_o = NULL;
	struct block_device *bdev = snap->origin->bdev;
	int r = 0;

	new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
	if (!new_o)
		return -ENOMEM;

	down_write(&_origins_lock);

	r = __validate_exception_handover(snap);
	if (r < 0) {
		kfree(new_o);
		goto out;
	}

	o = __lookup_origin(bdev);
	if (o)
		kfree(new_o);
	else {
		/* New origin */
		o = new_o;

		/* Initialise the struct */
		INIT_LIST_HEAD(&o->snapshots);
		o->bdev = bdev;

		__insert_origin(o);
	}

	__insert_snapshot(o, snap);

out:
	up_write(&_origins_lock);

	return r;
}
/*
 * Move snapshot to correct place in list according to chunk size.
 */
static void reregister_snapshot(struct dm_snapshot *s)
{
	struct block_device *bdev = s->origin->bdev;

	down_write(&_origins_lock);

	list_del(&s->list);
	__insert_snapshot(__lookup_origin(bdev), s);

	up_write(&_origins_lock);
}
static void unregister_snapshot(struct dm_snapshot *s)
{
	struct origin *o;

	down_write(&_origins_lock);
	o = __lookup_origin(s->origin->bdev);

	list_del(&s->list);
	if (o && list_empty(&o->snapshots)) {
		list_del(&o->hash_list);
		kfree(o);
	}

	up_write(&_origins_lock);
}
/*
 * Implementation of the exception hash tables.
 * The lowest hash_shift bits of the chunk number are ignored, allowing
 * some consecutive chunks to be grouped together.
 */
static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk);
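
/*
 * Illustrative example (not in the original source): with hash_shift == 4,
 * exception_hash() maps chunks 0..15 to the same bucket (assuming hash_mask
 * is large enough), so a run of consecutive chunks shares one slot and, with
 * the per-slot locking below, one lock.
 */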
/* Lock to protect access to the completed and pending exception hash tables. */
struct dm_exception_table_lock {
	struct hlist_bl_head *complete_slot;
	struct hlist_bl_head *pending_slot;
};
static void dm_exception_table_lock_init(struct dm_snapshot *s, chunk_t chunk,
					 struct dm_exception_table_lock *lock)
{
	struct dm_exception_table *complete = &s->complete;
	struct dm_exception_table *pending = &s->pending;

	lock->complete_slot = &complete->table[exception_hash(complete, chunk)];
	lock->pending_slot = &pending->table[exception_hash(pending, chunk)];
}
static void dm_exception_table_lock(struct dm_exception_table_lock *lock)
{
	hlist_bl_lock(lock->complete_slot);
	hlist_bl_lock(lock->pending_slot);
}
static void dm_exception_table_unlock(struct dm_exception_table_lock *lock)
{
	hlist_bl_unlock(lock->pending_slot);
	hlist_bl_unlock(lock->complete_slot);
}
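
/*
 * Added note (derived from the two helpers above): both slots are always
 * taken in the same order - complete slot first, then pending slot - and
 * released in reverse, so there is a single, consistent lock ordering for a
 * given chunk.
 */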
static int dm_exception_table_init(struct dm_exception_table *et,
				   uint32_t size, unsigned hash_shift)
{
	unsigned int i;

	et->hash_shift = hash_shift;
	et->hash_mask = size - 1;
	et->table = dm_vcalloc(size, sizeof(struct hlist_bl_head));
	if (!et->table)
		return -ENOMEM;

	for (i = 0; i < size; i++)
		INIT_HLIST_BL_HEAD(et->table + i);

	return 0;
}
static void dm_exception_table_exit(struct dm_exception_table *et,
				    struct kmem_cache *mem)
{
	struct hlist_bl_head *slot;
	struct dm_exception *ex;
	struct hlist_bl_node *pos, *n;
	int i, size;

	size = et->hash_mask + 1;
	for (i = 0; i < size; i++) {
		slot = et->table + i;

		hlist_bl_for_each_entry_safe(ex, pos, n, slot, hash_list)
			kmem_cache_free(mem, ex);
	}

	vfree(et->table);
}
static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
{
	return (chunk >> et->hash_shift) & et->hash_mask;
}
static void dm_remove_exception(struct dm_exception *e)
{
	hlist_bl_del(&e->hash_list);
}
/*
 * Return the exception data for a sector, or NULL if not
 * remapped.
 */
static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
						chunk_t chunk)
{
	struct hlist_bl_head *slot;
	struct hlist_bl_node *pos;
	struct dm_exception *e;

	slot = &et->table[exception_hash(et, chunk)];
	hlist_bl_for_each_entry(e, pos, slot, hash_list)
		if (chunk >= e->old_chunk &&
		    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
			return e;

	return NULL;
}
static struct dm_exception *alloc_completed_exception(gfp_t gfp)
{
	struct dm_exception *e;

	e = kmem_cache_alloc(exception_cache, gfp);
	if (!e && gfp == GFP_NOIO)
		e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);

	return e;
}
static void free_completed_exception(struct dm_exception *e)
{
	kmem_cache_free(exception_cache, e);
}
static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
{
	struct dm_snap_pending_exception *pe = mempool_alloc(&s->pending_pool,
							     GFP_NOIO);

	atomic_inc(&s->pending_exceptions_count);
	pe->snap = s;

	return pe;
}
static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;

	mempool_free(pe, &s->pending_pool);
	smp_mb__before_atomic();
	atomic_dec(&s->pending_exceptions_count);
}
static void dm_insert_exception(struct dm_exception_table *eh,
				struct dm_exception *new_e)
{
	struct hlist_bl_head *l;
	struct hlist_bl_node *pos;
	struct dm_exception *e = NULL;

	l = &eh->table[exception_hash(eh, new_e->old_chunk)];

	/* Add immediately if this table doesn't support consecutive chunks */
	if (!eh->hash_shift)
		goto out;

	/* List is ordered by old_chunk */
	hlist_bl_for_each_entry(e, pos, l, hash_list) {
		/* Insert after an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk +
					 dm_consecutive_chunk_count(e) + 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
					 dm_consecutive_chunk_count(e) + 1)) {
			dm_consecutive_chunk_count_inc(e);
			free_completed_exception(new_e);
			return;
		}

		/* Insert before an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk - 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
			dm_consecutive_chunk_count_inc(e);
			e->old_chunk--;
			e->new_chunk--;
			free_completed_exception(new_e);
			return;
		}

		if (new_e->old_chunk < e->old_chunk)
			break;
	}

out:
	if (!e) {
		/*
		 * Either the table doesn't support consecutive chunks or slot
		 * l is empty.
		 */
		hlist_bl_add_head(&new_e->hash_list, l);
	} else if (new_e->old_chunk < e->old_chunk) {
		/* Add before an existing exception */
		hlist_bl_add_before(&new_e->hash_list, &e->hash_list);
	} else {
		/* Add to l's tail: e is the last exception in this slot */
		hlist_bl_add_behind(&new_e->hash_list, &e->hash_list);
	}
}
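
/*
 * Illustrative example (not in the original source): if the table holds an
 * exception with old_chunk 10, new_chunk 20 and a consecutive count of 2
 * (covering old chunks 10-12 -> new chunks 20-22), inserting old_chunk 13 /
 * new_chunk 23 simply increments the count instead of adding a new entry;
 * inserting old_chunk 9 / new_chunk 19 extends the range downwards.
 */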
/*
 * Callback used by the exception stores to load exceptions when
 * initialising.
 */
static int dm_add_exception(void *context, chunk_t old, chunk_t new)
{
	struct dm_exception_table_lock lock;
	struct dm_snapshot *s = context;
	struct dm_exception *e;

	e = alloc_completed_exception(GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->old_chunk = old;

	/* Consecutive_count is implicitly initialised to zero */
	e->new_chunk = new;

	/*
	 * Although there is no need to lock access to the exception tables
	 * here, if we don't then hlist_bl_add_head(), called by
	 * dm_insert_exception(), will complain about accessing the
	 * corresponding list without locking it first.
	 */
	dm_exception_table_lock_init(s, old, &lock);

	dm_exception_table_lock(&lock);
	dm_insert_exception(&s->complete, e);
	dm_exception_table_unlock(&lock);

	return 0;
}
/*
 * Return a minimum chunk size of all snapshots that have the specified origin.
 * Return zero if the origin has no snapshots.
 */
static uint32_t __minimum_chunk_size(struct origin *o)
{
	struct dm_snapshot *snap;
	unsigned chunk_size = 0;

	if (o)
		list_for_each_entry(snap, &o->snapshots, list)
			chunk_size = min_not_zero(chunk_size,
						  snap->store->chunk_size);

	return (uint32_t) chunk_size;
}
static int calc_max_buckets(void)
{
	/* use a fixed size of 2MB */
	unsigned long mem = 2 * 1024 * 1024;
	mem /= sizeof(struct hlist_bl_head);

	return mem;
}
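
/*
 * Added worked example (assuming 64-bit pointers, not stated in the source):
 * sizeof(struct hlist_bl_head) is a single pointer, i.e. 8 bytes, so the 2MB
 * budget above allows at most 2 * 1024 * 1024 / 8 = 262144 hash buckets.
 */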
/*
 * Allocate room for a suitable hash table.
 */
static int init_hash_tables(struct dm_snapshot *s)
{
	sector_t hash_size, cow_dev_size, max_buckets;

	/*
	 * Calculate based on the size of the original volume or
	 * the COW volume...
	 */
	cow_dev_size = get_dev_size(s->cow->bdev);
	max_buckets = calc_max_buckets();

	hash_size = cow_dev_size >> s->store->chunk_shift;
	hash_size = min(hash_size, max_buckets);

	if (hash_size < 64)
		hash_size = 64;
	hash_size = rounddown_pow_of_two(hash_size);
	if (dm_exception_table_init(&s->complete, hash_size,
				    DM_CHUNK_CONSECUTIVE_BITS))
		return -ENOMEM;

	/*
	 * Allocate hash table for in-flight exceptions
	 * Make this smaller than the real hash table
	 */
	hash_size >>= 3;
	if (hash_size < 64)
		hash_size = 64;

	if (dm_exception_table_init(&s->pending, hash_size, 0)) {
		dm_exception_table_exit(&s->complete, exception_cache);
		return -ENOMEM;
	}

	return 0;
}
static void merge_shutdown(struct dm_snapshot *s)
{
	clear_bit_unlock(RUNNING_MERGE, &s->state_bits);
	smp_mb__after_atomic();
	wake_up_bit(&s->state_bits, RUNNING_MERGE);
}
static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s)
{
	s->first_merging_chunk = 0;
	s->num_merging_chunks = 0;

	return bio_list_get(&s->bios_queued_during_merge);
}
/*
 * Remove one chunk from the index of completed exceptions.
 */
static int __remove_single_exception_chunk(struct dm_snapshot *s,
					   chunk_t old_chunk)
{
	struct dm_exception *e;

	e = dm_lookup_exception(&s->complete, old_chunk);
	if (!e) {
		DMERR("Corruption detected: exception for block %llu is on disk but not in memory",
		      (unsigned long long)old_chunk);
		return -EINVAL;
	}

	/*
	 * If this is the only chunk using this exception, remove exception.
	 */
	if (!dm_consecutive_chunk_count(e)) {
		dm_remove_exception(e);
		free_completed_exception(e);
		return 0;
	}

	/*
	 * The chunk may be either at the beginning or the end of a
	 * group of consecutive chunks - never in the middle.  We are
	 * removing chunks in the opposite order to that in which they
	 * were added, so this should always be true.
	 * Decrement the consecutive chunk counter and adjust the
	 * starting point if necessary.
	 */
	if (old_chunk == e->old_chunk) {
		e->old_chunk++;
		e->new_chunk++;
	} else if (old_chunk != e->old_chunk +
		   dm_consecutive_chunk_count(e)) {
		DMERR("Attempt to merge block %llu from the middle of a chunk range [%llu - %llu]",
		      (unsigned long long)old_chunk,
		      (unsigned long long)e->old_chunk,
		      (unsigned long long)
		      e->old_chunk + dm_consecutive_chunk_count(e));
		return -EINVAL;
	}

	dm_consecutive_chunk_count_dec(e);

	return 0;
}
static void flush_bios(struct bio *bio);
static int remove_single_exception_chunk(struct dm_snapshot *s)
{
	struct bio *b = NULL;
	int r;
	chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;

	down_write(&s->lock);

	/*
	 * Process chunks (and associated exceptions) in reverse order
	 * so that dm_consecutive_chunk_count_dec() accounting works.
	 */
	do {
		r = __remove_single_exception_chunk(s, old_chunk);
		if (r)
			goto out;
	} while (old_chunk-- > s->first_merging_chunk);

	b = __release_queued_bios_after_merge(s);

out:
	up_write(&s->lock);
	if (b)
		flush_bios(b);

	return r;
}
static int origin_write_extent(struct dm_snapshot *merging_snap,
			       sector_t sector, unsigned chunk_size);

static void merge_callback(int read_err, unsigned long write_err,
			   void *context);
static uint64_t read_pending_exceptions_done_count(void)
{
	uint64_t pending_exceptions_done;

	spin_lock(&_pending_exceptions_done_spinlock);
	pending_exceptions_done = _pending_exceptions_done_count;
	spin_unlock(&_pending_exceptions_done_spinlock);

	return pending_exceptions_done;
}
static void increment_pending_exceptions_done_count(void)
{
	spin_lock(&_pending_exceptions_done_spinlock);
	_pending_exceptions_done_count++;
	spin_unlock(&_pending_exceptions_done_spinlock);

	wake_up_all(&_pending_exceptions_done);
}
static void snapshot_merge_next_chunks(struct dm_snapshot *s)
{
	int i, linear_chunks;
	chunk_t old_chunk, new_chunk;
	struct dm_io_region src, dest;
	sector_t io_size;
	uint64_t previous_count;

	BUG_ON(!test_bit(RUNNING_MERGE, &s->state_bits));
	if (unlikely(test_bit(SHUTDOWN_MERGE, &s->state_bits)))
		goto shut;

	/*
	 * valid flag never changes during merge, so no lock required.
	 */
	if (!s->valid) {
		DMERR("Snapshot is invalid: can't merge");
		goto shut;
	}

	linear_chunks = s->store->type->prepare_merge(s->store, &old_chunk,
						      &new_chunk);
	if (linear_chunks <= 0) {
		if (linear_chunks < 0) {
			DMERR("Read error in exception store: shutting down merge");
			down_write(&s->lock);
			s->merge_failed = true;
			up_write(&s->lock);
		}
		goto shut;
	}

	/* Adjust old_chunk and new_chunk to reflect start of linear region */
	old_chunk = old_chunk + 1 - linear_chunks;
	new_chunk = new_chunk + 1 - linear_chunks;

	/*
	 * Use one (potentially large) I/O to copy all 'linear_chunks'
	 * from the exception store to the origin
	 */
	io_size = linear_chunks * s->store->chunk_size;

	dest.bdev = s->origin->bdev;
	dest.sector = chunk_to_sector(s->store, old_chunk);
	dest.count = min(io_size, get_dev_size(dest.bdev) - dest.sector);

	src.bdev = s->cow->bdev;
	src.sector = chunk_to_sector(s->store, new_chunk);
	src.count = dest.count;

	/*
	 * Reallocate any exceptions needed in other snapshots then
	 * wait for the pending exceptions to complete.
	 * Each time any pending exception (globally on the system)
	 * completes we are woken and repeat the process to find out
	 * if we can proceed.  While this may not seem a particularly
	 * efficient algorithm, it is not expected to have any
	 * significant impact on performance.
	 */
	previous_count = read_pending_exceptions_done_count();
	while (origin_write_extent(s, dest.sector, io_size)) {
		wait_event(_pending_exceptions_done,
			   (read_pending_exceptions_done_count() !=
			    previous_count));
		/* Retry after the wait, until all exceptions are done. */
		previous_count = read_pending_exceptions_done_count();
	}

	down_write(&s->lock);
	s->first_merging_chunk = old_chunk;
	s->num_merging_chunks = linear_chunks;
	up_write(&s->lock);

	/* Wait until writes to all 'linear_chunks' drain */
	for (i = 0; i < linear_chunks; i++)
		__check_for_conflicting_io(s, old_chunk + i);

	dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s);
	return;

shut:
	merge_shutdown(s);
}
static void error_bios(struct bio *bio);
static int flush_data(struct dm_snapshot *s)
{
	struct bio *flush_bio = &s->flush_bio;

	bio_reset(flush_bio);
	bio_set_dev(flush_bio, s->origin->bdev);
	flush_bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

	return submit_bio_wait(flush_bio);
}
static void merge_callback(int read_err, unsigned long write_err, void *context)
{
	struct dm_snapshot *s = context;
	struct bio *b = NULL;

	if (read_err || write_err) {
		if (read_err)
			DMERR("Read error: shutting down merge.");
		else
			DMERR("Write error: shutting down merge.");
		goto shut;
	}

	if (flush_data(s) < 0) {
		DMERR("Flush after merge failed: shutting down merge");
		goto shut;
	}

	if (s->store->type->commit_merge(s->store,
					 s->num_merging_chunks) < 0) {
		DMERR("Write error in exception store: shutting down merge");
		goto shut;
	}

	if (remove_single_exception_chunk(s) < 0)
		goto shut;

	snapshot_merge_next_chunks(s);

	return;

shut:
	down_write(&s->lock);
	s->merge_failed = true;
	b = __release_queued_bios_after_merge(s);
	up_write(&s->lock);
	error_bios(b);

	merge_shutdown(s);
}
static void start_merge(struct dm_snapshot *s)
{
	if (!test_and_set_bit(RUNNING_MERGE, &s->state_bits))
		snapshot_merge_next_chunks(s);
}
/*
 * Stop the merging process and wait until it finishes.
 */
static void stop_merge(struct dm_snapshot *s)
{
	set_bit(SHUTDOWN_MERGE, &s->state_bits);
	wait_on_bit(&s->state_bits, RUNNING_MERGE, TASK_UNINTERRUPTIBLE);
	clear_bit(SHUTDOWN_MERGE, &s->state_bits);
}
static int parse_snapshot_features(struct dm_arg_set *as, struct dm_snapshot *s,
				   struct dm_target *ti)
{
	int r;
	unsigned argc;
	const char *arg_name;

	static const struct dm_arg _args[] = {
		{0, 2, "Invalid number of feature arguments"},
	};

	/*
	 * No feature arguments supplied.
	 */
	if (!as->argc)
		return 0;

	r = dm_read_arg_group(_args, as, &argc, &ti->error);
	if (r)
		return -EINVAL;

	while (argc && !r) {
		arg_name = dm_shift_arg(as);
		argc--;

		if (!strcasecmp(arg_name, "discard_zeroes_cow"))
			s->discard_zeroes_cow = true;

		else if (!strcasecmp(arg_name, "discard_passdown_origin"))
			s->discard_passdown_origin = true;

		else {
			ti->error = "Unrecognised feature requested";
			r = -EINVAL;
			break;
		}
	}

	if (!s->discard_zeroes_cow && s->discard_passdown_origin) {
		/*
		 * TODO: really these are disjoint.. but ti->num_discard_bios
		 * and dm_bio_get_target_bio_nr() require rigid constraints.
		 */
		ti->error = "discard_passdown_origin feature depends on discard_zeroes_cow";
		r = -EINVAL;
	}

	return r;
}
/*
 * Construct a snapshot mapping:
 * <origin_dev> <COW-dev> <p|po|n> <chunk-size> [<# feature args> [<arg>]*]
 */
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dm_snapshot *s;
	struct dm_arg_set as;
	int i;
	int r = -EINVAL;
	char *origin_path, *cow_path;
	dev_t origin_dev, cow_dev;
	unsigned args_used, num_flush_bios = 1;
	fmode_t origin_mode = FMODE_READ;

	if (argc < 4) {
		ti->error = "requires 4 or more arguments";
		r = -EINVAL;
		goto bad;
	}

	if (dm_target_is_snapshot_merge(ti)) {
		num_flush_bios = 2;
		origin_mode = FMODE_WRITE;
	}

	s = kzalloc(sizeof(*s), GFP_KERNEL);
	if (!s) {
		ti->error = "Cannot allocate private snapshot structure";
		r = -ENOMEM;
		goto bad;
	}

	as.argc = argc;
	as.argv = argv;
	dm_consume_args(&as, 4);
	r = parse_snapshot_features(&as, s, ti);
	if (r)
		goto bad_features;

	origin_path = argv[0];
	argv++;
	argc--;

	r = dm_get_device(ti, origin_path, origin_mode, &s->origin);
	if (r) {
		ti->error = "Cannot get origin device";
		goto bad_origin;
	}
	origin_dev = s->origin->bdev->bd_dev;

	cow_path = argv[0];
	argv++;
	argc--;

	cow_dev = dm_get_dev_t(cow_path);
	if (cow_dev && cow_dev == origin_dev) {
		ti->error = "COW device cannot be the same as origin device";
		r = -EINVAL;
		goto bad_cow;
	}

	r = dm_get_device(ti, cow_path, dm_table_get_mode(ti->table), &s->cow);
	if (r) {
		ti->error = "Cannot get COW device";
		goto bad_cow;
	}

	r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store);
	if (r) {
		ti->error = "Couldn't create exception store";
		r = -EINVAL;
		goto bad_store;
	}

	argv += args_used;
	argc -= args_used;

	s->ti = ti;
	s->valid = 1;
	s->snapshot_overflowed = 0;
	s->active = 0;
	atomic_set(&s->pending_exceptions_count, 0);
	spin_lock_init(&s->pe_allocation_lock);
	s->exception_start_sequence = 0;
	s->exception_complete_sequence = 0;
	s->out_of_order_tree = RB_ROOT;
	init_rwsem(&s->lock);
	INIT_LIST_HEAD(&s->list);
	spin_lock_init(&s->pe_lock);
	s->state_bits = 0;
	s->merge_failed = false;
	s->first_merging_chunk = 0;
	s->num_merging_chunks = 0;
	bio_list_init(&s->bios_queued_during_merge);
	bio_init(&s->flush_bio, NULL, 0);

	/* Allocate hash table for COW data */
	if (init_hash_tables(s)) {
		ti->error = "Unable to allocate hash table space";
		r = -ENOMEM;
		goto bad_hash_tables;
	}

	init_waitqueue_head(&s->in_progress_wait);

	s->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
	if (IS_ERR(s->kcopyd_client)) {
		r = PTR_ERR(s->kcopyd_client);
		ti->error = "Could not create kcopyd client";
		goto bad_kcopyd;
	}

	r = mempool_init_slab_pool(&s->pending_pool, MIN_IOS, pending_cache);
	if (r) {
		ti->error = "Could not allocate mempool for pending exceptions";
		goto bad_pending_pool;
	}

	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);

	spin_lock_init(&s->tracked_chunk_lock);

	ti->private = s;
	ti->num_flush_bios = num_flush_bios;
	if (s->discard_zeroes_cow)
		ti->num_discard_bios = (s->discard_passdown_origin ? 2 : 1);
	ti->per_io_data_size = sizeof(struct dm_snap_tracked_chunk);

	/* Add snapshot to the list of snapshots for this origin */
	/* Exceptions aren't triggered till snapshot_resume() is called */
	r = register_snapshot(s);
	if (r == -ENOMEM) {
		ti->error = "Snapshot origin struct allocation failed";
		goto bad_load_and_register;
	} else if (r < 0) {
		/* invalid handover, register_snapshot has set ti->error */
		goto bad_load_and_register;
	}

	/*
	 * Metadata must only be loaded into one table at once, so skip this
	 * if metadata will be handed over during resume.
	 * Chunk size will be set during the handover - set it to zero to
	 * ensure it's ignored.
	 */
	if (r > 0) {
		s->store->chunk_size = 0;
		return 0;
	}

	r = s->store->type->read_metadata(s->store, dm_add_exception,
					  (void *)s);
	if (r < 0) {
		ti->error = "Failed to read snapshot metadata";
		goto bad_read_metadata;
	} else if (r > 0) {
		s->valid = 0;
		DMWARN("Snapshot is marked invalid.");
	}

	if (!s->store->chunk_size) {
		ti->error = "Chunk size not set";
		goto bad_read_metadata;
	}

	r = dm_set_target_max_io_len(ti, s->store->chunk_size);
	if (r)
		goto bad_read_metadata;

	return 0;

bad_read_metadata:
	unregister_snapshot(s);
bad_load_and_register:
	mempool_exit(&s->pending_pool);
bad_pending_pool:
	dm_kcopyd_client_destroy(s->kcopyd_client);
bad_kcopyd:
	dm_exception_table_exit(&s->pending, pending_cache);
	dm_exception_table_exit(&s->complete, exception_cache);
bad_hash_tables:
	dm_exception_store_destroy(s->store);
bad_store:
	dm_put_device(ti, s->cow);
bad_cow:
	dm_put_device(ti, s->origin);
bad_origin:
bad_features:
	kfree(s);
bad:
	return r;
}
static void __free_exceptions(struct dm_snapshot *s)
{
	dm_kcopyd_client_destroy(s->kcopyd_client);
	s->kcopyd_client = NULL;

	dm_exception_table_exit(&s->pending, pending_cache);
	dm_exception_table_exit(&s->complete, exception_cache);
}
static void __handover_exceptions(struct dm_snapshot *snap_src,
				  struct dm_snapshot *snap_dest)
{
	union {
		struct dm_exception_table table_swap;
		struct dm_exception_store *store_swap;
	} u;

	/*
	 * Swap all snapshot context information between the two instances.
	 */
	u.table_swap = snap_dest->complete;
	snap_dest->complete = snap_src->complete;
	snap_src->complete = u.table_swap;

	u.store_swap = snap_dest->store;
	snap_dest->store = snap_src->store;
	snap_dest->store->userspace_supports_overflow = u.store_swap->userspace_supports_overflow;
	snap_src->store = u.store_swap;

	snap_dest->store->snap = snap_dest;
	snap_src->store->snap = snap_src;

	snap_dest->ti->max_io_len = snap_dest->store->chunk_size;
	snap_dest->valid = snap_src->valid;
	snap_dest->snapshot_overflowed = snap_src->snapshot_overflowed;

	/*
	 * Set source invalid to ensure it receives no further I/O.
	 */
	snap_src->valid = 0;
}
static void snapshot_dtr(struct dm_target *ti)
{
#ifdef CONFIG_DM_DEBUG
	int i;
#endif
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

	down_read(&_origins_lock);
	/* Check whether exception handover must be cancelled */
	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
	if (snap_src && snap_dest && (s == snap_src)) {
		down_write(&snap_dest->lock);
		snap_dest->valid = 0;
		up_write(&snap_dest->lock);
		DMERR("Cancelling snapshot handover.");
	}
	up_read(&_origins_lock);

	if (dm_target_is_snapshot_merge(ti))
		stop_merge(s);

	/* Prevent further origin writes from using this snapshot. */
	/* After this returns there can be no new kcopyd jobs. */
	unregister_snapshot(s);

	while (atomic_read(&s->pending_exceptions_count))
		msleep(1);
	/*
	 * Ensure instructions in mempool_exit aren't reordered
	 * before atomic_read.
	 */
	smp_mb();

#ifdef CONFIG_DM_DEBUG
	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
#endif

	__free_exceptions(s);

	mempool_exit(&s->pending_pool);

	dm_exception_store_destroy(s->store);

	bio_uninit(&s->flush_bio);

	dm_put_device(ti, s->cow);

	dm_put_device(ti, s->origin);

	WARN_ON(s->in_progress);

	kfree(s);
}
static void account_start_copy(struct dm_snapshot *s)
{
	spin_lock(&s->in_progress_wait.lock);
	s->in_progress++;
	spin_unlock(&s->in_progress_wait.lock);
}
static void account_end_copy(struct dm_snapshot *s)
{
	spin_lock(&s->in_progress_wait.lock);
	BUG_ON(!s->in_progress);
	s->in_progress--;
	if (likely(s->in_progress <= cow_threshold) &&
	    unlikely(waitqueue_active(&s->in_progress_wait)))
		wake_up_locked(&s->in_progress_wait);
	spin_unlock(&s->in_progress_wait.lock);
}
static bool wait_for_in_progress(struct dm_snapshot *s, bool unlock_origins)
{
	if (unlikely(s->in_progress > cow_threshold)) {
		spin_lock(&s->in_progress_wait.lock);
		if (likely(s->in_progress > cow_threshold)) {
			/*
			 * NOTE: this throttle doesn't account for whether
			 * the caller is servicing an IO that will trigger a COW
			 * so excess throttling may result for chunks not required
			 * to be COW'd.  But if cow_threshold was reached, extra
			 * throttling is unlikely to negatively impact performance.
			 */
			DECLARE_WAITQUEUE(wait, current);
			__add_wait_queue(&s->in_progress_wait, &wait);
			__set_current_state(TASK_UNINTERRUPTIBLE);
			spin_unlock(&s->in_progress_wait.lock);
			if (unlock_origins)
				up_read(&_origins_lock);
			io_schedule();
			remove_wait_queue(&s->in_progress_wait, &wait);
			return false;
		}
		spin_unlock(&s->in_progress_wait.lock);
	}
	return true;
}
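
/*
 * Added note (derived from the function above): when in_progress exceeds
 * cow_threshold the caller is put to sleep on in_progress_wait and the
 * function returns false, so callers loop and retake their locks before
 * retrying; account_end_copy() wakes the queue once the count drops back
 * under the threshold.
 */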
/*
 * Flush a list of buffers.
 */
static void flush_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		submit_bio_noacct(bio);
		bio = n;
	}
}
static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit);
/*
 * Flush a list of buffers.
 */
static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
{
	struct bio *n;
	int r;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		r = do_origin(s->origin, bio, false);
		if (r == DM_MAPIO_REMAPPED)
			submit_bio_noacct(bio);
		bio = n;
	}
}
/*
 * Error a list of buffers.
 */
static void error_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		bio_io_error(bio);
		bio = n;
	}
}
static void __invalidate_snapshot(struct dm_snapshot *s, int err)
{
	if (!s->valid)
		return;

	if (err == -EIO)
		DMERR("Invalidating snapshot: Error reading/writing.");
	else if (err == -ENOMEM)
		DMERR("Invalidating snapshot: Unable to allocate exception.");

	if (s->store->type->drop_snapshot)
		s->store->type->drop_snapshot(s->store);

	s->valid = 0;

	dm_table_event(s->ti->table);
}
static void invalidate_snapshot(struct dm_snapshot *s, int err)
{
	down_write(&s->lock);
	__invalidate_snapshot(s, err);
	up_write(&s->lock);
}
static void pending_complete(void *context, int success)
{
	struct dm_snap_pending_exception *pe = context;
	struct dm_exception *e;
	struct dm_snapshot *s = pe->snap;
	struct bio *origin_bios = NULL;
	struct bio *snapshot_bios = NULL;
	struct bio *full_bio = NULL;
	struct dm_exception_table_lock lock;
	int error = 0;

	dm_exception_table_lock_init(s, pe->e.old_chunk, &lock);

	if (!success) {
		/* Read/write error - snapshot is unusable */
		invalidate_snapshot(s, -EIO);
		error = 1;

		dm_exception_table_lock(&lock);
		goto out;
	}

	e = alloc_completed_exception(GFP_NOIO);
	if (!e) {
		invalidate_snapshot(s, -ENOMEM);
		error = 1;

		dm_exception_table_lock(&lock);
		goto out;
	}
	*e = pe->e;

	down_read(&s->lock);
	dm_exception_table_lock(&lock);
	if (!s->valid) {
		up_read(&s->lock);
		free_completed_exception(e);
		error = 1;

		goto out;
	}

	/*
	 * Add a proper exception. After inserting the completed exception all
	 * subsequent snapshot reads to this chunk will be redirected to the
	 * COW device.  This ensures that we do not starve. Moreover, as long
	 * as the pending exception exists, neither origin writes nor snapshot
	 * merging can overwrite the chunk in origin.
	 */
	dm_insert_exception(&s->complete, e);
	up_read(&s->lock);

	/* Wait for conflicting reads to drain */
	if (__chunk_is_tracked(s, pe->e.old_chunk)) {
		dm_exception_table_unlock(&lock);
		__check_for_conflicting_io(s, pe->e.old_chunk);
		dm_exception_table_lock(&lock);
	}

out:
	/* Remove the in-flight exception from the list */
	dm_remove_exception(&pe->e);

	dm_exception_table_unlock(&lock);

	snapshot_bios = bio_list_get(&pe->snapshot_bios);
	origin_bios = bio_list_get(&pe->origin_bios);
	full_bio = pe->full_bio;
	if (full_bio)
		full_bio->bi_end_io = pe->full_bio_end_io;
	increment_pending_exceptions_done_count();

	/* Submit any pending write bios */
	if (error) {
		if (full_bio)
			bio_io_error(full_bio);
		error_bios(snapshot_bios);
	} else {
		if (full_bio)
			bio_endio(full_bio);
		flush_bios(snapshot_bios);
	}

	retry_origin_bios(s, origin_bios);

	free_pending_exception(pe);
}
static void complete_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;

	/* Update the metadata if we are persistent */
	s->store->type->commit_exception(s->store, &pe->e, !pe->copy_error,
					 pending_complete, pe);
}
/*
 * Called when the copy I/O has finished.  kcopyd actually runs
 * this code so don't block.
 */
static void copy_callback(int read_err, unsigned long write_err, void *context)
{
	struct dm_snap_pending_exception *pe = context;
	struct dm_snapshot *s = pe->snap;

	pe->copy_error = read_err || write_err;

	if (pe->exception_sequence == s->exception_complete_sequence) {
		struct rb_node *next;

		s->exception_complete_sequence++;
		complete_exception(pe);

		next = rb_first(&s->out_of_order_tree);
		while (next) {
			pe = rb_entry(next, struct dm_snap_pending_exception,
					out_of_order_node);
			if (pe->exception_sequence != s->exception_complete_sequence)
				break;
			next = rb_next(next);
			s->exception_complete_sequence++;
			rb_erase(&pe->out_of_order_node, &s->out_of_order_tree);
			complete_exception(pe);
		}
	} else {
		struct rb_node *parent = NULL;
		struct rb_node **p = &s->out_of_order_tree.rb_node;
		struct dm_snap_pending_exception *pe2;

		while (*p) {
			pe2 = rb_entry(*p, struct dm_snap_pending_exception, out_of_order_node);
			parent = *p;

			BUG_ON(pe->exception_sequence == pe2->exception_sequence);
			if (pe->exception_sequence < pe2->exception_sequence)
				p = &((*p)->rb_left);
			else
				p = &((*p)->rb_right);
		}

		rb_link_node(&pe->out_of_order_node, parent, p);
		rb_insert_color(&pe->out_of_order_node, &s->out_of_order_tree);
	}
	account_end_copy(s);
}
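
/*
 * Added note (derived from the function above): exceptions are committed
 * strictly in exception_sequence order.  A completion that arrives early is
 * parked in out_of_order_tree (keyed by its sequence number) and drained as
 * soon as the missing earlier sequences complete.
 */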
/*
 * Dispatches the copy operation to kcopyd.
 */
static void start_copy(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;
	struct dm_io_region src, dest;
	struct block_device *bdev = s->origin->bdev;
	sector_t dev_size;

	dev_size = get_dev_size(bdev);

	src.bdev = bdev;
	src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
	src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector);

	dest.bdev = s->cow->bdev;
	dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
	dest.count = src.count;

	/* Hand over to kcopyd */
	account_start_copy(s);
	dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
}
static void full_bio_end_io(struct bio *bio)
{
	void *callback_data = bio->bi_private;

	dm_kcopyd_do_callback(callback_data, 0, bio->bi_status ? 1 : 0);
}
static void start_full_bio(struct dm_snap_pending_exception *pe,
			   struct bio *bio)
{
	struct dm_snapshot *s = pe->snap;
	void *callback_data;

	pe->full_bio = bio;
	pe->full_bio_end_io = bio->bi_end_io;

	account_start_copy(s);
	callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
						   copy_callback, pe);

	bio->bi_end_io = full_bio_end_io;
	bio->bi_private = callback_data;

	submit_bio_noacct(bio);
}
static struct dm_snap_pending_exception *
__lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_exception *e = dm_lookup_exception(&s->pending, chunk);

	if (!e)
		return NULL;

	return container_of(e, struct dm_snap_pending_exception, e);
}
/*
 * Inserts a pending exception into the pending table.
 *
 * NOTE: a write lock must be held on the chunk's pending exception table slot
 * before calling this.
 */
static struct dm_snap_pending_exception *
__insert_pending_exception(struct dm_snapshot *s,
			   struct dm_snap_pending_exception *pe, chunk_t chunk)
{
	pe->e.old_chunk = chunk;
	bio_list_init(&pe->origin_bios);
	bio_list_init(&pe->snapshot_bios);
	pe->started = 0;
	pe->full_bio = NULL;

	spin_lock(&s->pe_allocation_lock);
	if (s->store->type->prepare_exception(s->store, &pe->e)) {
		spin_unlock(&s->pe_allocation_lock);
		free_pending_exception(pe);
		return NULL;
	}

	pe->exception_sequence = s->exception_start_sequence++;
	spin_unlock(&s->pe_allocation_lock);

	dm_insert_exception(&s->pending, &pe->e);

	return pe;
}
/*
 * Looks to see if this snapshot already has a pending exception
 * for this chunk, otherwise it allocates a new one and inserts
 * it into the pending table.
 *
 * NOTE: a write lock must be held on the chunk's pending exception table slot
 * before calling this.
 */
static struct dm_snap_pending_exception *
__find_pending_exception(struct dm_snapshot *s,
			 struct dm_snap_pending_exception *pe, chunk_t chunk)
{
	struct dm_snap_pending_exception *pe2;

	pe2 = __lookup_pending_exception(s, chunk);
	if (pe2) {
		free_pending_exception(pe);
		return pe2;
	}

	return __insert_pending_exception(s, pe, chunk);
}
static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
			    struct bio *bio, chunk_t chunk)
{
	bio_set_dev(bio, s->cow->bdev);
	bio->bi_iter.bi_sector =
		chunk_to_sector(s->store, dm_chunk_number(e->new_chunk) +
				(chunk - e->old_chunk)) +
		(bio->bi_iter.bi_sector & s->store->chunk_mask);
}
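
/*
 * Illustrative example (numbers made up, not from the original source): with
 * a 16-sector chunk (chunk_mask == 15), a bio at sector 35 lies in chunk 2 at
 * offset 3.  If the exception maps old chunk 2 to new chunk 7, the bio is
 * redirected to COW sector 7 * 16 + 3 = 115.
 */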
static void zero_callback(int read_err, unsigned long write_err, void *context)
{
	struct bio *bio = context;
	struct dm_snapshot *s = bio->bi_private;

	account_end_copy(s);
	bio->bi_status = write_err ? BLK_STS_IOERR : 0;
	bio_endio(bio);
}
static void zero_exception(struct dm_snapshot *s, struct dm_exception *e,
			   struct bio *bio, chunk_t chunk)
{
	struct dm_io_region dest;

	dest.bdev = s->cow->bdev;
	dest.sector = bio->bi_iter.bi_sector;
	dest.count = s->store->chunk_size;

	account_start_copy(s);
	WARN_ON_ONCE(bio->bi_private);
	bio->bi_private = s;
	dm_kcopyd_zero(s->kcopyd_client, 1, &dest, 0, zero_callback, bio);
}
static bool io_overlaps_chunk(struct dm_snapshot *s, struct bio *bio)
{
	return bio->bi_iter.bi_size ==
		(s->store->chunk_size << SECTOR_SHIFT);
}
static int snapshot_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_exception *e;
	struct dm_snapshot *s = ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;
	struct dm_snap_pending_exception *pe = NULL;
	struct dm_exception_table_lock lock;

	init_tracked_chunk(bio);

	if (bio->bi_opf & REQ_PREFLUSH) {
		bio_set_dev(bio, s->cow->bdev);
		return DM_MAPIO_REMAPPED;
	}

	chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
	dm_exception_table_lock_init(s, chunk, &lock);

	/* Full snapshots are not usable */
	/* To get here the table must be live so s->active is always set. */
	if (!s->valid)
		return DM_MAPIO_KILL;

	if (bio_data_dir(bio) == WRITE) {
		while (unlikely(!wait_for_in_progress(s, false)))
			; /* wait_for_in_progress() has slept */
	}

	down_read(&s->lock);
	dm_exception_table_lock(&lock);

	if (!s->valid || (unlikely(s->snapshot_overflowed) &&
	    bio_data_dir(bio) == WRITE)) {
		r = DM_MAPIO_KILL;
		goto out_unlock;
	}

	if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
		if (s->discard_passdown_origin && dm_bio_get_target_bio_nr(bio)) {
			/*
			 * passdown discard to origin (without triggering
			 * snapshot exceptions via do_origin; doing so would
			 * defeat the goal of freeing space in origin that is
			 * implied by the "discard_passdown_origin" feature)
			 */
			bio_set_dev(bio, s->origin->bdev);
			track_chunk(s, bio, chunk);
			goto out_unlock;
		}
		/* discard to snapshot (target_bio_nr == 0) zeroes exceptions */
	}

	/* If the block is already remapped - use that, else remap it */
	e = dm_lookup_exception(&s->complete, chunk);
	if (e) {
		remap_exception(s, e, bio, chunk);
		if (unlikely(bio_op(bio) == REQ_OP_DISCARD) &&
		    io_overlaps_chunk(s, bio)) {
			dm_exception_table_unlock(&lock);
			up_read(&s->lock);
			zero_exception(s, e, bio, chunk);
			r = DM_MAPIO_SUBMITTED; /* discard is not issued */
			goto out;
		}
		goto out_unlock;
	}

	if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
		/*
		 * If no exception exists, complete discard immediately
		 * otherwise it'll trigger copy-out.
		 */
		bio_endio(bio);
		r = DM_MAPIO_SUBMITTED;
		goto out_unlock;
	}

	/*
	 * Write to snapshot - higher level takes care of RW/RO
	 * flags so we should only get this if we are
	 * writable.
	 */
	if (bio_data_dir(bio) == WRITE) {
		pe = __lookup_pending_exception(s, chunk);
		if (!pe) {
			dm_exception_table_unlock(&lock);
			pe = alloc_pending_exception(s);
			dm_exception_table_lock(&lock);

			e = dm_lookup_exception(&s->complete, chunk);
			if (e) {
				free_pending_exception(pe);
				remap_exception(s, e, bio, chunk);
				goto out_unlock;
			}

			pe = __find_pending_exception(s, pe, chunk);
			if (!pe) {
				dm_exception_table_unlock(&lock);
				up_read(&s->lock);

				down_write(&s->lock);

				if (s->store->userspace_supports_overflow) {
					if (s->valid && !s->snapshot_overflowed) {
						s->snapshot_overflowed = 1;
						DMERR("Snapshot overflowed: Unable to allocate exception.");
					}
				} else
					__invalidate_snapshot(s, -ENOMEM);
				up_write(&s->lock);

				r = DM_MAPIO_KILL;
				goto out;
			}
		}

		remap_exception(s, &pe->e, bio, chunk);

		r = DM_MAPIO_SUBMITTED;

		if (!pe->started && io_overlaps_chunk(s, bio)) {
			pe->started = 1;

			dm_exception_table_unlock(&lock);
			up_read(&s->lock);

			start_full_bio(pe, bio);
			goto out;
		}

		bio_list_add(&pe->snapshot_bios, bio);

		if (!pe->started) {
			/* this is protected by the exception table lock */
			pe->started = 1;

			dm_exception_table_unlock(&lock);
			up_read(&s->lock);

			start_copy(pe);
			goto out;
		}
	} else {
		bio_set_dev(bio, s->origin->bdev);
		track_chunk(s, bio, chunk);
	}

out_unlock:
	dm_exception_table_unlock(&lock);
	up_read(&s->lock);
out:
	return r;
}
/*
 * A snapshot-merge target behaves like a combination of a snapshot
 * target and a snapshot-origin target.  It only generates new
 * exceptions in other snapshots and not in the one that is being
 * merged.
 *
 * For each chunk, if there is an existing exception, it is used to
 * redirect I/O to the cow device.  Otherwise I/O is sent to the origin,
 * which in turn might generate exceptions in other snapshots.
 * If merging is currently taking place on the chunk in question, the
 * I/O is deferred by adding it to s->bios_queued_during_merge.
 */
static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_exception *e;
	struct dm_snapshot *s = ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;

	init_tracked_chunk(bio);

	if (bio->bi_opf & REQ_PREFLUSH) {
		if (!dm_bio_get_target_bio_nr(bio))
			bio_set_dev(bio, s->origin->bdev);
		else
			bio_set_dev(bio, s->cow->bdev);
		return DM_MAPIO_REMAPPED;
	}

	if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
		/* Once merging, discards no longer effect change */
		bio_endio(bio);
		return DM_MAPIO_SUBMITTED;
	}

	chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);

	down_write(&s->lock);

	/* Full merging snapshots are redirected to the origin */
	if (!s->valid)
		goto redirect_to_origin;

	/* If the block is already remapped - use that */
	e = dm_lookup_exception(&s->complete, chunk);
	if (e) {
		/* Queue writes overlapping with chunks being merged */
		if (bio_data_dir(bio) == WRITE &&
		    chunk >= s->first_merging_chunk &&
		    chunk < (s->first_merging_chunk +
			     s->num_merging_chunks)) {
			bio_set_dev(bio, s->origin->bdev);
			bio_list_add(&s->bios_queued_during_merge, bio);
			r = DM_MAPIO_SUBMITTED;
			goto out_unlock;
		}

		remap_exception(s, e, bio, chunk);

		if (bio_data_dir(bio) == WRITE)
			track_chunk(s, bio, chunk);
		goto out_unlock;
	}

redirect_to_origin:
	bio_set_dev(bio, s->origin->bdev);

	if (bio_data_dir(bio) == WRITE) {
		up_write(&s->lock);
		return do_origin(s->origin, bio, false);
	}

out_unlock:
	up_write(&s->lock);

	return r;
}
static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
			   blk_status_t *error)
{
	struct dm_snapshot *s = ti->private;

	if (is_bio_tracked(bio))
		stop_tracking_chunk(s, bio);

	return DM_ENDIO_DONE;
}
static void snapshot_merge_presuspend(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	stop_merge(s);
}
static int snapshot_preresume(struct dm_target *ti)
{
	int r = 0;
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

	down_read(&_origins_lock);
	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
	if (snap_src && snap_dest) {
		down_read(&snap_src->lock);
		if (s == snap_src) {
			DMERR("Unable to resume snapshot source until handover completes.");
			r = -EINVAL;
		} else if (!dm_suspended(snap_src->ti)) {
			DMERR("Unable to perform snapshot handover until source is suspended.");
			r = -EINVAL;
		}
		up_read(&snap_src->lock);
	}
	up_read(&_origins_lock);

	return r;
}
static void snapshot_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL, *snap_merging = NULL;
	struct dm_origin *o;
	struct mapped_device *origin_md = NULL;
	bool must_restart_merging = false;

	down_read(&_origins_lock);

	o = __lookup_dm_origin(s->origin->bdev);
	if (o)
		origin_md = dm_table_get_md(o->ti->table);
	if (!origin_md) {
		(void) __find_snapshots_sharing_cow(s, NULL, NULL, &snap_merging);
		if (snap_merging)
			origin_md = dm_table_get_md(snap_merging->ti->table);
	}
	if (origin_md == dm_table_get_md(ti->table))
		origin_md = NULL;
	if (origin_md) {
		if (dm_hold(origin_md))
			origin_md = NULL;
	}

	up_read(&_origins_lock);

	if (origin_md) {
		dm_internal_suspend_fast(origin_md);
		if (snap_merging && test_bit(RUNNING_MERGE, &snap_merging->state_bits)) {
			must_restart_merging = true;
			stop_merge(snap_merging);
		}
	}

	down_read(&_origins_lock);

	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
	if (snap_src && snap_dest) {
		down_write(&snap_src->lock);
		down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
		__handover_exceptions(snap_src, snap_dest);
		up_write(&snap_dest->lock);
		up_write(&snap_src->lock);
	}

	up_read(&_origins_lock);

	if (origin_md) {
		if (must_restart_merging)
			start_merge(snap_merging);
		dm_internal_resume_fast(origin_md);
		dm_put(origin_md);
	}

	/* Now we have correct chunk size, reregister */
	reregister_snapshot(s);

	down_write(&s->lock);
	s->active = 1;
	up_write(&s->lock);
}
static uint32_t get_origin_minimum_chunksize(struct block_device *bdev)
{
	uint32_t min_chunksize;

	down_read(&_origins_lock);
	min_chunksize = __minimum_chunk_size(__lookup_origin(bdev));
	up_read(&_origins_lock);

	return min_chunksize;
}
static void snapshot_merge_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	/*
	 * Handover exceptions from existing snapshot.
	 */
	snapshot_resume(ti);

	/*
	 * snapshot-merge acts as an origin, so set ti->max_io_len
	 */
	ti->max_io_len = get_origin_minimum_chunksize(s->origin->bdev);

	start_merge(s);
}
static void snapshot_status(struct dm_target *ti, status_type_t type,
			    unsigned status_flags, char *result, unsigned maxlen)
{
	unsigned sz = 0;
	struct dm_snapshot *snap = ti->private;
	unsigned num_features;

	switch (type) {
	case STATUSTYPE_INFO:

		down_write(&snap->lock);

		if (!snap->valid)
			DMEMIT("Invalid");
		else if (snap->merge_failed)
			DMEMIT("Merge failed");
		else if (snap->snapshot_overflowed)
			DMEMIT("Overflow");
		else {
			if (snap->store->type->usage) {
				sector_t total_sectors, sectors_allocated,
					 metadata_sectors;
				snap->store->type->usage(snap->store,
							 &total_sectors,
							 &sectors_allocated,
							 &metadata_sectors);
				DMEMIT("%llu/%llu %llu",
				       (unsigned long long)sectors_allocated,
				       (unsigned long long)total_sectors,
				       (unsigned long long)metadata_sectors);
			} else
				DMEMIT("Unknown");
		}

		up_write(&snap->lock);

		break;

	case STATUSTYPE_TABLE:
		/*
		 * kdevname returns a static pointer so we need
		 * to make private copies if the output is to
		 * be used again.
		 */
		DMEMIT("%s %s", snap->origin->name, snap->cow->name);
		sz += snap->store->type->status(snap->store, type, result + sz,
						maxlen - sz);
		num_features = snap->discard_zeroes_cow + snap->discard_passdown_origin;
		if (num_features) {
			DMEMIT(" %u", num_features);
			if (snap->discard_zeroes_cow)
				DMEMIT(" discard_zeroes_cow");
			if (snap->discard_passdown_origin)
				DMEMIT(" discard_passdown_origin");
		}
		break;
	}
}
static int snapshot_iterate_devices(struct dm_target *ti,
				    iterate_devices_callout_fn fn, void *data)
{
	struct dm_snapshot *snap = ti->private;
	int r;

	r = fn(ti, snap->origin, 0, ti->len, data);

	if (!r)
		r = fn(ti, snap->cow, 0, get_dev_size(snap->cow->bdev), data);

	return r;
}
static void snapshot_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct dm_snapshot *snap = ti->private;

	if (snap->discard_zeroes_cow) {
		struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

		down_read(&_origins_lock);

		(void) __find_snapshots_sharing_cow(snap, &snap_src, &snap_dest, NULL);
		if (snap_src && snap_dest)
			snap = snap_src;

		/* All discards are split on chunk_size boundary */
		limits->discard_granularity = snap->store->chunk_size;
		limits->max_discard_sectors = snap->store->chunk_size;

		up_read(&_origins_lock);
	}
}
/*-----------------------------------------------------------------
 * Origin methods
 *---------------------------------------------------------------*/
/*
 * If no exceptions need creating, DM_MAPIO_REMAPPED is returned and any
 * supplied bio was ignored.  The caller may submit it immediately.
 * (No remapping actually occurs as the origin is always a direct linear
 * map.)
 *
 * If further exceptions are required, DM_MAPIO_SUBMITTED is returned
 * and any supplied bio is added to a list to be submitted once all
 * the necessary exceptions exist.
 */
static int __origin_write(struct list_head *snapshots, sector_t sector,
			  struct bio *bio)
{
	int r = DM_MAPIO_REMAPPED;
	struct dm_snapshot *snap;
	struct dm_exception *e;
	struct dm_snap_pending_exception *pe, *pe2;
	struct dm_snap_pending_exception *pe_to_start_now = NULL;
	struct dm_snap_pending_exception *pe_to_start_last = NULL;
	struct dm_exception_table_lock lock;
	chunk_t chunk;

	/* Do all the snapshots on this origin */
	list_for_each_entry (snap, snapshots, list) {
		/*
		 * Don't make new exceptions in a merging snapshot
		 * because it has effectively been deleted
		 */
		if (dm_target_is_snapshot_merge(snap->ti))
			continue;

		/* Nothing to do if writing beyond end of snapshot */
		if (sector >= dm_table_get_size(snap->ti->table))
			continue;

		/*
		 * Remember, different snapshots can have
		 * different chunk sizes.
		 */
		chunk = sector_to_chunk(snap->store, sector);
		dm_exception_table_lock_init(snap, chunk, &lock);

		down_read(&snap->lock);
		dm_exception_table_lock(&lock);

		/* Only deal with valid and active snapshots */
		if (!snap->valid || !snap->active)
			goto next_snapshot;

		pe = __lookup_pending_exception(snap, chunk);
		if (!pe) {
			/*
			 * Check exception table to see if block is already
			 * remapped in this snapshot and trigger an exception
			 * if not.
			 */
			e = dm_lookup_exception(&snap->complete, chunk);
			if (e)
				goto next_snapshot;

			dm_exception_table_unlock(&lock);
			pe = alloc_pending_exception(snap);
			dm_exception_table_lock(&lock);

			pe2 = __lookup_pending_exception(snap, chunk);

			if (!pe2) {
				e = dm_lookup_exception(&snap->complete, chunk);
				if (e) {
					free_pending_exception(pe);
					goto next_snapshot;
				}

				pe = __insert_pending_exception(snap, pe, chunk);
				if (!pe) {
					dm_exception_table_unlock(&lock);
					up_read(&snap->lock);

					invalidate_snapshot(snap, -ENOMEM);

					continue;
				}
			} else {
				free_pending_exception(pe);
				pe = pe2;
			}
		}

		r = DM_MAPIO_SUBMITTED;

		/*
		 * If an origin bio was supplied, queue it to wait for the
		 * completion of this exception, and start this one last,
		 * at the end of the function.
		 */
		if (bio) {
			bio_list_add(&pe->origin_bios, bio);
			bio = NULL;

			if (!pe->started) {
				pe->started = 1;
				pe_to_start_last = pe;
			}
		}

		if (!pe->started) {
			pe->started = 1;
			pe_to_start_now = pe;
		}

next_snapshot:
		dm_exception_table_unlock(&lock);
		up_read(&snap->lock);

		if (pe_to_start_now) {
			start_copy(pe_to_start_now);
			pe_to_start_now = NULL;
		}
	}

	/*
	 * Submit the exception against which the bio is queued last,
	 * to give the other exceptions a head start.
	 */
	if (pe_to_start_last)
		start_copy(pe_to_start_last);

	return r;
}
/*
 * Called on a write from the origin driver.
 */
static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit)
{
	struct origin *o;
	int r = DM_MAPIO_REMAPPED;

again:
	down_read(&_origins_lock);
	o = __lookup_origin(origin->bdev);
	if (o) {
		if (limit) {
			struct dm_snapshot *s;
			list_for_each_entry(s, &o->snapshots, list)
				if (unlikely(!wait_for_in_progress(s, true)))
					goto again;
		}

		r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio);
	}
	up_read(&_origins_lock);

	return r;
}
/*
 * Trigger exceptions in all non-merging snapshots.
 *
 * The chunk size of the merging snapshot may be larger than the chunk
 * size of some other snapshot so we may need to reallocate multiple
 * chunks in other snapshots.
 *
 * We scan all the overlapping exceptions in the other snapshots.
 * Returns 1 if anything was reallocated and must be waited for,
 * otherwise returns 0.
 *
 * size must be a multiple of merging_snap's chunk_size.
 */
static int origin_write_extent(struct dm_snapshot *merging_snap,
			       sector_t sector, unsigned size)
{
	int must_wait = 0;
	sector_t n;
	struct origin *o;

	/*
	 * The origin's __minimum_chunk_size() got stored in max_io_len
	 * by snapshot_merge_resume().
	 */
	down_read(&_origins_lock);
	o = __lookup_origin(merging_snap->origin->bdev);
	for (n = 0; n < size; n += merging_snap->ti->max_io_len)
		if (__origin_write(&o->snapshots, sector + n, NULL) ==
		    DM_MAPIO_SUBMITTED)
			must_wait = 1;
	up_read(&_origins_lock);

	return must_wait;
}
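/*
 * Illustrative example (not part of the original source): assume the merging
 * snapshot uses 128-sector chunks while another snapshot of the same origin
 * uses 16-sector chunks.  max_io_len is then 16, so a hypothetical call such
 * as origin_write_extent(ms, 1024, 128) steps through sectors 1024, 1040, ...,
 * 1136 and lets __origin_write() allocate each of the eight 16-sector chunks
 * that overlap the single 128-sector chunk being merged.
 */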
/*
 * Origin: maps a linear range of a device, with hooks for snapshotting.
 */

/*
 * Construct an origin mapping: <dev_path>
 * The context for an origin is merely a 'struct dm_dev *'
 * pointing to the real device.
 */
static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	struct dm_origin *o;

	if (argc != 1) {
		ti->error = "origin: incorrect number of arguments";
		return -EINVAL;
	}

	o = kmalloc(sizeof(struct dm_origin), GFP_KERNEL);
	if (!o) {
		ti->error = "Cannot allocate private origin structure";
		r = -ENOMEM;
		goto bad_alloc;
	}

	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &o->dev);
	if (r) {
		ti->error = "Cannot get target device";
		goto bad_open;
	}

	o->ti = ti;
	ti->private = o;
	ti->num_flush_bios = 1;

	return 0;

bad_open:
	kfree(o);
bad_alloc:
	return r;
}
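/*
 * Illustrative usage (not part of the original source): a snapshot-origin
 * table line takes the single <dev_path> argument parsed above, e.g. for a
 * hypothetical /dev/vg/base volume:
 *
 *	# dmsetup create base --table \
 *	      "0 $(blockdev --getsz /dev/vg/base) snapshot-origin /dev/vg/base"
 *
 * Writes to the resulting device then pass through origin_map() below, which
 * triggers copy-out to any snapshots of /dev/vg/base before remapping.
 */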
static void origin_dtr(struct dm_target *ti)
{
	struct dm_origin *o = ti->private;

	dm_put_device(ti, o->dev);
	kfree(o);
}
static int origin_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_origin *o = ti->private;
	unsigned available_sectors;

	bio_set_dev(bio, o->dev->bdev);

	if (unlikely(bio->bi_opf & REQ_PREFLUSH))
		return DM_MAPIO_REMAPPED;

	if (bio_data_dir(bio) != WRITE)
		return DM_MAPIO_REMAPPED;

	available_sectors = o->split_boundary -
		((unsigned)bio->bi_iter.bi_sector & (o->split_boundary - 1));

	if (bio_sectors(bio) > available_sectors)
		dm_accept_partial_bio(bio, available_sectors);

	/* Only tell snapshots if this is a write */
	return do_origin(o->dev, bio, true);
}
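/*
 * Illustrative example (not part of the original source): split_boundary is
 * the minimum snapshot chunk size, which is a power of two, so the mask
 * arithmetic above yields the distance to the next chunk boundary.  Assuming
 * a hypothetical split_boundary of 16 sectors and a write starting at
 * sector 21:
 *
 *	available_sectors = 16 - (21 & 15) = 16 - 5 = 11
 *
 * so a 32-sector bio is trimmed to 11 sectors and the remainder is
 * resubmitted by DM core, keeping each bio within a single chunk of every
 * snapshot.
 */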
/*
 * Set the target "max_io_len" field to the minimum of all the snapshots'
 * chunk sizes.
 */
static void origin_resume(struct dm_target *ti)
{
	struct dm_origin *o = ti->private;

	o->split_boundary = get_origin_minimum_chunksize(o->dev->bdev);

	down_write(&_origins_lock);
	__insert_dm_origin(o);
	up_write(&_origins_lock);
}
static void origin_postsuspend(struct dm_target *ti)
{
	struct dm_origin *o = ti->private;

	down_write(&_origins_lock);
	__remove_dm_origin(o);
	up_write(&_origins_lock);
}
static void origin_status(struct dm_target *ti, status_type_t type,
			  unsigned status_flags, char *result, unsigned maxlen)
{
	struct dm_origin *o = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		snprintf(result, maxlen, "%s", o->dev->name);
		break;
	}
}
static int origin_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct dm_origin *o = ti->private;

	return fn(ti, o->dev, 0, ti->len, data);
}
static struct target_type origin_target = {
	.name    = "snapshot-origin",
	.version = {1, 9, 0},
	.module  = THIS_MODULE,
	.ctr     = origin_ctr,
	.dtr     = origin_dtr,
	.map     = origin_map,
	.resume  = origin_resume,
	.postsuspend = origin_postsuspend,
	.status  = origin_status,
	.iterate_devices = origin_iterate_devices,
};

static struct target_type snapshot_target = {
	.name    = "snapshot",
	.version = {1, 16, 0},
	.module  = THIS_MODULE,
	.ctr     = snapshot_ctr,
	.dtr     = snapshot_dtr,
	.map     = snapshot_map,
	.end_io  = snapshot_end_io,
	.preresume = snapshot_preresume,
	.resume  = snapshot_resume,
	.status  = snapshot_status,
	.iterate_devices = snapshot_iterate_devices,
	.io_hints = snapshot_io_hints,
};

static struct target_type merge_target = {
	.name    = dm_snapshot_merge_target_name,
	.version = {1, 5, 0},
	.module  = THIS_MODULE,
	.ctr     = snapshot_ctr,
	.dtr     = snapshot_dtr,
	.map     = snapshot_merge_map,
	.end_io  = snapshot_end_io,
	.presuspend = snapshot_merge_presuspend,
	.preresume = snapshot_preresume,
	.resume  = snapshot_merge_resume,
	.status  = snapshot_status,
	.iterate_devices = snapshot_iterate_devices,
	.io_hints = snapshot_io_hints,
};
static int __init dm_snapshot_init(void)
{
	int r;

	r = dm_exception_store_init();
	if (r) {
		DMERR("Failed to initialize exception stores");
		return r;
	}

	r = init_origin_hash();
	if (r) {
		DMERR("init_origin_hash failed.");
		goto bad_origin_hash;
	}

	exception_cache = KMEM_CACHE(dm_exception, 0);
	if (!exception_cache) {
		DMERR("Couldn't create exception cache.");
		r = -ENOMEM;
		goto bad_exception_cache;
	}

	pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
	if (!pending_cache) {
		DMERR("Couldn't create pending cache.");
		r = -ENOMEM;
		goto bad_pending_cache;
	}

	r = dm_register_target(&snapshot_target);
	if (r < 0) {
		DMERR("snapshot target register failed %d", r);
		goto bad_register_snapshot_target;
	}

	r = dm_register_target(&origin_target);
	if (r < 0) {
		DMERR("Origin target register failed %d", r);
		goto bad_register_origin_target;
	}

	r = dm_register_target(&merge_target);
	if (r < 0) {
		DMERR("Merge target register failed %d", r);
		goto bad_register_merge_target;
	}

	return 0;

bad_register_merge_target:
	dm_unregister_target(&origin_target);
bad_register_origin_target:
	dm_unregister_target(&snapshot_target);
bad_register_snapshot_target:
	kmem_cache_destroy(pending_cache);
bad_pending_cache:
	kmem_cache_destroy(exception_cache);
bad_exception_cache:
	exit_origin_hash();
bad_origin_hash:
	dm_exception_store_exit();

	return r;
}
static void __exit dm_snapshot_exit(void)
{
	dm_unregister_target(&snapshot_target);
	dm_unregister_target(&origin_target);
	dm_unregister_target(&merge_target);

	exit_origin_hash();
	kmem_cache_destroy(pending_cache);
	kmem_cache_destroy(exception_cache);

	dm_exception_store_exit();
}
/* Module hooks */
module_init(dm_snapshot_init);
module_exit(dm_snapshot_exit);

MODULE_DESCRIPTION(DM_NAME " snapshot target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");
MODULE_ALIAS("dm-snapshot-origin");
MODULE_ALIAS("dm-snapshot-merge");