/*
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */
#include <linux/blkdev.h>
#include <linux/device-mapper.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>

#include "dm-exception-store.h"
#define DM_MSG_PREFIX "snapshots"

static const char dm_snapshot_merge_target_name[] = "snapshot-merge";

#define dm_target_is_snapshot_merge(ti) \
	((ti)->type->name == dm_snapshot_merge_target_name)
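/*
 * Note: this compares the name pointer, not the string contents.  That
 * is sufficient because merge_target below initialises its .name to this
 * same dm_snapshot_merge_target_name array, making the merge target the
 * only target type whose name has that address.
 */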
/*
 * The size of the mempool used to track chunks in use.
 */
#define MIN_IOS 256

#define DM_TRACKED_CHUNK_HASH_SIZE	16
#define DM_TRACKED_CHUNK_HASH(x)	((unsigned long)(x) & \
					 (DM_TRACKED_CHUNK_HASH_SIZE - 1))
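/*
 * The AND above reduces a chunk number modulo the table size, which
 * relies on DM_TRACKED_CHUNK_HASH_SIZE being a power of two.  For
 * illustration: with 16 buckets, chunk 19 lands in bucket 19 & 15 == 3.
 */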
struct dm_exception_table {
	uint32_t hash_mask;
	unsigned hash_shift;
	struct list_head *table;
};
struct dm_snapshot {
	struct rw_semaphore lock;

	struct dm_dev *origin;
	struct dm_dev *cow;

	struct dm_target *ti;

	/* List of snapshots per Origin */
	struct list_head list;

	/*
	 * You can't use a snapshot if this is 0 (e.g. if full).
	 * A snapshot-merge target never clears this.
	 */
	int valid;

	/* Origin writes don't trigger exceptions until this is set */
	int active;

	atomic_t pending_exceptions_count;

	mempool_t *pending_pool;

	struct dm_exception_table pending;
	struct dm_exception_table complete;

	/*
	 * pe_lock protects all pending_exception operations and access
	 * as well as the snapshot_bios list.
	 */
	spinlock_t pe_lock;

	/* Chunks with outstanding reads */
	spinlock_t tracked_chunk_lock;
	struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];

	/* The on disk metadata handler */
	struct dm_exception_store *store;

	struct dm_kcopyd_client *kcopyd_client;

	/* Wait for events based on state_bits */
	unsigned long state_bits;

	/* Range of chunks currently being merged. */
	chunk_t first_merging_chunk;
	int num_merging_chunks;

	/*
	 * The merge operation failed if this flag is set.
	 * Failure modes are handled as follows:
	 * - I/O error reading the header
	 *	=> don't load the target; abort.
	 * - Header does not have "valid" flag set
	 *	=> use the origin; forget about the snapshot.
	 * - I/O error when reading exceptions
	 *	=> don't load the target; abort.
	 *	(We can't use the intermediate origin state.)
	 * - I/O error while merging
	 *	=> stop merging; set merge_failed; process I/O normally.
	 */
	int merge_failed;

	/*
	 * Incoming bios that overlap with chunks being merged must wait
	 * for them to be committed.
	 */
	struct bio_list bios_queued_during_merge;
};
/*
 * state_bits:
 *   RUNNING_MERGE  - Merge operation is in progress.
 *   SHUTDOWN_MERGE - Set to signal that merge needs to be stopped;
 *                    cleared afterwards.
 */
#define RUNNING_MERGE          0
#define SHUTDOWN_MERGE         1
struct dm_dev *dm_snap_origin(struct dm_snapshot *s)
{
	return s->origin;
}
EXPORT_SYMBOL(dm_snap_origin);
struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
{
	return s->cow;
}
EXPORT_SYMBOL(dm_snap_cow);
static sector_t chunk_to_sector(struct dm_exception_store *store,
				chunk_t chunk)
{
	return chunk << store->chunk_shift;
}
static int bdev_equal(struct block_device *lhs, struct block_device *rhs)
{
	/*
	 * There is only ever one instance of a particular block
	 * device so we can compare pointers safely.
	 */
	return lhs == rhs;
}
struct dm_snap_pending_exception {
	struct dm_exception e;

	/*
	 * Origin buffers waiting for this to complete are held
	 * on a list.
	 */
	struct bio_list origin_bios;
	struct bio_list snapshot_bios;

	/* Pointer back to snapshot context */
	struct dm_snapshot *snap;

	/*
	 * 1 indicates the exception has already been sent to
	 * kcopyd.
	 */
	int started;

	/*
	 * For writing a complete chunk, bypassing the copy.
	 */
	struct bio *full_bio;
	bio_end_io_t *full_bio_end_io;
	void *full_bio_private;
};
/*
 * Hash table mapping origin volumes to lists of snapshots and
 * a lock to protect it
 */
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;
struct dm_snap_tracked_chunk {
	struct hlist_node node;
	chunk_t chunk;
};
static void init_tracked_chunk(struct bio *bio)
{
	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
	INIT_HLIST_NODE(&c->node);
}
static bool is_bio_tracked(struct bio *bio)
{
	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
	return !hlist_unhashed(&c->node);
}
static void track_chunk(struct dm_snapshot *s, struct bio *bio, chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));

	c->chunk = chunk;

	spin_lock_irq(&s->tracked_chunk_lock);
	hlist_add_head(&c->node,
		       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
	spin_unlock_irq(&s->tracked_chunk_lock);
}
static void stop_tracking_chunk(struct dm_snapshot *s, struct bio *bio)
{
	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
	unsigned long flags;

	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
	hlist_del(&c->node);
	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);
}
static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c;
	int found = 0;

	spin_lock_irq(&s->tracked_chunk_lock);

	hlist_for_each_entry(c,
	    &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
		if (c->chunk == chunk) {
			found = 1;
			break;
		}
	}

	spin_unlock_irq(&s->tracked_chunk_lock);

	return found;
}
/*
 * This conflicting I/O is extremely improbable in the caller,
 * so msleep(1) is sufficient and there is no need for a wait queue.
 */
static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk)
{
	while (__chunk_is_tracked(s, chunk))
		msleep(1);
}
/*
 * One of these per registered origin, held in the snapshot_origins hash
 */
struct origin {
	/* The origin device */
	struct block_device *bdev;

	struct list_head hash_list;

	/* List of snapshots for this origin */
	struct list_head snapshots;
};
/*
 * Size of the hash table for origin volumes. If we make this
 * the size of the minors list then it should be nearly perfect
 */
#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK      0xFF
static struct list_head *_origins;
static struct rw_semaphore _origins_lock;

static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done);
static DEFINE_SPINLOCK(_pending_exceptions_done_spinlock);
static uint64_t _pending_exceptions_done_count;
static int init_origin_hash(void)
{
	int i;

	_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
			   GFP_KERNEL);
	if (!_origins) {
		DMERR("unable to allocate memory");
		return -ENOMEM;
	}

	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
		INIT_LIST_HEAD(_origins + i);

	init_rwsem(&_origins_lock);

	return 0;
}
static void exit_origin_hash(void)
{
	kfree(_origins);
}
static unsigned origin_hash(struct block_device *bdev)
{
	return bdev->bd_dev & ORIGIN_MASK;
}
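/*
 * ORIGIN_MASK is ORIGIN_HASH_SIZE - 1, so the value above is always a
 * valid index into the _origins table.
 */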
static struct origin *__lookup_origin(struct block_device *origin)
{
	struct list_head *ol;
	struct origin *o;

	ol = &_origins[origin_hash(origin)];
	list_for_each_entry (o, ol, hash_list)
		if (bdev_equal(o->bdev, origin))
			return o;

	return NULL;
}
static void __insert_origin(struct origin *o)
{
	struct list_head *sl = &_origins[origin_hash(o->bdev)];
	list_add_tail(&o->hash_list, sl);
}
/*
 * _origins_lock must be held when calling this function.
 * Returns number of snapshots registered using the supplied cow device, plus:
 * snap_src - a snapshot suitable for use as a source of exception handover
 * snap_dest - a snapshot capable of receiving exception handover.
 * snap_merge - an existing snapshot-merge target linked to the same origin.
 *   There can be at most one snapshot-merge target. The parameter is optional.
 *
 * Possible return values and states of snap_src and snap_dest.
 *   0: NULL, NULL  - first new snapshot
 *   1: snap_src, NULL - normal snapshot
 *   2: snap_src, snap_dest  - waiting for handover
 *   2: snap_src, NULL - handed over, waiting for old to be deleted
 *   1: NULL, snap_dest - source got destroyed without handover
 */
static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
					struct dm_snapshot **snap_src,
					struct dm_snapshot **snap_dest,
					struct dm_snapshot **snap_merge)
{
	struct dm_snapshot *s;
	struct origin *o;
	int count = 0;
	int active;

	o = __lookup_origin(snap->origin->bdev);
	if (!o)
		goto out;

	list_for_each_entry(s, &o->snapshots, list) {
		if (dm_target_is_snapshot_merge(s->ti) && snap_merge)
			*snap_merge = s;
		if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
			continue;

		down_read(&s->lock);
		active = s->active;
		up_read(&s->lock);

		if (active) {
			if (snap_src)
				*snap_src = s;
		} else if (snap_dest)
			*snap_dest = s;

		count++;
	}

out:
	return count;
}
/*
 * On success, returns 1 if this snapshot is a handover destination,
 * otherwise returns 0.
 */
static int __validate_exception_handover(struct dm_snapshot *snap)
{
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
	struct dm_snapshot *snap_merge = NULL;

	/* Does snapshot need exceptions handed over to it? */
	if ((__find_snapshots_sharing_cow(snap, &snap_src, &snap_dest,
					  &snap_merge) == 2) ||
	    snap_dest) {
		snap->ti->error = "Snapshot cow pairing for exception "
				  "table handover failed";
		return -EINVAL;
	}

	/*
	 * If no snap_src was found, snap cannot become a handover
	 * destination.
	 */
	if (!snap_src)
		return 0;

	/*
	 * Non-snapshot-merge handover?
	 */
	if (!dm_target_is_snapshot_merge(snap->ti))
		return 1;

	/*
	 * Do not allow more than one merging snapshot.
	 */
	if (snap_merge) {
		snap->ti->error = "A snapshot is already merging.";
		return -EINVAL;
	}

	if (!snap_src->store->type->prepare_merge ||
	    !snap_src->store->type->commit_merge) {
		snap->ti->error = "Snapshot exception store does not "
				  "support snapshot-merge.";
		return -EINVAL;
	}

	return 1;
}
static void __insert_snapshot(struct origin *o, struct dm_snapshot *s)
{
	struct dm_snapshot *l;

	/* Sort the list according to chunk size, largest-first smallest-last */
	list_for_each_entry(l, &o->snapshots, list)
		if (l->store->chunk_size < s->store->chunk_size)
			break;
	list_add_tail(&s->list, &l->list);
}
/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
 *
 * Also validate snapshot exception store handovers.
 * On success, returns 1 if this registration is a handover destination,
 * otherwise returns 0.
 */
static int register_snapshot(struct dm_snapshot *snap)
{
	struct origin *o, *new_o = NULL;
	struct block_device *bdev = snap->origin->bdev;
	int r = 0;

	new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
	if (!new_o)
		return -ENOMEM;

	down_write(&_origins_lock);

	r = __validate_exception_handover(snap);
	if (r < 0) {
		kfree(new_o);
		goto out;
	}

	o = __lookup_origin(bdev);
	if (o)
		kfree(new_o);
	else {
		/* New origin */
		o = new_o;

		/* Initialise the struct */
		INIT_LIST_HEAD(&o->snapshots);
		o->bdev = bdev;

		__insert_origin(o);
	}

	__insert_snapshot(o, snap);

out:
	up_write(&_origins_lock);

	return r;
}
/*
 * Move snapshot to correct place in list according to chunk size.
 */
static void reregister_snapshot(struct dm_snapshot *s)
{
	struct block_device *bdev = s->origin->bdev;

	down_write(&_origins_lock);

	list_del(&s->list);
	__insert_snapshot(__lookup_origin(bdev), s);

	up_write(&_origins_lock);
}
static void unregister_snapshot(struct dm_snapshot *s)
{
	struct origin *o;

	down_write(&_origins_lock);
	o = __lookup_origin(s->origin->bdev);

	list_del(&s->list);
	if (o && list_empty(&o->snapshots)) {
		list_del(&o->hash_list);
		kfree(o);
	}

	up_write(&_origins_lock);
}
/*
 * Implementation of the exception hash tables.
 * The lowest hash_shift bits of the chunk number are ignored, allowing
 * some consecutive chunks to be grouped together.
 */
static int dm_exception_table_init(struct dm_exception_table *et,
				   uint32_t size, unsigned hash_shift)
{
	unsigned int i;

	et->hash_shift = hash_shift;
	et->hash_mask = size - 1;
	et->table = dm_vcalloc(size, sizeof(struct list_head));
	if (!et->table)
		return -ENOMEM;

	for (i = 0; i < size; i++)
		INIT_LIST_HEAD(et->table + i);

	return 0;
}
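/*
 * Note: 'size' must be a power of two for the hash_mask assignment above
 * to act as a modulo; init_hash_tables() guarantees this by rounding the
 * requested size down with rounddown_pow_of_two().
 */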
static void dm_exception_table_exit(struct dm_exception_table *et,
				    struct kmem_cache *mem)
{
	struct list_head *slot;
	struct dm_exception *ex, *next;
	int i, size;

	size = et->hash_mask + 1;
	for (i = 0; i < size; i++) {
		slot = et->table + i;

		list_for_each_entry_safe (ex, next, slot, hash_list)
			kmem_cache_free(mem, ex);
	}

	vfree(et->table);
}
static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
{
	return (chunk >> et->hash_shift) & et->hash_mask;
}
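/*
 * Chunks that differ only in their low hash_shift bits share a bucket,
 * which is what lets dm_insert_exception() below find and extend an
 * adjacent exception with a single walk of one list.  For illustration,
 * with hash_shift == 2, chunks 8..11 all hash to (8 >> 2) & hash_mask.
 */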
static void dm_remove_exception(struct dm_exception *e)
{
	list_del(&e->hash_list);
}
/*
 * Return the exception data for a sector, or NULL if not
 * remapped.
 */
static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
						chunk_t chunk)
{
	struct list_head *slot;
	struct dm_exception *e;

	slot = &et->table[exception_hash(et, chunk)];
	list_for_each_entry (e, slot, hash_list)
		if (chunk >= e->old_chunk &&
		    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
			return e;

	return NULL;
}
static struct dm_exception *alloc_completed_exception(void)
{
	struct dm_exception *e;

	e = kmem_cache_alloc(exception_cache, GFP_NOIO);
	if (!e)
		e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);

	return e;
}
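/*
 * Illustrative note: the GFP_NOIO attempt above may sleep but will not
 * recurse into the I/O path; if it fails under memory pressure, the
 * GFP_ATOMIC retry may dip into emergency reserves instead of failing
 * the exception outright.  Callers must still tolerate a NULL return.
 */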
static void free_completed_exception(struct dm_exception *e)
{
	kmem_cache_free(exception_cache, e);
}
static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
{
	struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
							     GFP_NOIO);

	atomic_inc(&s->pending_exceptions_count);
	pe->snap = s;

	return pe;
}
static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;

	mempool_free(pe, s->pending_pool);
	smp_mb__before_atomic_dec();
	atomic_dec(&s->pending_exceptions_count);
}
static void dm_insert_exception(struct dm_exception_table *eh,
				struct dm_exception *new_e)
{
	struct list_head *l;
	struct dm_exception *e = NULL;

	l = &eh->table[exception_hash(eh, new_e->old_chunk)];

	/* Add immediately if this table doesn't support consecutive chunks */
	if (!eh->hash_shift)
		goto out;

	/* List is ordered by old_chunk */
	list_for_each_entry_reverse(e, l, hash_list) {
		/* Insert after an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk +
					 dm_consecutive_chunk_count(e) + 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
					 dm_consecutive_chunk_count(e) + 1)) {
			dm_consecutive_chunk_count_inc(e);
			free_completed_exception(new_e);
			return;
		}

		/* Insert before an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk - 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
			dm_consecutive_chunk_count_inc(e);
			e->old_chunk--;
			e->new_chunk--;
			free_completed_exception(new_e);
			return;
		}

		if (new_e->old_chunk > e->old_chunk)
			break;
	}

out:
	list_add(&new_e->hash_list, e ? &e->hash_list : l);
}
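/*
 * Worked example of the coalescing above (illustrative numbers): suppose
 * e maps old chunks 10..12 to new chunks 20..22, i.e. e->old_chunk == 10,
 * dm_chunk_number(e->new_chunk) == 20 and dm_consecutive_chunk_count(e)
 * == 2.  Inserting new_e with old_chunk 13 and new_chunk 23 satisfies the
 * "insert after" test (13 == 10 + 2 + 1 and 23 == 20 + 2 + 1), so the
 * range simply grows to cover 10..13 and new_e is freed.
 */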
/*
 * Callback used by the exception stores to load exceptions when
 * initialising.
 */
static int dm_add_exception(void *context, chunk_t old, chunk_t new)
{
	struct dm_snapshot *s = context;
	struct dm_exception *e;

	e = alloc_completed_exception();
	if (!e)
		return -ENOMEM;

	e->old_chunk = old;

	/* Consecutive_count is implicitly initialised to zero */
	e->new_chunk = new;

	dm_insert_exception(&s->complete, e);

	return 0;
}
/*
 * Return a minimum chunk size of all snapshots that have the specified origin.
 * Return zero if the origin has no snapshots.
 */
static uint32_t __minimum_chunk_size(struct origin *o)
{
	struct dm_snapshot *snap;
	unsigned chunk_size = 0;

	if (o)
		list_for_each_entry(snap, &o->snapshots, list)
			chunk_size = min_not_zero(chunk_size,
						  snap->store->chunk_size);

	return (uint32_t) chunk_size;
}
static int calc_max_buckets(void)
{
	/* use a fixed size of 2MB */
	unsigned long mem = 2 * 1024 * 1024;
	mem /= sizeof(struct list_head);

	return mem;
}

static sector_t get_dev_size(struct block_device *bdev)
{
	return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
}
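/*
 * Worked example for calc_max_buckets() (illustrative, assuming a typical
 * 64-bit build where sizeof(struct list_head) is 16 bytes): 2MB / 16
 * allows at most 131072 hash buckets.
 */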
/*
 * Allocate room for a suitable hash table.
 */
static int init_hash_tables(struct dm_snapshot *s)
{
	sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;

	/*
	 * Calculate based on the size of the original volume or
	 * the COW volume...
	 */
	cow_dev_size = get_dev_size(s->cow->bdev);
	origin_dev_size = get_dev_size(s->origin->bdev);
	max_buckets = calc_max_buckets();

	hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift;
	hash_size = min(hash_size, max_buckets);

	if (hash_size < 64)
		hash_size = 64;
	hash_size = rounddown_pow_of_two(hash_size);
	if (dm_exception_table_init(&s->complete, hash_size,
				    DM_CHUNK_CONSECUTIVE_BITS))
		return -ENOMEM;

	/*
	 * Allocate hash table for in-flight exceptions
	 * Make this smaller than the real hash table
	 */
	hash_size >>= 3;
	if (hash_size < 64)
		hash_size = 64;

	if (dm_exception_table_init(&s->pending, hash_size, 0)) {
		dm_exception_table_exit(&s->complete, exception_cache);
		return -ENOMEM;
	}

	return 0;
}
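/*
 * Sizing example (illustrative): for a 1GiB device (2097152 sectors) with
 * 8-sector chunks, the shift above yields 262144 candidate buckets, which
 * is then clamped to calc_max_buckets() and rounded down to a power of
 * two before the exception tables are allocated.
 */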
static void merge_shutdown(struct dm_snapshot *s)
{
	clear_bit_unlock(RUNNING_MERGE, &s->state_bits);
	smp_mb__after_clear_bit();
	wake_up_bit(&s->state_bits, RUNNING_MERGE);
}
static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s)
{
	s->first_merging_chunk = 0;
	s->num_merging_chunks = 0;

	return bio_list_get(&s->bios_queued_during_merge);
}
/*
 * Remove one chunk from the index of completed exceptions.
 */
static int __remove_single_exception_chunk(struct dm_snapshot *s,
					   chunk_t old_chunk)
{
	struct dm_exception *e;

	e = dm_lookup_exception(&s->complete, old_chunk);
	if (!e) {
		DMERR("Corruption detected: exception for block %llu is "
		      "on disk but not in memory",
		      (unsigned long long)old_chunk);
		return -EINVAL;
	}

	/*
	 * If this is the only chunk using this exception, remove exception.
	 */
	if (!dm_consecutive_chunk_count(e)) {
		dm_remove_exception(e);
		free_completed_exception(e);
		return 0;
	}

	/*
	 * The chunk may be either at the beginning or the end of a
	 * group of consecutive chunks - never in the middle.  We are
	 * removing chunks in the opposite order to that in which they
	 * were added, so this should always be true.
	 * Decrement the consecutive chunk counter and adjust the
	 * starting point if necessary.
	 */
	if (old_chunk == e->old_chunk) {
		e->old_chunk++;
		e->new_chunk++;
	} else if (old_chunk != e->old_chunk +
		   dm_consecutive_chunk_count(e)) {
		DMERR("Attempt to merge block %llu from the "
		      "middle of a chunk range [%llu - %llu]",
		      (unsigned long long)old_chunk,
		      (unsigned long long)e->old_chunk,
		      (unsigned long long)
		      e->old_chunk + dm_consecutive_chunk_count(e));
		return -EINVAL;
	}

	dm_consecutive_chunk_count_dec(e);

	return 0;
}
static void flush_bios(struct bio *bio);

static int remove_single_exception_chunk(struct dm_snapshot *s)
{
	struct bio *b = NULL;
	int r;
	chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;

	down_write(&s->lock);

	/*
	 * Process chunks (and associated exceptions) in reverse order
	 * so that dm_consecutive_chunk_count_dec() accounting works.
	 */
	do {
		r = __remove_single_exception_chunk(s, old_chunk);
		if (r)
			goto out;
	} while (old_chunk-- > s->first_merging_chunk);

	b = __release_queued_bios_after_merge(s);

out:
	up_write(&s->lock);
	if (b)
		flush_bios(b);

	return r;
}
static int origin_write_extent(struct dm_snapshot *merging_snap,
			       sector_t sector, unsigned chunk_size);

static void merge_callback(int read_err, unsigned long write_err,
			   void *context);
static uint64_t read_pending_exceptions_done_count(void)
{
	uint64_t pending_exceptions_done;

	spin_lock(&_pending_exceptions_done_spinlock);
	pending_exceptions_done = _pending_exceptions_done_count;
	spin_unlock(&_pending_exceptions_done_spinlock);

	return pending_exceptions_done;
}
static void increment_pending_exceptions_done_count(void)
{
	spin_lock(&_pending_exceptions_done_spinlock);
	_pending_exceptions_done_count++;
	spin_unlock(&_pending_exceptions_done_spinlock);

	wake_up_all(&_pending_exceptions_done);
}
static void snapshot_merge_next_chunks(struct dm_snapshot *s)
{
	int i, linear_chunks;
	chunk_t old_chunk, new_chunk;
	struct dm_io_region src, dest;
	sector_t io_size;
	uint64_t previous_count;

	BUG_ON(!test_bit(RUNNING_MERGE, &s->state_bits));
	if (unlikely(test_bit(SHUTDOWN_MERGE, &s->state_bits)))
		goto shut;

	/*
	 * valid flag never changes during merge, so no lock required.
	 */
	if (!s->valid) {
		DMERR("Snapshot is invalid: can't merge");
		goto shut;
	}

	linear_chunks = s->store->type->prepare_merge(s->store, &old_chunk,
						      &new_chunk);
	if (linear_chunks <= 0) {
		if (linear_chunks < 0) {
			DMERR("Read error in exception store: "
			      "shutting down merge");
			down_write(&s->lock);
			s->merge_failed = 1;
			up_write(&s->lock);
		}
		goto shut;
	}

	/* Adjust old_chunk and new_chunk to reflect start of linear region */
	old_chunk = old_chunk + 1 - linear_chunks;
	new_chunk = new_chunk + 1 - linear_chunks;

	/*
	 * Use one (potentially large) I/O to copy all 'linear_chunks'
	 * from the exception store to the origin
	 */
	io_size = linear_chunks * s->store->chunk_size;

	dest.bdev = s->origin->bdev;
	dest.sector = chunk_to_sector(s->store, old_chunk);
	dest.count = min(io_size, get_dev_size(dest.bdev) - dest.sector);

	src.bdev = s->cow->bdev;
	src.sector = chunk_to_sector(s->store, new_chunk);
	src.count = dest.count;

	/*
	 * Reallocate any exceptions needed in other snapshots then
	 * wait for the pending exceptions to complete.
	 * Each time any pending exception (globally on the system)
	 * completes we are woken and repeat the process to find out
	 * if we can proceed.  While this may not seem a particularly
	 * efficient algorithm, it is not expected to have any
	 * significant impact on performance.
	 */
	previous_count = read_pending_exceptions_done_count();
	while (origin_write_extent(s, dest.sector, io_size)) {
		wait_event(_pending_exceptions_done,
			   (read_pending_exceptions_done_count() !=
			    previous_count));
		/* Retry after the wait, until all exceptions are done. */
		previous_count = read_pending_exceptions_done_count();
	}

	down_write(&s->lock);
	s->first_merging_chunk = old_chunk;
	s->num_merging_chunks = linear_chunks;
	up_write(&s->lock);

	/* Wait until writes to all 'linear_chunks' drain */
	for (i = 0; i < linear_chunks; i++)
		__check_for_conflicting_io(s, old_chunk + i);

	dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s);
	return;

shut:
	merge_shutdown(s);
}
static void error_bios(struct bio *bio);
static void merge_callback(int read_err, unsigned long write_err, void *context)
{
	struct dm_snapshot *s = context;
	struct bio *b = NULL;

	if (read_err || write_err) {
		if (read_err)
			DMERR("Read error: shutting down merge.");
		else
			DMERR("Write error: shutting down merge.");
		goto shut;
	}

	if (s->store->type->commit_merge(s->store,
					 s->num_merging_chunks) < 0) {
		DMERR("Write error in exception store: shutting down merge");
		goto shut;
	}

	if (remove_single_exception_chunk(s) < 0)
		goto shut;

	snapshot_merge_next_chunks(s);

	return;

shut:
	down_write(&s->lock);
	s->merge_failed = 1;
	b = __release_queued_bios_after_merge(s);
	up_write(&s->lock);
	error_bios(b);

	merge_shutdown(s);
}
static void start_merge(struct dm_snapshot *s)
{
	if (!test_and_set_bit(RUNNING_MERGE, &s->state_bits))
		snapshot_merge_next_chunks(s);
}
static int wait_schedule(void *ptr)
{
	schedule();

	return 0;
}

/*
 * Stop the merging process and wait until it finishes.
 */
static void stop_merge(struct dm_snapshot *s)
{
	set_bit(SHUTDOWN_MERGE, &s->state_bits);
	wait_on_bit(&s->state_bits, RUNNING_MERGE, wait_schedule,
		    TASK_UNINTERRUPTIBLE);
	clear_bit(SHUTDOWN_MERGE, &s->state_bits);
}
/*
 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
 */
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dm_snapshot *s;
	int i;
	int r = -EINVAL;
	char *origin_path, *cow_path;
	unsigned args_used, num_flush_requests = 1;
	fmode_t origin_mode = FMODE_READ;

	if (argc != 4) {
		ti->error = "requires exactly 4 arguments";
		r = -EINVAL;
		goto bad;
	}

	if (dm_target_is_snapshot_merge(ti)) {
		num_flush_requests = 2;
		origin_mode = FMODE_WRITE;
	}

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s) {
		ti->error = "Cannot allocate private snapshot structure";
		r = -ENOMEM;
		goto bad;
	}

	origin_path = argv[0];
	argv++;
	argc--;

	r = dm_get_device(ti, origin_path, origin_mode, &s->origin);
	if (r) {
		ti->error = "Cannot get origin device";
		goto bad_origin;
	}

	cow_path = argv[0];
	argv++;
	argc--;

	r = dm_get_device(ti, cow_path, dm_table_get_mode(ti->table), &s->cow);
	if (r) {
		ti->error = "Cannot get COW device";
		goto bad_cow;
	}

	r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store);
	if (r) {
		ti->error = "Couldn't create exception store";
		r = -EINVAL;
		goto bad_store;
	}

	argv += args_used;
	argc -= args_used;

	s->ti = ti;
	s->valid = 1;
	s->active = 0;
	atomic_set(&s->pending_exceptions_count, 0);
	init_rwsem(&s->lock);
	INIT_LIST_HEAD(&s->list);
	spin_lock_init(&s->pe_lock);
	s->state_bits = 0;
	s->merge_failed = 0;
	s->first_merging_chunk = 0;
	s->num_merging_chunks = 0;
	bio_list_init(&s->bios_queued_during_merge);

	/* Allocate hash table for COW data */
	if (init_hash_tables(s)) {
		ti->error = "Unable to allocate hash table space";
		r = -ENOMEM;
		goto bad_hash_tables;
	}

	s->kcopyd_client = dm_kcopyd_client_create();
	if (IS_ERR(s->kcopyd_client)) {
		r = PTR_ERR(s->kcopyd_client);
		ti->error = "Could not create kcopyd client";
		goto bad_kcopyd;
	}

	s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
	if (!s->pending_pool) {
		ti->error = "Could not allocate mempool for pending exceptions";
		r = -ENOMEM;
		goto bad_pending_pool;
	}

	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);

	spin_lock_init(&s->tracked_chunk_lock);

	ti->private = s;
	ti->num_flush_requests = num_flush_requests;
	ti->per_bio_data_size = sizeof(struct dm_snap_tracked_chunk);

	/* Add snapshot to the list of snapshots for this origin */
	/* Exceptions aren't triggered till snapshot_resume() is called */
	r = register_snapshot(s);
	if (r == -ENOMEM) {
		ti->error = "Snapshot origin struct allocation failed";
		goto bad_load_and_register;
	} else if (r < 0) {
		/* invalid handover, register_snapshot has set ti->error */
		goto bad_load_and_register;
	}

	/*
	 * Metadata must only be loaded into one table at once, so skip this
	 * if metadata will be handed over during resume.
	 * Chunk size will be set during the handover - set it to zero to
	 * ensure it's ignored.
	 */
	if (r > 0) {
		s->store->chunk_size = 0;
		return 0;
	}

	r = s->store->type->read_metadata(s->store, dm_add_exception,
					  (void *)s);
	if (r < 0) {
		ti->error = "Failed to read snapshot metadata";
		goto bad_read_metadata;
	} else if (r > 0) {
		s->valid = 0;
		DMWARN("Snapshot is marked invalid.");
	}

	if (!s->store->chunk_size) {
		ti->error = "Chunk size not set";
		goto bad_read_metadata;
	}

	r = dm_set_target_max_io_len(ti, s->store->chunk_size);
	if (r)
		goto bad_read_metadata;

	return 0;

bad_read_metadata:
	unregister_snapshot(s);

bad_load_and_register:
	mempool_destroy(s->pending_pool);

bad_pending_pool:
	dm_kcopyd_client_destroy(s->kcopyd_client);

bad_kcopyd:
	dm_exception_table_exit(&s->pending, pending_cache);
	dm_exception_table_exit(&s->complete, exception_cache);

bad_hash_tables:
	dm_exception_store_destroy(s->store);

bad_store:
	dm_put_device(ti, s->cow);

bad_cow:
	dm_put_device(ti, s->origin);

bad_origin:
	kfree(s);

bad:
	return r;
}
static void __free_exceptions(struct dm_snapshot *s)
{
	dm_kcopyd_client_destroy(s->kcopyd_client);
	s->kcopyd_client = NULL;

	dm_exception_table_exit(&s->pending, pending_cache);
	dm_exception_table_exit(&s->complete, exception_cache);
}
static void __handover_exceptions(struct dm_snapshot *snap_src,
				  struct dm_snapshot *snap_dest)
{
	union {
		struct dm_exception_table table_swap;
		struct dm_exception_store *store_swap;
	} u;

	/*
	 * Swap all snapshot context information between the two instances.
	 */
	u.table_swap = snap_dest->complete;
	snap_dest->complete = snap_src->complete;
	snap_src->complete = u.table_swap;

	u.store_swap = snap_dest->store;
	snap_dest->store = snap_src->store;
	snap_src->store = u.store_swap;

	snap_dest->store->snap = snap_dest;
	snap_src->store->snap = snap_src;

	snap_dest->ti->max_io_len = snap_dest->store->chunk_size;
	snap_dest->valid = snap_src->valid;

	/*
	 * Set source invalid to ensure it receives no further I/O.
	 */
	snap_src->valid = 0;
}
static void snapshot_dtr(struct dm_target *ti)
{
#ifdef CONFIG_DM_DEBUG
	int i;
#endif
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

	down_read(&_origins_lock);
	/* Check whether exception handover must be cancelled */
	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
	if (snap_src && snap_dest && (s == snap_src)) {
		down_write(&snap_dest->lock);
		snap_dest->valid = 0;
		up_write(&snap_dest->lock);
		DMERR("Cancelling snapshot handover.");
	}
	up_read(&_origins_lock);

	if (dm_target_is_snapshot_merge(ti))
		stop_merge(s);

	/* Prevent further origin writes from using this snapshot. */
	/* After this returns there can be no new kcopyd jobs. */
	unregister_snapshot(s);

	while (atomic_read(&s->pending_exceptions_count))
		msleep(1);
	/*
	 * Ensure instructions in mempool_destroy aren't reordered
	 * before atomic_read.
	 */
	smp_mb();

#ifdef CONFIG_DM_DEBUG
	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
#endif

	__free_exceptions(s);

	mempool_destroy(s->pending_pool);

	dm_exception_store_destroy(s->store);

	dm_put_device(ti, s->cow);

	dm_put_device(ti, s->origin);

	kfree(s);
}
/*
 * Flush a list of buffers.
 */
static void flush_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		generic_make_request(bio);
		bio = n;
	}
}
static int do_origin(struct dm_dev *origin, struct bio *bio);

/*
 * Flush a list of buffers.
 */
static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
{
	struct bio *n;
	int r;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		r = do_origin(s->origin, bio);
		if (r == DM_MAPIO_REMAPPED)
			generic_make_request(bio);
		bio = n;
	}
}
/*
 * Error a list of buffers.
 */
static void error_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		bio_io_error(bio);
		bio = n;
	}
}
static void __invalidate_snapshot(struct dm_snapshot *s, int err)
{
	if (!s->valid)
		return;

	if (err == -EIO)
		DMERR("Invalidating snapshot: Error reading/writing.");
	else if (err == -ENOMEM)
		DMERR("Invalidating snapshot: Unable to allocate exception.");

	if (s->store->type->drop_snapshot)
		s->store->type->drop_snapshot(s->store);

	s->valid = 0;

	dm_table_event(s->ti->table);
}
static void pending_complete(struct dm_snap_pending_exception *pe, int success)
{
	struct dm_exception *e;
	struct dm_snapshot *s = pe->snap;
	struct bio *origin_bios = NULL;
	struct bio *snapshot_bios = NULL;
	struct bio *full_bio = NULL;
	int error = 0;

	if (!success) {
		/* Read/write error - snapshot is unusable */
		down_write(&s->lock);
		__invalidate_snapshot(s, -EIO);
		error = 1;
		goto out;
	}

	e = alloc_completed_exception();
	if (!e) {
		down_write(&s->lock);
		__invalidate_snapshot(s, -ENOMEM);
		error = 1;
		goto out;
	}
	*e = pe->e;

	down_write(&s->lock);
	if (!s->valid) {
		free_completed_exception(e);
		error = 1;
		goto out;
	}

	/* Check for conflicting reads */
	__check_for_conflicting_io(s, pe->e.old_chunk);

	/*
	 * Add a proper exception, and remove the
	 * in-flight exception from the list.
	 */
	dm_insert_exception(&s->complete, e);

out:
	dm_remove_exception(&pe->e);
	snapshot_bios = bio_list_get(&pe->snapshot_bios);
	origin_bios = bio_list_get(&pe->origin_bios);
	full_bio = pe->full_bio;
	if (full_bio) {
		full_bio->bi_end_io = pe->full_bio_end_io;
		full_bio->bi_private = pe->full_bio_private;
	}
	free_pending_exception(pe);

	increment_pending_exceptions_done_count();

	up_write(&s->lock);

	/* Submit any pending write bios */
	if (error) {
		if (full_bio)
			bio_io_error(full_bio);
		error_bios(snapshot_bios);
	} else {
		if (full_bio)
			bio_endio(full_bio, 0);
		flush_bios(snapshot_bios);
	}

	retry_origin_bios(s, origin_bios);
}
static void commit_callback(void *context, int success)
{
	struct dm_snap_pending_exception *pe = context;

	pending_complete(pe, success);
}
/*
 * Called when the copy I/O has finished.  kcopyd actually runs
 * this code so don't block.
 */
static void copy_callback(int read_err, unsigned long write_err, void *context)
{
	struct dm_snap_pending_exception *pe = context;
	struct dm_snapshot *s = pe->snap;

	if (read_err || write_err)
		pending_complete(pe, 0);
	else
		/* Update the metadata if we are persistent */
		s->store->type->commit_exception(s->store, &pe->e,
						 commit_callback, pe);
}
/*
 * Dispatches the copy operation to kcopyd.
 */
static void start_copy(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;
	struct dm_io_region src, dest;
	struct block_device *bdev = s->origin->bdev;
	sector_t dev_size;

	dev_size = get_dev_size(bdev);

	src.bdev = bdev;
	src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
	src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector);

	dest.bdev = s->cow->bdev;
	dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
	dest.count = src.count;

	/* Hand over to kcopyd */
	dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
}
static void full_bio_end_io(struct bio *bio, int error)
{
	void *callback_data = bio->bi_private;

	dm_kcopyd_do_callback(callback_data, 0, error ? 1 : 0);
}
static void start_full_bio(struct dm_snap_pending_exception *pe,
			   struct bio *bio)
{
	struct dm_snapshot *s = pe->snap;
	void *callback_data;

	pe->full_bio = bio;
	pe->full_bio_end_io = bio->bi_end_io;
	pe->full_bio_private = bio->bi_private;

	callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
						   copy_callback, pe);

	bio->bi_end_io = full_bio_end_io;
	bio->bi_private = callback_data;

	generic_make_request(bio);
}
static struct dm_snap_pending_exception *
__lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_exception *e = dm_lookup_exception(&s->pending, chunk);

	if (!e)
		return NULL;

	return container_of(e, struct dm_snap_pending_exception, e);
}
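/*
 * The container_of() above is safe because every entry in the pending
 * table is the 'e' member embedded in a struct dm_snap_pending_exception,
 * so the enclosing structure can always be recovered from the embedded
 * exception.
 */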
/*
 * Looks to see if this snapshot already has a pending exception
 * for this chunk, otherwise it allocates a new one and inserts
 * it into the pending table.
 *
 * NOTE: a write lock must be held on snap->lock before calling
 * this.
 */
static struct dm_snap_pending_exception *
__find_pending_exception(struct dm_snapshot *s,
			 struct dm_snap_pending_exception *pe, chunk_t chunk)
{
	struct dm_snap_pending_exception *pe2;

	pe2 = __lookup_pending_exception(s, chunk);
	if (pe2) {
		free_pending_exception(pe);
		return pe2;
	}

	pe->e.old_chunk = chunk;
	bio_list_init(&pe->origin_bios);
	bio_list_init(&pe->snapshot_bios);
	pe->started = 0;
	pe->full_bio = NULL;

	if (s->store->type->prepare_exception(s->store, &pe->e)) {
		free_pending_exception(pe);
		return NULL;
	}

	dm_insert_exception(&s->pending, &pe->e);

	return pe;
}
static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
			    struct bio *bio, chunk_t chunk)
{
	bio->bi_bdev = s->cow->bdev;
	bio->bi_sector = chunk_to_sector(s->store,
					 dm_chunk_number(e->new_chunk) +
					 (chunk - e->old_chunk)) +
					(bio->bi_sector &
					 s->store->chunk_mask);
}
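/*
 * Remap arithmetic, worked through (illustrative numbers): with 8-sector
 * chunks (chunk_mask == 7), a bio at sector 35 lies in chunk 4 at offset 3.
 * If chunk 4 is remapped to new chunk 9, the bio is redirected to the COW
 * device at sector 9 * 8 + 3 == 75.  The dm_chunk_number() call strips the
 * consecutive-count bits, and (chunk - e->old_chunk) walks along a
 * consecutive range.
 */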
static int snapshot_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_exception *e;
	struct dm_snapshot *s = ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;
	struct dm_snap_pending_exception *pe = NULL;

	init_tracked_chunk(bio);

	if (bio->bi_rw & REQ_FLUSH) {
		bio->bi_bdev = s->cow->bdev;
		return DM_MAPIO_REMAPPED;
	}

	chunk = sector_to_chunk(s->store, bio->bi_sector);

	/* Full snapshots are not usable */
	/* To get here the table must be live so s->active is always set. */
	if (!s->valid)
		return -EIO;

	/* FIXME: should only take write lock if we need
	 * to copy an exception */
	down_write(&s->lock);

	if (!s->valid) {
		r = -EIO;
		goto out_unlock;
	}

	/* If the block is already remapped - use that, else remap it */
	e = dm_lookup_exception(&s->complete, chunk);
	if (e) {
		remap_exception(s, e, bio, chunk);
		goto out_unlock;
	}

	/*
	 * Write to snapshot - higher level takes care of RW/RO
	 * flags so we should only get this if we are
	 * writeable.
	 */
	if (bio_rw(bio) == WRITE) {
		pe = __lookup_pending_exception(s, chunk);
		if (!pe) {
			up_write(&s->lock);
			pe = alloc_pending_exception(s);
			down_write(&s->lock);

			if (!s->valid) {
				free_pending_exception(pe);
				r = -EIO;
				goto out_unlock;
			}

			e = dm_lookup_exception(&s->complete, chunk);
			if (e) {
				free_pending_exception(pe);
				remap_exception(s, e, bio, chunk);
				goto out_unlock;
			}

			pe = __find_pending_exception(s, pe, chunk);
			if (!pe) {
				__invalidate_snapshot(s, -ENOMEM);
				r = -EIO;
				goto out_unlock;
			}
		}

		remap_exception(s, &pe->e, bio, chunk);

		r = DM_MAPIO_SUBMITTED;

		if (!pe->started &&
		    bio->bi_size == (s->store->chunk_size << SECTOR_SHIFT)) {
			pe->started = 1;
			up_write(&s->lock);
			start_full_bio(pe, bio);
			goto out;
		}

		bio_list_add(&pe->snapshot_bios, bio);

		if (!pe->started) {
			/* this is protected by snap->lock */
			pe->started = 1;
			up_write(&s->lock);
			start_copy(pe);
			goto out;
		}
	} else {
		bio->bi_bdev = s->origin->bdev;
		track_chunk(s, bio, chunk);
	}

out_unlock:
	up_write(&s->lock);
out:
	return r;
}
/*
 * A snapshot-merge target behaves like a combination of a snapshot
 * target and a snapshot-origin target.  It only generates new
 * exceptions in other snapshots and not in the one that is being
 * merged.
 *
 * For each chunk, if there is an existing exception, it is used to
 * redirect I/O to the cow device.  Otherwise I/O is sent to the origin,
 * which in turn might generate exceptions in other snapshots.
 * If merging is currently taking place on the chunk in question, the
 * I/O is deferred by adding it to s->bios_queued_during_merge.
 */
static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_exception *e;
	struct dm_snapshot *s = ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;

	init_tracked_chunk(bio);

	if (bio->bi_rw & REQ_FLUSH) {
		if (!dm_bio_get_target_request_nr(bio))
			bio->bi_bdev = s->origin->bdev;
		else
			bio->bi_bdev = s->cow->bdev;
		return DM_MAPIO_REMAPPED;
	}

	chunk = sector_to_chunk(s->store, bio->bi_sector);

	down_write(&s->lock);

	/* Full merging snapshots are redirected to the origin */
	if (!s->valid)
		goto redirect_to_origin;

	/* If the block is already remapped - use that */
	e = dm_lookup_exception(&s->complete, chunk);
	if (e) {
		/* Queue writes overlapping with chunks being merged */
		if (bio_rw(bio) == WRITE &&
		    chunk >= s->first_merging_chunk &&
		    chunk < (s->first_merging_chunk +
			     s->num_merging_chunks)) {
			bio->bi_bdev = s->origin->bdev;
			bio_list_add(&s->bios_queued_during_merge, bio);
			r = DM_MAPIO_SUBMITTED;
			goto out_unlock;
		}

		remap_exception(s, e, bio, chunk);

		if (bio_rw(bio) == WRITE)
			track_chunk(s, bio, chunk);
		goto out_unlock;
	}

redirect_to_origin:
	bio->bi_bdev = s->origin->bdev;

	if (bio_rw(bio) == WRITE) {
		up_write(&s->lock);
		return do_origin(s->origin, bio);
	}

out_unlock:
	up_write(&s->lock);

	return r;
}
static int snapshot_end_io(struct dm_target *ti, struct bio *bio, int error)
{
	struct dm_snapshot *s = ti->private;

	if (is_bio_tracked(bio))
		stop_tracking_chunk(s, bio);

	return 0;
}
static void snapshot_merge_presuspend(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	stop_merge(s);
}
static int snapshot_preresume(struct dm_target *ti)
{
	int r = 0;
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

	down_read(&_origins_lock);
	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
	if (snap_src && snap_dest) {
		down_read(&snap_src->lock);
		if (s == snap_src) {
			DMERR("Unable to resume snapshot source until "
			      "handover completes.");
			r = -EINVAL;
		} else if (!dm_suspended(snap_src->ti)) {
			DMERR("Unable to perform snapshot handover until "
			      "source is suspended.");
			r = -EINVAL;
		}
		up_read(&snap_src->lock);
	}
	up_read(&_origins_lock);

	return r;
}
static void snapshot_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

	down_read(&_origins_lock);
	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
	if (snap_src && snap_dest) {
		down_write(&snap_src->lock);
		down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
		__handover_exceptions(snap_src, snap_dest);
		up_write(&snap_dest->lock);
		up_write(&snap_src->lock);
	}
	up_read(&_origins_lock);

	/* Now we have correct chunk size, reregister */
	reregister_snapshot(s);

	down_write(&s->lock);
	s->active = 1;
	up_write(&s->lock);
}
static uint32_t get_origin_minimum_chunksize(struct block_device *bdev)
{
	uint32_t min_chunksize;

	down_read(&_origins_lock);
	min_chunksize = __minimum_chunk_size(__lookup_origin(bdev));
	up_read(&_origins_lock);

	return min_chunksize;
}
static void snapshot_merge_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	/*
	 * Handover exceptions from existing snapshot.
	 */
	snapshot_resume(ti);

	/*
	 * snapshot-merge acts as an origin, so set ti->max_io_len
	 */
	ti->max_io_len = get_origin_minimum_chunksize(s->origin->bdev);

	start_merge(s);
}
static int snapshot_status(struct dm_target *ti, status_type_t type,
			   unsigned status_flags, char *result, unsigned maxlen)
{
	unsigned sz = 0;
	struct dm_snapshot *snap = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:

		down_write(&snap->lock);

		if (!snap->valid)
			DMEMIT("Invalid");
		else if (snap->merge_failed)
			DMEMIT("Merge failed");
		else {
			if (snap->store->type->usage) {
				sector_t total_sectors, sectors_allocated,
					 metadata_sectors;
				snap->store->type->usage(snap->store,
							 &total_sectors,
							 &sectors_allocated,
							 &metadata_sectors);
				DMEMIT("%llu/%llu %llu",
				       (unsigned long long)sectors_allocated,
				       (unsigned long long)total_sectors,
				       (unsigned long long)metadata_sectors);
			}
			else
				DMEMIT("Unknown");
		}

		up_write(&snap->lock);

		break;

	case STATUSTYPE_TABLE:
		/*
		 * kdevname returns a static pointer so we need
		 * to make private copies if the output is to
		 * make sense.
		 */
		DMEMIT("%s %s", snap->origin->name, snap->cow->name);
		snap->store->type->status(snap->store, type, result + sz,
					  maxlen - sz);
		break;
	}

	return 0;
}
static int snapshot_iterate_devices(struct dm_target *ti,
				    iterate_devices_callout_fn fn, void *data)
{
	struct dm_snapshot *snap = ti->private;
	int r;

	r = fn(ti, snap->origin, 0, ti->len, data);

	if (!r)
		r = fn(ti, snap->cow, 0, get_dev_size(snap->cow->bdev), data);

	return r;
}
/*-----------------------------------------------------------------
 * Origin methods
 *---------------------------------------------------------------*/

/*
 * If no exceptions need creating, DM_MAPIO_REMAPPED is returned and any
 * supplied bio was ignored.  The caller may submit it immediately.
 * (No remapping actually occurs as the origin is always a direct linear
 * map.)
 *
 * If further exceptions are required, DM_MAPIO_SUBMITTED is returned
 * and any supplied bio is added to a list to be submitted once all
 * the necessary exceptions exist.
 */
static int __origin_write(struct list_head *snapshots, sector_t sector,
			  struct bio *bio)
{
	int r = DM_MAPIO_REMAPPED;
	struct dm_snapshot *snap;
	struct dm_exception *e;
	struct dm_snap_pending_exception *pe;
	struct dm_snap_pending_exception *pe_to_start_now = NULL;
	struct dm_snap_pending_exception *pe_to_start_last = NULL;
	chunk_t chunk;

	/* Do all the snapshots on this origin */
	list_for_each_entry (snap, snapshots, list) {
		/*
		 * Don't make new exceptions in a merging snapshot
		 * because it has effectively been deleted
		 */
		if (dm_target_is_snapshot_merge(snap->ti))
			continue;

		down_write(&snap->lock);

		/* Only deal with valid and active snapshots */
		if (!snap->valid || !snap->active)
			goto next_snapshot;

		/* Nothing to do if writing beyond end of snapshot */
		if (sector >= dm_table_get_size(snap->ti->table))
			goto next_snapshot;

		/*
		 * Remember, different snapshots can have
		 * different chunk sizes.
		 */
		chunk = sector_to_chunk(snap->store, sector);

		/*
		 * Check exception table to see if block
		 * is already remapped in this snapshot
		 * and trigger an exception if not.
		 */
		e = dm_lookup_exception(&snap->complete, chunk);
		if (e)
			goto next_snapshot;

		pe = __lookup_pending_exception(snap, chunk);
		if (!pe) {
			up_write(&snap->lock);
			pe = alloc_pending_exception(snap);
			down_write(&snap->lock);

			if (!snap->valid) {
				free_pending_exception(pe);
				goto next_snapshot;
			}

			e = dm_lookup_exception(&snap->complete, chunk);
			if (e) {
				free_pending_exception(pe);
				goto next_snapshot;
			}

			pe = __find_pending_exception(snap, pe, chunk);
			if (!pe) {
				__invalidate_snapshot(snap, -ENOMEM);
				goto next_snapshot;
			}
		}

		r = DM_MAPIO_SUBMITTED;

		/*
		 * If an origin bio was supplied, queue it to wait for the
		 * completion of this exception, and start this one last,
		 * at the end of the function.
		 */
		if (bio) {
			bio_list_add(&pe->origin_bios, bio);
			bio = NULL;

			if (!pe->started) {
				pe->started = 1;
				pe_to_start_last = pe;
			}
		}

		if (!pe->started) {
			pe->started = 1;
			pe_to_start_now = pe;
		}

next_snapshot:
		up_write(&snap->lock);

		if (pe_to_start_now) {
			start_copy(pe_to_start_now);
			pe_to_start_now = NULL;
		}
	}

	/*
	 * Submit the exception against which the bio is queued last,
	 * to give the other exceptions a head start.
	 */
	if (pe_to_start_last)
		start_copy(pe_to_start_last);

	return r;
}
/*
 * Called on a write from the origin driver.
 */
static int do_origin(struct dm_dev *origin, struct bio *bio)
{
	struct origin *o;
	int r = DM_MAPIO_REMAPPED;

	down_read(&_origins_lock);
	o = __lookup_origin(origin->bdev);
	if (o)
		r = __origin_write(&o->snapshots, bio->bi_sector, bio);
	up_read(&_origins_lock);

	return r;
}
/*
 * Trigger exceptions in all non-merging snapshots.
 *
 * The chunk size of the merging snapshot may be larger than the chunk
 * size of some other snapshot so we may need to reallocate multiple
 * chunks in other snapshots.
 *
 * We scan all the overlapping exceptions in the other snapshots.
 * Returns 1 if anything was reallocated and must be waited for,
 * otherwise returns 0.
 *
 * size must be a multiple of merging_snap's chunk_size.
 */
static int origin_write_extent(struct dm_snapshot *merging_snap,
			       sector_t sector, unsigned size)
{
	int must_wait = 0;
	sector_t n;
	struct origin *o;

	/*
	 * The origin's __minimum_chunk_size() got stored in max_io_len
	 * by snapshot_merge_resume().
	 */
	down_read(&_origins_lock);
	o = __lookup_origin(merging_snap->origin->bdev);
	for (n = 0; n < size; n += merging_snap->ti->max_io_len)
		if (__origin_write(&o->snapshots, sector + n, NULL) ==
		    DM_MAPIO_SUBMITTED)
			must_wait = 1;
	up_read(&_origins_lock);

	return must_wait;
}
/*-----------------------------------------------------------------
 * Origin: maps a linear range of a device, with hooks for snapshotting.
 *---------------------------------------------------------------*/

/*
 * Construct an origin mapping: <dev_path>
 * The context for an origin is merely a 'struct dm_dev *'
 * pointing to the real device.
 */
static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	struct dm_dev *dev;

	if (argc != 1) {
		ti->error = "origin: incorrect number of arguments";
		return -EINVAL;
	}

	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dev);
	if (r) {
		ti->error = "Cannot get target device";
		return r;
	}

	ti->private = dev;
	ti->num_flush_requests = 1;

	return 0;
}
*ti
)
2114 struct dm_dev
*dev
= ti
->private;
2115 dm_put_device(ti
, dev
);
static int origin_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_dev *dev = ti->private;
	bio->bi_bdev = dev->bdev;

	if (bio->bi_rw & REQ_FLUSH)
		return DM_MAPIO_REMAPPED;

	/* Only tell snapshots if this is a write */
	return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
}
/*
 * Set the target "max_io_len" field to the minimum of all the snapshots'
 * chunk sizes.
 */
static void origin_resume(struct dm_target *ti)
{
	struct dm_dev *dev = ti->private;

	ti->max_io_len = get_origin_minimum_chunksize(dev->bdev);
}
static int origin_status(struct dm_target *ti, status_type_t type,
			 unsigned status_flags, char *result, unsigned maxlen)
{
	struct dm_dev *dev = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		snprintf(result, maxlen, "%s", dev->name);
		break;
	}

	return 0;
}
static int origin_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
			struct bio_vec *biovec, int max_size)
{
	struct dm_dev *dev = ti->private;
	struct request_queue *q = bdev_get_queue(dev->bdev);

	if (!q->merge_bvec_fn)
		return max_size;

	bvm->bi_bdev = dev->bdev;

	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}
static int origin_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct dm_dev *dev = ti->private;

	return fn(ti, dev, 0, ti->len, data);
}
static struct target_type origin_target = {
	.name    = "snapshot-origin",
	.version = {1, 8, 0},
	.module  = THIS_MODULE,
	.ctr     = origin_ctr,
	.dtr     = origin_dtr,
	.map     = origin_map,
	.resume  = origin_resume,
	.status  = origin_status,
	.merge	 = origin_merge,
	.iterate_devices = origin_iterate_devices,
};

static struct target_type snapshot_target = {
	.name    = "snapshot",
	.version = {1, 11, 0},
	.module  = THIS_MODULE,
	.ctr     = snapshot_ctr,
	.dtr     = snapshot_dtr,
	.map     = snapshot_map,
	.end_io  = snapshot_end_io,
	.preresume  = snapshot_preresume,
	.resume  = snapshot_resume,
	.status  = snapshot_status,
	.iterate_devices = snapshot_iterate_devices,
};

static struct target_type merge_target = {
	.name    = dm_snapshot_merge_target_name,
	.version = {1, 2, 0},
	.module  = THIS_MODULE,
	.ctr     = snapshot_ctr,
	.dtr     = snapshot_dtr,
	.map     = snapshot_merge_map,
	.end_io  = snapshot_end_io,
	.presuspend = snapshot_merge_presuspend,
	.preresume  = snapshot_preresume,
	.resume  = snapshot_merge_resume,
	.status  = snapshot_status,
	.iterate_devices = snapshot_iterate_devices,
};
static int __init dm_snapshot_init(void)
{
	int r;

	r = dm_exception_store_init();
	if (r) {
		DMERR("Failed to initialize exception stores");
		return r;
	}

	r = dm_register_target(&snapshot_target);
	if (r < 0) {
		DMERR("snapshot target register failed %d", r);
		goto bad_register_snapshot_target;
	}

	r = dm_register_target(&origin_target);
	if (r < 0) {
		DMERR("Origin target register failed %d", r);
		goto bad_register_origin_target;
	}

	r = dm_register_target(&merge_target);
	if (r < 0) {
		DMERR("Merge target register failed %d", r);
		goto bad_register_merge_target;
	}

	r = init_origin_hash();
	if (r) {
		DMERR("init_origin_hash failed.");
		goto bad_origin_hash;
	}

	exception_cache = KMEM_CACHE(dm_exception, 0);
	if (!exception_cache) {
		DMERR("Couldn't create exception cache.");
		r = -ENOMEM;
		goto bad_exception_cache;
	}

	pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
	if (!pending_cache) {
		DMERR("Couldn't create pending cache.");
		r = -ENOMEM;
		goto bad_pending_cache;
	}

	return 0;

bad_pending_cache:
	kmem_cache_destroy(exception_cache);
bad_exception_cache:
	exit_origin_hash();
bad_origin_hash:
	dm_unregister_target(&merge_target);
bad_register_merge_target:
	dm_unregister_target(&origin_target);
bad_register_origin_target:
	dm_unregister_target(&snapshot_target);
bad_register_snapshot_target:
	dm_exception_store_exit();

	return r;
}
static void __exit dm_snapshot_exit(void)
{
	dm_unregister_target(&snapshot_target);
	dm_unregister_target(&origin_target);
	dm_unregister_target(&merge_target);

	exit_origin_hash();
	kmem_cache_destroy(pending_cache);
	kmem_cache_destroy(exception_cache);

	dm_exception_store_exit();
}
module_init(dm_snapshot_init);
module_exit(dm_snapshot_exit);

MODULE_DESCRIPTION(DM_NAME " snapshot target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");