/*
 * dm-snapshot.c
 *
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include <linux/blkdev.h>
#include <linux/ctype.h>
#include <linux/device-mapper.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "dm-snap.h"
#include "dm-bio-list.h"
#include "kcopyd.h"

#define DM_MSG_PREFIX "snapshots"

/*
 * The percentage increment we will wake up users at
 */
#define WAKE_UP_PERCENT 5

/*
 * kcopyd priority of snapshot operations
 */
#define SNAPSHOT_COPY_PRIORITY 2

/*
 * Each snapshot reserves this many pages for io
 */
#define SNAPSHOT_PAGES 256

static struct workqueue_struct *ksnapd;
static void flush_queued_bios(struct work_struct *work);

struct dm_snap_pending_exception {
	struct dm_snap_exception e;

	/*
	 * Origin buffers waiting for this to complete are held
	 * in a bio list
	 */
	struct bio_list origin_bios;
	struct bio_list snapshot_bios;

	/*
	 * Short-term queue of pending exceptions prior to submission.
	 */
	struct list_head list;

	/*
	 * The primary pending_exception is the one that holds
	 * the ref_count and the list of origin_bios for a
	 * group of pending_exceptions. It is always last to get freed.
	 * These fields get set up when writing to the origin.
	 */
	struct dm_snap_pending_exception *primary_pe;

	/*
	 * Number of pending_exceptions processing this chunk.
	 * When this drops to zero we must complete the origin bios.
	 * If incrementing or decrementing this, hold pe->snap->lock for
	 * the sibling concerned and not pe->primary_pe->snap->lock unless
	 * they are the same.
	 */
	atomic_t ref_count;

	/* Pointer back to snapshot context */
	struct dm_snapshot *snap;

	/*
	 * 1 indicates the exception has already been sent to
	 * kcopyd.
	 */
	int started;
};

/*
 * Hash table mapping origin volumes to lists of snapshots and
 * a lock to protect it
 */
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;
static mempool_t *pending_pool;

/*
 * One of these per registered origin, held in the snapshot_origins hash
 */
struct origin {
	/* The origin device */
	struct block_device *bdev;

	struct list_head hash_list;

	/* List of snapshots for this origin */
	struct list_head snapshots;
};

/*
 * Size of the hash table for origin volumes. If we make this
 * the size of the minors list then it should be nearly perfect
 */
#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK      0xFF
static struct list_head *_origins;
static struct rw_semaphore _origins_lock;

static int init_origin_hash(void)
{
	int i;

	_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
			   GFP_KERNEL);
	if (!_origins) {
		DMERR("unable to allocate memory");
		return -ENOMEM;
	}

	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
		INIT_LIST_HEAD(_origins + i);
	init_rwsem(&_origins_lock);

	return 0;
}

static void exit_origin_hash(void)
{
	kfree(_origins);
}

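/*
 * Hash on the low bits of the device number. ORIGIN_MASK must be
 * ORIGIN_HASH_SIZE - 1 so the bucket index stays within the table.
 */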
static unsigned origin_hash(struct block_device *bdev)
{
	return bdev->bd_dev & ORIGIN_MASK;
}

static struct origin *__lookup_origin(struct block_device *origin)
{
	struct list_head *ol;
	struct origin *o;

	ol = &_origins[origin_hash(origin)];
	list_for_each_entry (o, ol, hash_list)
		if (bdev_equal(o->bdev, origin))
			return o;

	return NULL;
}

static void __insert_origin(struct origin *o)
{
	struct list_head *sl = &_origins[origin_hash(o->bdev)];
	list_add_tail(&o->hash_list, sl);
}

/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
 */
static int register_snapshot(struct dm_snapshot *snap)
{
	struct origin *o;
	struct block_device *bdev = snap->origin->bdev;

	down_write(&_origins_lock);
	o = __lookup_origin(bdev);

	if (!o) {
		/* New origin */
		o = kmalloc(sizeof(*o), GFP_KERNEL);
		if (!o) {
			up_write(&_origins_lock);
			return -ENOMEM;
		}

		/* Initialise the struct */
		INIT_LIST_HEAD(&o->snapshots);
		o->bdev = bdev;

		__insert_origin(o);
	}

	list_add_tail(&snap->list, &o->snapshots);

	up_write(&_origins_lock);
	return 0;
}

static void unregister_snapshot(struct dm_snapshot *s)
{
	struct origin *o;

	down_write(&_origins_lock);
	o = __lookup_origin(s->origin->bdev);

	list_del(&s->list);
	if (list_empty(&o->snapshots)) {
		list_del(&o->hash_list);
		kfree(o);
	}

	up_write(&_origins_lock);
}

/*
 * Implementation of the exception hash tables.
 */
static int init_exception_table(struct exception_table *et, uint32_t size)
{
	unsigned int i;

	et->hash_mask = size - 1;
	et->table = dm_vcalloc(size, sizeof(struct list_head));
	if (!et->table)
		return -ENOMEM;

	for (i = 0; i < size; i++)
		INIT_LIST_HEAD(et->table + i);

	return 0;
}

static void exit_exception_table(struct exception_table *et, struct kmem_cache *mem)
{
	struct list_head *slot;
	struct dm_snap_exception *ex, *next;
	int i, size;

	size = et->hash_mask + 1;
	for (i = 0; i < size; i++) {
		slot = et->table + i;

		list_for_each_entry_safe (ex, next, slot, hash_list)
			kmem_cache_free(mem, ex);
	}

	vfree(et->table);
}

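/*
 * Each table is sized to a power of 2 with hash_mask = size - 1, so
 * masking below is equivalent to chunk % size.
 */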
static uint32_t exception_hash(struct exception_table *et, chunk_t chunk)
{
	return chunk & et->hash_mask;
}

static void insert_exception(struct exception_table *eh,
			     struct dm_snap_exception *e)
{
	struct list_head *l = &eh->table[exception_hash(eh, e->old_chunk)];
	list_add(&e->hash_list, l);
}

static void remove_exception(struct dm_snap_exception *e)
{
	list_del(&e->hash_list);
}

/*
 * Return the exception data for a sector, or NULL if not
 * remapped.
 */
static struct dm_snap_exception *lookup_exception(struct exception_table *et,
						  chunk_t chunk)
{
	struct list_head *slot;
	struct dm_snap_exception *e;

	slot = &et->table[exception_hash(et, chunk)];
	list_for_each_entry (e, slot, hash_list)
		if (e->old_chunk == chunk)
			return e;

	return NULL;
}

static struct dm_snap_exception *alloc_exception(void)
{
	struct dm_snap_exception *e;

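	/* Prefer a sleeping allocation; fall back to the atomic reserves. */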
	e = kmem_cache_alloc(exception_cache, GFP_NOIO);
	if (!e)
		e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);

	return e;
}

static void free_exception(struct dm_snap_exception *e)
{
	kmem_cache_free(exception_cache, e);
}

static struct dm_snap_pending_exception *alloc_pending_exception(void)
{
	return mempool_alloc(pending_pool, GFP_NOIO);
}

static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
	mempool_free(pe, pending_pool);
}

int dm_add_exception(struct dm_snapshot *s, chunk_t old, chunk_t new)
{
	struct dm_snap_exception *e;

	e = alloc_exception();
	if (!e)
		return -ENOMEM;

	e->old_chunk = old;
	e->new_chunk = new;
	insert_exception(&s->complete, e);
	return 0;
}

/*
 * Hard coded magic.
 */
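/*
 * With 2MB of bucket heads and a 16-byte struct list_head (on 64-bit),
 * this caps the completed-exception hash table at 131072 buckets.
 */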
static int calc_max_buckets(void)
{
	/* use a fixed size of 2MB */
	unsigned long mem = 2 * 1024 * 1024;
	mem /= sizeof(struct list_head);

	return mem;
}

/*
 * Rounds a number down to a power of 2.
 */
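/*
 * e.g. 1000 -> 992 -> 960 -> 896 -> 768 -> 512: each n &= (n - 1)
 * clears the lowest set bit until only the top bit remains.
 */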
static uint32_t round_down(uint32_t n)
{
	while (n & (n - 1))
		n &= (n - 1);
	return n;
}

/*
 * Allocate room for a suitable hash table.
 */
static int init_hash_tables(struct dm_snapshot *s)
{
	sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;

	/*
	 * Calculate based on the size of the original volume or
	 * the COW volume...
	 */
	cow_dev_size = get_dev_size(s->cow->bdev);
	origin_dev_size = get_dev_size(s->origin->bdev);
	max_buckets = calc_max_buckets();

	hash_size = min(origin_dev_size, cow_dev_size) >> s->chunk_shift;
	hash_size = min(hash_size, max_buckets);

	/* Round it down to a power of 2 */
	hash_size = round_down(hash_size);
	if (init_exception_table(&s->complete, hash_size))
		return -ENOMEM;

	/*
	 * Allocate hash table for in-flight exceptions
	 * Make this smaller than the real hash table
	 */
	hash_size >>= 3;
	if (hash_size < 64)
		hash_size = 64;

	if (init_exception_table(&s->pending, hash_size)) {
		exit_exception_table(&s->complete, exception_cache);
		return -ENOMEM;
	}

	return 0;
}

/*
 * Round a number up to the nearest 'size' boundary. size must
 * be a power of 2.
 */
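/*
 * e.g. round_up(5, 8) == 8: (5 + 7) & ~7 masks off the low bits.
 */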
static ulong round_up(ulong n, ulong size)
{
	size--;
	return (n + size) & ~size;
}

static int set_chunk_size(struct dm_snapshot *s, const char *chunk_size_arg,
			  char **error)
{
	unsigned long chunk_size;
	char *value;

	chunk_size = simple_strtoul(chunk_size_arg, &value, 10);
	if (*chunk_size_arg == '\0' || *value != '\0') {
		*error = "Invalid chunk size";
		return -EINVAL;
	}

	if (!chunk_size) {
		s->chunk_size = s->chunk_mask = s->chunk_shift = 0;
		return 0;
	}

	/*
	 * Chunk size must be multiple of page size. Silently
	 * round up if it's not.
	 */
	chunk_size = round_up(chunk_size, PAGE_SIZE >> 9);

	/* Check chunk_size is a power of 2 */
	if (chunk_size & (chunk_size - 1)) {
		*error = "Chunk size is not a power of 2";
		return -EINVAL;
	}

	/* Validate the chunk size against the device block size */
	if (chunk_size % (bdev_hardsect_size(s->cow->bdev) >> 9)) {
		*error = "Chunk size is not a multiple of device blocksize";
		return -EINVAL;
	}

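	/*
	 * e.g. a chunk_size of 16 (512-byte sectors, i.e. 8KB) gives
	 * chunk_mask = 15 and chunk_shift = ffs(16) - 1 = 4.
	 */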
	s->chunk_size = chunk_size;
	s->chunk_mask = chunk_size - 1;
	s->chunk_shift = ffs(chunk_size) - 1;

	return 0;
}

/*
 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
 */
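/*
 * For example (device names illustrative only), a persistent snapshot
 * of a 1GB origin with 8KB chunks might be created with:
 *
 *   dmsetup create snap --table \
 *     "0 2097152 snapshot /dev/vg/base /dev/vg/cow P 16"
 */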
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dm_snapshot *s;
	int r = -EINVAL;
	char persistent;
	char *origin_path;
	char *cow_path;

	if (argc != 4) {
		ti->error = "requires exactly 4 arguments";
		r = -EINVAL;
		goto bad1;
	}

	origin_path = argv[0];
	cow_path = argv[1];
	persistent = toupper(*argv[2]);

	if (persistent != 'P' && persistent != 'N') {
		ti->error = "Persistent flag is not P or N";
		r = -EINVAL;
		goto bad1;
	}

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (s == NULL) {
		ti->error = "Cannot allocate snapshot context private "
			    "structure";
		r = -ENOMEM;
		goto bad1;
	}

	r = dm_get_device(ti, origin_path, 0, ti->len, FMODE_READ, &s->origin);
	if (r) {
		ti->error = "Cannot get origin device";
		goto bad2;
	}

	r = dm_get_device(ti, cow_path, 0, 0,
			  FMODE_READ | FMODE_WRITE, &s->cow);
	if (r) {
		dm_put_device(ti, s->origin);
		ti->error = "Cannot get COW device";
		goto bad2;
	}

	r = set_chunk_size(s, argv[3], &ti->error);
	if (r)
		goto bad3;

	s->type = persistent;

	s->valid = 1;
	s->active = 0;
	s->last_percent = 0;
	init_rwsem(&s->lock);
	spin_lock_init(&s->pe_lock);
	s->table = ti->table;

	/* Allocate hash table for COW data */
	if (init_hash_tables(s)) {
		ti->error = "Unable to allocate hash table space";
		r = -ENOMEM;
		goto bad3;
	}

	s->store.snap = s;

	if (persistent == 'P')
		r = dm_create_persistent(&s->store);
	else
		r = dm_create_transient(&s->store);

	if (r) {
		ti->error = "Couldn't create exception store";
		r = -EINVAL;
		goto bad4;
	}

	r = kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
	if (r) {
		ti->error = "Could not create kcopyd client";
		goto bad5;
	}

	/* Metadata must only be loaded into one table at once */
	r = s->store.read_metadata(&s->store);
	if (r < 0) {
		ti->error = "Failed to read snapshot metadata";
		goto bad6;
	} else if (r > 0) {
		s->valid = 0;
		DMWARN("Snapshot is marked invalid.");
	}

	bio_list_init(&s->queued_bios);
	INIT_WORK(&s->queued_bios_work, flush_queued_bios);

	/* Add snapshot to the list of snapshots for this origin */
	/* Exceptions aren't triggered till snapshot_resume() is called */
	if (register_snapshot(s)) {
		r = -EINVAL;
		ti->error = "Cannot register snapshot origin";
		goto bad6;
	}

	ti->private = s;
	ti->split_io = s->chunk_size;

	return 0;

bad6:
	kcopyd_client_destroy(s->kcopyd_client);

bad5:
	s->store.destroy(&s->store);

bad4:
	exit_exception_table(&s->pending, pending_cache);
	exit_exception_table(&s->complete, exception_cache);

bad3:
	dm_put_device(ti, s->cow);
	dm_put_device(ti, s->origin);

bad2:
	kfree(s);

bad1:
	return r;
}

static void __free_exceptions(struct dm_snapshot *s)
{
	kcopyd_client_destroy(s->kcopyd_client);
	s->kcopyd_client = NULL;

	exit_exception_table(&s->pending, pending_cache);
	exit_exception_table(&s->complete, exception_cache);

	s->store.destroy(&s->store);
}

static void snapshot_dtr(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	flush_workqueue(ksnapd);

	/* Prevent further origin writes from using this snapshot. */
	/* After this returns there can be no new kcopyd jobs. */
	unregister_snapshot(s);

	__free_exceptions(s);

	dm_put_device(ti, s->origin);
	dm_put_device(ti, s->cow);

	kfree(s);
}

/*
 * Flush a list of buffers.
 */
static void flush_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		generic_make_request(bio);
		bio = n;
	}
}

static void flush_queued_bios(struct work_struct *work)
{
	struct dm_snapshot *s =
		container_of(work, struct dm_snapshot, queued_bios_work);
	struct bio *queued_bios;
	unsigned long flags;

	spin_lock_irqsave(&s->pe_lock, flags);
	queued_bios = bio_list_get(&s->queued_bios);
	spin_unlock_irqrestore(&s->pe_lock, flags);

	flush_bios(queued_bios);
}

/*
 * Error a list of buffers.
 */
static void error_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		bio_io_error(bio, bio->bi_size);
		bio = n;
	}
}

static void __invalidate_snapshot(struct dm_snapshot *s, int err)
{
	if (!s->valid)
		return;

	if (err == -EIO)
		DMERR("Invalidating snapshot: Error reading/writing.");
	else if (err == -ENOMEM)
		DMERR("Invalidating snapshot: Unable to allocate exception.");

	if (s->store.drop_snapshot)
		s->store.drop_snapshot(&s->store);

	s->valid = 0;

	dm_table_event(s->table);
}

static void get_pending_exception(struct dm_snap_pending_exception *pe)
{
	atomic_inc(&pe->ref_count);
}

static struct bio *put_pending_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snap_pending_exception *primary_pe;
	struct bio *origin_bios = NULL;

	primary_pe = pe->primary_pe;

	/*
	 * If this pe is involved in a write to the origin and
	 * it is the last sibling to complete then release
	 * the bios for the original write to the origin.
	 */
	if (primary_pe &&
	    atomic_dec_and_test(&primary_pe->ref_count))
		origin_bios = bio_list_get(&primary_pe->origin_bios);

	/*
	 * Free the pe if it's not linked to an origin write or if
	 * it's not itself a primary pe.
	 */
	if (!primary_pe || primary_pe != pe)
		free_pending_exception(pe);

	/*
	 * Free the primary pe if nothing references it.
	 */
	if (primary_pe && !atomic_read(&primary_pe->ref_count))
		free_pending_exception(primary_pe);

	return origin_bios;
}

static void pending_complete(struct dm_snap_pending_exception *pe, int success)
{
	struct dm_snap_exception *e;
	struct dm_snapshot *s = pe->snap;
	struct bio *origin_bios = NULL;
	struct bio *snapshot_bios = NULL;
	int error = 0;

	if (!success) {
		/* Read/write error - snapshot is unusable */
		down_write(&s->lock);
		__invalidate_snapshot(s, -EIO);
		error = 1;
		goto out;
	}

	e = alloc_exception();
	if (!e) {
		down_write(&s->lock);
		__invalidate_snapshot(s, -ENOMEM);
		error = 1;
		goto out;
	}
	*e = pe->e;

	down_write(&s->lock);
	if (!s->valid) {
		free_exception(e);
		error = 1;
		goto out;
	}

	/*
	 * Add a proper exception, and remove the
	 * in-flight exception from the list.
	 */
	insert_exception(&s->complete, e);

out:
	remove_exception(&pe->e);
	snapshot_bios = bio_list_get(&pe->snapshot_bios);
	origin_bios = put_pending_exception(pe);

	up_write(&s->lock);

	/* Submit any pending write bios */
	if (error)
		error_bios(snapshot_bios);
	else
		flush_bios(snapshot_bios);

	flush_bios(origin_bios);
}

static void commit_callback(void *context, int success)
{
	struct dm_snap_pending_exception *pe = context;

	pending_complete(pe, success);
}

/*
 * Called when the copy I/O has finished. kcopyd actually runs
 * this code so don't block.
 */
static void copy_callback(int read_err, unsigned int write_err, void *context)
{
	struct dm_snap_pending_exception *pe = context;
	struct dm_snapshot *s = pe->snap;

	if (read_err || write_err)
		pending_complete(pe, 0);

	else
		/* Update the metadata if we are persistent */
		s->store.commit_exception(&s->store, &pe->e, commit_callback,
					  pe);
}

/*
 * Dispatches the copy operation to kcopyd.
 */
static void start_copy(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;
	struct io_region src, dest;
	struct block_device *bdev = s->origin->bdev;
	sector_t dev_size;

	dev_size = get_dev_size(bdev);

	src.bdev = bdev;
	src.sector = chunk_to_sector(s, pe->e.old_chunk);
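	/* Clamp so the final chunk of an oddly-sized origin isn't read past the end. */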
	src.count = min(s->chunk_size, dev_size - src.sector);

	dest.bdev = s->cow->bdev;
	dest.sector = chunk_to_sector(s, pe->e.new_chunk);
	dest.count = src.count;

	/* Hand over to kcopyd */
	kcopyd_copy(s->kcopyd_client,
		    &src, 1, &dest, 0, copy_callback, pe);
}

/*
 * Looks to see if this snapshot already has a pending exception
 * for this chunk, otherwise it allocates a new one and inserts
 * it into the pending table.
 *
 * NOTE: a write lock must be held on snap->lock before calling
 * this.
 */
static struct dm_snap_pending_exception *
__find_pending_exception(struct dm_snapshot *s, struct bio *bio)
{
	struct dm_snap_exception *e;
	struct dm_snap_pending_exception *pe;
	chunk_t chunk = sector_to_chunk(s, bio->bi_sector);

	/*
	 * Is there a pending exception for this already?
	 */
	e = lookup_exception(&s->pending, chunk);
	if (e) {
		/* cast the exception to a pending exception */
		pe = container_of(e, struct dm_snap_pending_exception, e);
		goto out;
	}

	/*
	 * Create a new pending exception, we don't want
	 * to hold the lock while we do this.
	 */
	up_write(&s->lock);
	pe = alloc_pending_exception();
	down_write(&s->lock);

	if (!s->valid) {
		free_pending_exception(pe);
		return NULL;
	}

	e = lookup_exception(&s->pending, chunk);
	if (e) {
		free_pending_exception(pe);
		pe = container_of(e, struct dm_snap_pending_exception, e);
		goto out;
	}

	pe->e.old_chunk = chunk;
	bio_list_init(&pe->origin_bios);
	bio_list_init(&pe->snapshot_bios);
	pe->primary_pe = NULL;
	atomic_set(&pe->ref_count, 0);
	pe->snap = s;
	pe->started = 0;

	if (s->store.prepare_exception(&s->store, &pe->e)) {
		free_pending_exception(pe);
		return NULL;
	}

	get_pending_exception(pe);
	insert_exception(&s->pending, &pe->e);

out:
	return pe;
}

static void remap_exception(struct dm_snapshot *s, struct dm_snap_exception *e,
			    struct bio *bio)
{
	bio->bi_bdev = s->cow->bdev;
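	/* Chunk base moves to the COW device; the offset within the chunk is kept. */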
	bio->bi_sector = chunk_to_sector(s, e->new_chunk) +
		(bio->bi_sector & s->chunk_mask);
}

static int snapshot_map(struct dm_target *ti, struct bio *bio,
			union map_info *map_context)
{
	struct dm_snap_exception *e;
	struct dm_snapshot *s = ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;
	struct dm_snap_pending_exception *pe = NULL;

	chunk = sector_to_chunk(s, bio->bi_sector);

	/* Full snapshots are not usable */
	/* To get here the table must be live so s->active is always set. */
	if (!s->valid)
		return -EIO;

	if (unlikely(bio_barrier(bio)))
		return -EOPNOTSUPP;

	/* FIXME: should only take write lock if we need
	 * to copy an exception */
	down_write(&s->lock);

	if (!s->valid) {
		r = -EIO;
		goto out_unlock;
	}

	/* If the block is already remapped - use that, else remap it */
	e = lookup_exception(&s->complete, chunk);
	if (e) {
		remap_exception(s, e, bio);
		goto out_unlock;
	}

	/*
	 * Write to snapshot - higher level takes care of RW/RO
	 * flags so we should only get this if we are
	 * writeable.
	 */
	if (bio_rw(bio) == WRITE) {
		pe = __find_pending_exception(s, bio);
		if (!pe) {
			__invalidate_snapshot(s, -ENOMEM);
			r = -EIO;
			goto out_unlock;
		}

		remap_exception(s, &pe->e, bio);
		bio_list_add(&pe->snapshot_bios, bio);

		r = DM_MAPIO_SUBMITTED;

		if (!pe->started) {
			/* this is protected by snap->lock */
			pe->started = 1;
			up_write(&s->lock);
			start_copy(pe);
			goto out;
		}
	} else
		/*
		 * FIXME: this read path scares me because we
		 * always use the origin when we have a pending
		 * exception. However I can't think of a
		 * situation where this is wrong - ejt.
		 */
		bio->bi_bdev = s->origin->bdev;

out_unlock:
	up_write(&s->lock);
out:
	return r;
}

static void snapshot_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	down_write(&s->lock);
	s->active = 1;
	up_write(&s->lock);
}

static int snapshot_status(struct dm_target *ti, status_type_t type,
			   char *result, unsigned int maxlen)
{
	struct dm_snapshot *snap = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		if (!snap->valid)
			snprintf(result, maxlen, "Invalid");
		else {
			if (snap->store.fraction_full) {
				sector_t numerator, denominator;
				snap->store.fraction_full(&snap->store,
							  &numerator,
							  &denominator);
				snprintf(result, maxlen, "%llu/%llu",
					 (unsigned long long)numerator,
					 (unsigned long long)denominator);
			} else
				snprintf(result, maxlen, "Unknown");
		}
		break;

	case STATUSTYPE_TABLE:
		/*
		 * kdevname returns a static pointer so we need
		 * to make private copies if the output is to
		 * make sense.
		 */
		snprintf(result, maxlen, "%s %s %c %llu",
			 snap->origin->name, snap->cow->name,
			 snap->type,
			 (unsigned long long)snap->chunk_size);
		break;
	}

	return 0;
}

/*-----------------------------------------------------------------
 * Origin methods
 *---------------------------------------------------------------*/
static int __origin_write(struct list_head *snapshots, struct bio *bio)
{
	int r = DM_MAPIO_REMAPPED, first = 0;
	struct dm_snapshot *snap;
	struct dm_snap_exception *e;
	struct dm_snap_pending_exception *pe, *next_pe, *primary_pe = NULL;
	chunk_t chunk;
	LIST_HEAD(pe_queue);

	/* Do all the snapshots on this origin */
	list_for_each_entry (snap, snapshots, list) {

		down_write(&snap->lock);

		/* Only deal with valid and active snapshots */
		if (!snap->valid || !snap->active)
			goto next_snapshot;

		/* Nothing to do if writing beyond end of snapshot */
		if (bio->bi_sector >= dm_table_get_size(snap->table))
			goto next_snapshot;

		/*
		 * Remember, different snapshots can have
		 * different chunk sizes.
		 */
		chunk = sector_to_chunk(snap, bio->bi_sector);

		/*
		 * Check exception table to see if block
		 * is already remapped in this snapshot
		 * and trigger an exception if not.
		 *
		 * ref_count is initialised to 1 so pending_complete()
		 * won't destroy the primary_pe while we're inside this loop.
		 */
		e = lookup_exception(&snap->complete, chunk);
		if (e)
			goto next_snapshot;

		pe = __find_pending_exception(snap, bio);
		if (!pe) {
			__invalidate_snapshot(snap, -ENOMEM);
			goto next_snapshot;
		}

		if (!primary_pe) {
			/*
			 * Either every pe here has same
			 * primary_pe or none has one yet.
			 */
			if (pe->primary_pe)
				primary_pe = pe->primary_pe;
			else {
				primary_pe = pe;
				first = 1;
			}

			bio_list_add(&primary_pe->origin_bios, bio);

			r = DM_MAPIO_SUBMITTED;
		}

		if (!pe->primary_pe) {
			pe->primary_pe = primary_pe;
			get_pending_exception(primary_pe);
		}

		if (!pe->started) {
			pe->started = 1;
			list_add_tail(&pe->list, &pe_queue);
		}

next_snapshot:
		up_write(&snap->lock);
	}

	if (!primary_pe)
		return r;

	/*
	 * If this is the first time we're processing this chunk and
	 * ref_count is now 1 it means all the pending exceptions
	 * got completed while we were in the loop above, so it falls to
	 * us here to remove the primary_pe and submit any origin_bios.
	 */

	if (first && atomic_dec_and_test(&primary_pe->ref_count)) {
		flush_bios(bio_list_get(&primary_pe->origin_bios));
		free_pending_exception(primary_pe);
		/* If we got here, pe_queue is necessarily empty. */
		return r;
	}

	/*
	 * Now that we have a complete pe list we can start the copying.
	 */
	list_for_each_entry_safe(pe, next_pe, &pe_queue, list)
		start_copy(pe);

	return r;
}

/*
 * Called on a write from the origin driver.
 */
static int do_origin(struct dm_dev *origin, struct bio *bio)
{
	struct origin *o;
	int r = DM_MAPIO_REMAPPED;

	down_read(&_origins_lock);
	o = __lookup_origin(origin->bdev);
	if (o)
		r = __origin_write(&o->snapshots, bio);
	up_read(&_origins_lock);

	return r;
}

/*
 * Origin: maps a linear range of a device, with hooks for snapshotting.
 */

/*
 * Construct an origin mapping: <dev_path>
 * The context for an origin is merely a 'struct dm_dev *'
 * pointing to the real device.
 */
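/*
 * For example (device name illustrative only), exposing the origin
 * through this target so writes trigger copy-out to active snapshots:
 *
 *   dmsetup create base-origin --table \
 *     "0 2097152 snapshot-origin /dev/vg/base"
 */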
static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	struct dm_dev *dev;

	if (argc != 1) {
		ti->error = "origin: incorrect number of arguments";
		return -EINVAL;
	}

	r = dm_get_device(ti, argv[0], 0, ti->len,
			  dm_table_get_mode(ti->table), &dev);
	if (r) {
		ti->error = "Cannot get target device";
		return r;
	}

	ti->private = dev;
	return 0;
}

static void origin_dtr(struct dm_target *ti)
{
	struct dm_dev *dev = ti->private;
	dm_put_device(ti, dev);
}

static int origin_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	struct dm_dev *dev = ti->private;
	bio->bi_bdev = dev->bdev;

	if (unlikely(bio_barrier(bio)))
		return -EOPNOTSUPP;

	/* Only tell snapshots if this is a write */
	return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
}

/* Smallest non-zero value of l and r; zero only if both are zero. */
#define min_not_zero(l, r) (((l) == 0) ? (r) : (((r) == 0) ? (l) : min(l, r)))

/*
 * Set the target "split_io" field to the minimum of all the snapshots'
 * chunk sizes.
 */
static void origin_resume(struct dm_target *ti)
{
	struct dm_dev *dev = ti->private;
	struct dm_snapshot *snap;
	struct origin *o;
	chunk_t chunk_size = 0;

	down_read(&_origins_lock);
	o = __lookup_origin(dev->bdev);
	if (o)
		list_for_each_entry (snap, &o->snapshots, list)
			chunk_size = min_not_zero(chunk_size, snap->chunk_size);
	up_read(&_origins_lock);

	ti->split_io = chunk_size;
}

static int origin_status(struct dm_target *ti, status_type_t type, char *result,
			 unsigned int maxlen)
{
	struct dm_dev *dev = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		snprintf(result, maxlen, "%s", dev->name);
		break;
	}

	return 0;
}

static struct target_type origin_target = {
	.name = "snapshot-origin",
	.version = {1, 5, 0},
	.module = THIS_MODULE,
	.ctr = origin_ctr,
	.dtr = origin_dtr,
	.map = origin_map,
	.resume = origin_resume,
	.status = origin_status,
};

static struct target_type snapshot_target = {
	.name = "snapshot",
	.version = {1, 5, 0},
	.module = THIS_MODULE,
	.ctr = snapshot_ctr,
	.dtr = snapshot_dtr,
	.map = snapshot_map,
	.resume = snapshot_resume,
	.status = snapshot_status,
};

static int __init dm_snapshot_init(void)
{
	int r;

	r = dm_register_target(&snapshot_target);
	if (r) {
		DMERR("snapshot target register failed %d", r);
		return r;
	}

	r = dm_register_target(&origin_target);
	if (r < 0) {
		DMERR("Origin target register failed %d", r);
		goto bad1;
	}

	r = init_origin_hash();
	if (r) {
		DMERR("init_origin_hash failed.");
		goto bad2;
	}

	exception_cache = KMEM_CACHE(dm_snap_exception, 0);
	if (!exception_cache) {
		DMERR("Couldn't create exception cache.");
		r = -ENOMEM;
		goto bad3;
	}

	pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
	if (!pending_cache) {
		DMERR("Couldn't create pending cache.");
		r = -ENOMEM;
		goto bad4;
	}

	pending_pool = mempool_create_slab_pool(128, pending_cache);
	if (!pending_pool) {
		DMERR("Couldn't create pending pool.");
		r = -ENOMEM;
		goto bad5;
	}

	ksnapd = create_singlethread_workqueue("ksnapd");
	if (!ksnapd) {
		DMERR("Failed to create ksnapd workqueue.");
		r = -ENOMEM;
		goto bad6;
	}

	return 0;

bad6:
	mempool_destroy(pending_pool);
bad5:
	kmem_cache_destroy(pending_cache);
bad4:
	kmem_cache_destroy(exception_cache);
bad3:
	exit_origin_hash();
bad2:
	dm_unregister_target(&origin_target);
bad1:
	dm_unregister_target(&snapshot_target);
	return r;
}

static void __exit dm_snapshot_exit(void)
{
	int r;

	destroy_workqueue(ksnapd);

	r = dm_unregister_target(&snapshot_target);
	if (r)
		DMERR("snapshot unregister failed %d", r);

	r = dm_unregister_target(&origin_target);
	if (r)
		DMERR("origin unregister failed %d", r);

	exit_origin_hash();
	mempool_destroy(pending_pool);
	kmem_cache_destroy(pending_cache);
	kmem_cache_destroy(exception_cache);
}

/* Module hooks */
module_init(dm_snapshot_init);
module_exit(dm_snapshot_exit);

MODULE_DESCRIPTION(DM_NAME " snapshot target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");