/*
 * dm-snapshot.c
 *
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include <linux/blkdev.h>
#include <linux/ctype.h>
#include <linux/device-mapper.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>

#include "dm-exception-store.h"
#include "dm-snap.h"
#include "dm-bio-list.h"

#define DM_MSG_PREFIX "snapshots"

/*
 * The percentage increment we will wake up users at
 */
#define WAKE_UP_PERCENT 5

/*
 * kcopyd priority of snapshot operations
 */
#define SNAPSHOT_COPY_PRIORITY 2

/*
 * Reserve 1MB for each snapshot initially (with minimum of 1 page).
 */
#define SNAPSHOT_PAGES (((1UL << 20) >> PAGE_SHIFT) ? : 1)
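/*
 * The "?:" above is gcc's conditional operator with the middle operand
 * omitted: the reservation is 1MB expressed in pages, falling back to a
 * single page should PAGE_SIZE ever exceed 1MB.
 */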

/*
 * The size of the mempool used to track chunks in use.
 */
#define MIN_IOS 256

static struct workqueue_struct *ksnapd;
static void flush_queued_bios(struct work_struct *work);

struct dm_snap_pending_exception {
	struct dm_snap_exception e;

	/*
	 * Origin buffers waiting for this to complete are held
	 * in a bio list
	 */
	struct bio_list origin_bios;
	struct bio_list snapshot_bios;

	/*
	 * Short-term queue of pending exceptions prior to submission.
	 */
	struct list_head list;

	/*
	 * The primary pending_exception is the one that holds
	 * the ref_count and the list of origin_bios for a
	 * group of pending_exceptions.  It is always last to get freed.
	 * These fields get set up when writing to the origin.
	 */
	struct dm_snap_pending_exception *primary_pe;

	/*
	 * Number of pending_exceptions processing this chunk.
	 * When this drops to zero we must complete the origin bios.
	 * If incrementing or decrementing this, hold pe->snap->lock for
	 * the sibling concerned and not pe->primary_pe->snap->lock unless
	 * they are the same.
	 */
	atomic_t ref_count;

	/* Pointer back to snapshot context */
	struct dm_snapshot *snap;

	/*
	 * 1 indicates the exception has already been sent to
	 * kcopyd.
	 */
	int started;
};

/*
 * Hash table mapping origin volumes to lists of snapshots and
 * a lock to protect it
 */
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;

struct dm_snap_tracked_chunk {
	struct hlist_node node;
	chunk_t chunk;
};

static struct kmem_cache *tracked_chunk_cache;

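/*
 * Reads that have been redirected to the origin device are recorded in a
 * small per-snapshot hash table for the duration of the I/O.
 * pending_complete() consults this table (via __chunk_is_tracked()) so a
 * chunk is not switched over to the COW device while such a read is
 * still in flight.
 */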
static struct dm_snap_tracked_chunk *track_chunk(struct dm_snapshot *s,
						 chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c = mempool_alloc(s->tracked_chunk_pool,
							GFP_NOIO);
	unsigned long flags;

	c->chunk = chunk;

	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
	hlist_add_head(&c->node,
		       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

	return c;
}

static void stop_tracking_chunk(struct dm_snapshot *s,
				struct dm_snap_tracked_chunk *c)
{
	unsigned long flags;

	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
	hlist_del(&c->node);
	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

	mempool_free(c, s->tracked_chunk_pool);
}

static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c;
	struct hlist_node *hn;
	int found = 0;

	spin_lock_irq(&s->tracked_chunk_lock);

	hlist_for_each_entry(c, hn,
	    &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
		if (c->chunk == chunk) {
			found = 1;
			break;
		}
	}

	spin_unlock_irq(&s->tracked_chunk_lock);

	return found;
}

/*
 * One of these per registered origin, held in the snapshot_origins hash
 */
struct origin {
	/* The origin device */
	struct block_device *bdev;

	struct list_head hash_list;

	/* List of snapshots for this origin */
	struct list_head snapshots;
};

/*
 * Size of the hash table for origin volumes. If we make this
 * the size of the minors list then it should be nearly perfect
 */
#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK      0xFF
static struct list_head *_origins;
static struct rw_semaphore _origins_lock;

static int init_origin_hash(void)
{
	int i;

	_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
			   GFP_KERNEL);
	if (!_origins) {
		DMERR("unable to allocate memory");
		return -ENOMEM;
	}

	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
		INIT_LIST_HEAD(_origins + i);
	init_rwsem(&_origins_lock);

	return 0;
}

static void exit_origin_hash(void)
{
	kfree(_origins);
}

static unsigned origin_hash(struct block_device *bdev)
{
	return bdev->bd_dev & ORIGIN_MASK;
}

static struct origin *__lookup_origin(struct block_device *origin)
{
	struct list_head *ol;
	struct origin *o;

	ol = &_origins[origin_hash(origin)];
	list_for_each_entry (o, ol, hash_list)
		if (bdev_equal(o->bdev, origin))
			return o;

	return NULL;
}

static void __insert_origin(struct origin *o)
{
	struct list_head *sl = &_origins[origin_hash(o->bdev)];
	list_add_tail(&o->hash_list, sl);
}

/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
 */
static int register_snapshot(struct dm_snapshot *snap)
{
	struct origin *o, *new_o;
	struct block_device *bdev = snap->origin->bdev;

	new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
	if (!new_o)
		return -ENOMEM;

	down_write(&_origins_lock);
	o = __lookup_origin(bdev);

	if (o)
		kfree(new_o);
	else {
		/* New origin */
		o = new_o;

		/* Initialise the struct */
		INIT_LIST_HEAD(&o->snapshots);
		o->bdev = bdev;

		__insert_origin(o);
	}

	list_add_tail(&snap->list, &o->snapshots);

	up_write(&_origins_lock);
	return 0;
}

static void unregister_snapshot(struct dm_snapshot *s)
{
	struct origin *o;

	down_write(&_origins_lock);
	o = __lookup_origin(s->origin->bdev);

	list_del(&s->list);
	if (list_empty(&o->snapshots)) {
		list_del(&o->hash_list);
		kfree(o);
	}

	up_write(&_origins_lock);
}

/*
 * Implementation of the exception hash tables.
 * The lowest hash_shift bits of the chunk number are ignored, allowing
 * some consecutive chunks to be grouped together.
 */
static int init_exception_table(struct exception_table *et, uint32_t size,
				unsigned hash_shift)
{
	unsigned int i;

	et->hash_shift = hash_shift;
	et->hash_mask = size - 1;
	et->table = dm_vcalloc(size, sizeof(struct list_head));
	if (!et->table)
		return -ENOMEM;

	for (i = 0; i < size; i++)
		INIT_LIST_HEAD(et->table + i);

	return 0;
}

static void exit_exception_table(struct exception_table *et, struct kmem_cache *mem)
{
	struct list_head *slot;
	struct dm_snap_exception *ex, *next;
	int i, size;

	size = et->hash_mask + 1;
	for (i = 0; i < size; i++) {
		slot = et->table + i;

		list_for_each_entry_safe (ex, next, slot, hash_list)
			kmem_cache_free(mem, ex);
	}

	vfree(et->table);
}

static uint32_t exception_hash(struct exception_table *et, chunk_t chunk)
{
	return (chunk >> et->hash_shift) & et->hash_mask;
}

static void insert_exception(struct exception_table *eh,
			     struct dm_snap_exception *e)
{
	struct list_head *l = &eh->table[exception_hash(eh, e->old_chunk)];
	list_add(&e->hash_list, l);
}

static void remove_exception(struct dm_snap_exception *e)
{
	list_del(&e->hash_list);
}

/*
 * Return the exception data for a sector, or NULL if not
 * remapped.
 */
static struct dm_snap_exception *lookup_exception(struct exception_table *et,
						  chunk_t chunk)
{
	struct list_head *slot;
	struct dm_snap_exception *e;

	slot = &et->table[exception_hash(et, chunk)];
	list_for_each_entry (e, slot, hash_list)
		if (chunk >= e->old_chunk &&
		    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
			return e;

	return NULL;
}

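/*
 * Exception allocation sits on the write-out path, so GFP_NOIO is tried
 * first to avoid recursing into the block layer; GFP_ATOMIC is a last
 * resort that may dip into the emergency reserves.
 */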
static struct dm_snap_exception *alloc_exception(void)
{
	struct dm_snap_exception *e;

	e = kmem_cache_alloc(exception_cache, GFP_NOIO);
	if (!e)
		e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);

	return e;
}

static void free_exception(struct dm_snap_exception *e)
{
	kmem_cache_free(exception_cache, e);
}

static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
{
	struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
							     GFP_NOIO);

	atomic_inc(&s->pending_exceptions_count);
	pe->snap = s;

	return pe;
}

static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;

	mempool_free(pe, s->pending_pool);
	smp_mb__before_atomic_dec();
	atomic_dec(&s->pending_exceptions_count);
}

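/*
 * Completed exceptions for adjacent chunks that were also allocated
 * adjacently on the COW device are merged into a single entry; the run
 * length is kept in the top DM_CHUNK_CONSECUTIVE_BITS of new_chunk (see
 * dm-snap.h).  For example, an entry with old_chunk 10, a new chunk
 * number of 20 and a consecutive count of 3 maps old chunks 10-13 onto
 * new chunks 20-23, which is what lookup_exception() and
 * remap_exception() rely on.
 */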
static void insert_completed_exception(struct dm_snapshot *s,
				       struct dm_snap_exception *new_e)
{
	struct exception_table *eh = &s->complete;
	struct list_head *l;
	struct dm_snap_exception *e = NULL;

	l = &eh->table[exception_hash(eh, new_e->old_chunk)];

	/* Add immediately if this table doesn't support consecutive chunks */
	if (!eh->hash_shift)
		goto out;

	/* List is ordered by old_chunk */
	list_for_each_entry_reverse(e, l, hash_list) {
		/* Insert after an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk +
					 dm_consecutive_chunk_count(e) + 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
					 dm_consecutive_chunk_count(e) + 1)) {
			dm_consecutive_chunk_count_inc(e);
			free_exception(new_e);
			return;
		}

		/* Insert before an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk - 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
			dm_consecutive_chunk_count_inc(e);
			e->old_chunk--;
			e->new_chunk--;
			free_exception(new_e);
			return;
		}

		if (new_e->old_chunk > e->old_chunk)
			break;
	}

out:
	list_add(&new_e->hash_list, e ? &e->hash_list : l);
}

int dm_add_exception(struct dm_snapshot *s, chunk_t old, chunk_t new)
{
	struct dm_snap_exception *e;

	e = alloc_exception();
	if (!e)
		return -ENOMEM;

	e->old_chunk = old;

	/* Consecutive_count is implicitly initialised to zero */
	e->new_chunk = new;

	insert_completed_exception(s, e);

	return 0;
}

/*
 * Hard coded magic.
 */
static int calc_max_buckets(void)
{
	/* use a fixed size of 2MB */
	unsigned long mem = 2 * 1024 * 1024;
	mem /= sizeof(struct list_head);

	return mem;
}

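/*
 * Sizing illustration (assuming a 64-bit build where struct list_head is
 * 16 bytes): calc_max_buckets() caps the table at 2MB / 16 = 131072
 * buckets, so a 100GiB origin with 16KiB (32-sector) chunks would ask
 * for 209715200 >> 5 = 6553600 buckets, be clamped to 131072 and then be
 * rounded down to a power of two.
 */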
/*
 * Allocate room for a suitable hash table.
 */
static int init_hash_tables(struct dm_snapshot *s)
{
	sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;

	/*
	 * Calculate based on the size of the original volume or
	 * the COW volume...
	 */
	cow_dev_size = get_dev_size(s->cow->bdev);
	origin_dev_size = get_dev_size(s->origin->bdev);
	max_buckets = calc_max_buckets();

	hash_size = min(origin_dev_size, cow_dev_size) >> s->chunk_shift;
	hash_size = min(hash_size, max_buckets);

	hash_size = rounddown_pow_of_two(hash_size);
	if (init_exception_table(&s->complete, hash_size,
				 DM_CHUNK_CONSECUTIVE_BITS))
		return -ENOMEM;

	/*
	 * Allocate hash table for in-flight exceptions
	 * Make this smaller than the real hash table
	 */
	hash_size >>= 3;
	if (hash_size < 64)
		hash_size = 64;

	if (init_exception_table(&s->pending, hash_size, 0)) {
		exit_exception_table(&s->complete, exception_cache);
		return -ENOMEM;
	}

	return 0;
}

/*
 * Round a number up to the nearest 'size' boundary.  size must
 * be a power of 2.
 */
static ulong round_up(ulong n, ulong size)
{
	size--;
	return (n + size) & ~size;
}

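/*
 * The chunk size argument is expressed in 512-byte sectors (so "32"
 * means 16KiB).  PAGE_SIZE >> 9 is the page size in sectors, hence the
 * rounding below keeps the chunk a whole number of pages, and ffs() - 1
 * turns the power-of-two size into the shift used by sector_to_chunk().
 */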
static int set_chunk_size(struct dm_snapshot *s, const char *chunk_size_arg,
			  char **error)
{
	unsigned long chunk_size;
	char *value;

	chunk_size = simple_strtoul(chunk_size_arg, &value, 10);
	if (*chunk_size_arg == '\0' || *value != '\0') {
		*error = "Invalid chunk size";
		return -EINVAL;
	}

	if (!chunk_size) {
		s->chunk_size = s->chunk_mask = s->chunk_shift = 0;
		return 0;
	}

	/*
	 * Chunk size must be multiple of page size.  Silently
	 * round up if it's not.
	 */
	chunk_size = round_up(chunk_size, PAGE_SIZE >> 9);

	/* Check chunk_size is a power of 2 */
	if (!is_power_of_2(chunk_size)) {
		*error = "Chunk size is not a power of 2";
		return -EINVAL;
	}

	/* Validate the chunk size against the device block size */
	if (chunk_size % (bdev_hardsect_size(s->cow->bdev) >> 9)) {
		*error = "Chunk size is not a multiple of device blocksize";
		return -EINVAL;
	}

	s->chunk_size = chunk_size;
	s->chunk_mask = chunk_size - 1;
	s->chunk_shift = ffs(chunk_size) - 1;

	return 0;
}

/*
 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
 */
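/*
 * Example table line (hypothetical device names), loaded with dmsetup:
 *
 *   dmsetup create lv0-snap --table \
 *     "0 `blockdev --getsz /dev/vg0/lv0` snapshot /dev/vg0/lv0 /dev/vg0/lv0-cow P 32"
 *
 * This creates a persistent ('P') snapshot of /dev/vg0/lv0 with
 * 32-sector (16KiB) chunks, keeping the copied chunks and their metadata
 * on /dev/vg0/lv0-cow.
 */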
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dm_snapshot *s;
	int i;
	int r = -EINVAL;
	char persistent;
	char *origin_path;
	char *cow_path;

	if (argc != 4) {
		ti->error = "requires exactly 4 arguments";
		r = -EINVAL;
		goto bad1;
	}

	origin_path = argv[0];
	cow_path = argv[1];
	persistent = toupper(*argv[2]);

	if (persistent != 'P' && persistent != 'N') {
		ti->error = "Persistent flag is not P or N";
		r = -EINVAL;
		goto bad1;
	}

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (s == NULL) {
		ti->error = "Cannot allocate snapshot context private "
		    "structure";
		r = -ENOMEM;
		goto bad1;
	}

	r = dm_get_device(ti, origin_path, 0, ti->len, FMODE_READ, &s->origin);
	if (r) {
		ti->error = "Cannot get origin device";
		goto bad2;
	}

	r = dm_get_device(ti, cow_path, 0, 0,
			  FMODE_READ | FMODE_WRITE, &s->cow);
	if (r) {
		dm_put_device(ti, s->origin);
		ti->error = "Cannot get COW device";
		goto bad2;
	}

	r = set_chunk_size(s, argv[3], &ti->error);
	if (r)
		goto bad3;

	s->type = persistent;

	s->valid = 1;
	s->active = 0;
	atomic_set(&s->pending_exceptions_count, 0);
	init_rwsem(&s->lock);
	spin_lock_init(&s->pe_lock);
	s->ti = ti;

	/* Allocate hash table for COW data */
	if (init_hash_tables(s)) {
		ti->error = "Unable to allocate hash table space";
		r = -ENOMEM;
		goto bad3;
	}

	s->store.snap = s;

	if (persistent == 'P')
		r = dm_create_persistent(&s->store);
	else
		r = dm_create_transient(&s->store);

	if (r) {
		ti->error = "Couldn't create exception store";
		r = -EINVAL;
		goto bad4;
	}

	r = dm_kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
	if (r) {
		ti->error = "Could not create kcopyd client";
		goto bad5;
	}

	s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
	if (!s->pending_pool) {
		ti->error = "Could not allocate mempool for pending exceptions";
		goto bad6;
	}

	s->tracked_chunk_pool = mempool_create_slab_pool(MIN_IOS,
							 tracked_chunk_cache);
	if (!s->tracked_chunk_pool) {
		ti->error = "Could not allocate tracked_chunk mempool for "
			    "tracking reads";
		goto bad_tracked_chunk_pool;
	}

	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);

	spin_lock_init(&s->tracked_chunk_lock);

	/* Metadata must only be loaded into one table at once */
	r = s->store.read_metadata(&s->store);
	if (r < 0) {
		ti->error = "Failed to read snapshot metadata";
		goto bad_load_and_register;
	} else if (r > 0) {
		s->valid = 0;
		DMWARN("Snapshot is marked invalid.");
	}

	bio_list_init(&s->queued_bios);
	INIT_WORK(&s->queued_bios_work, flush_queued_bios);

	/* Add snapshot to the list of snapshots for this origin */
	/* Exceptions aren't triggered till snapshot_resume() is called */
	if (register_snapshot(s)) {
		r = -EINVAL;
		ti->error = "Cannot register snapshot origin";
		goto bad_load_and_register;
	}

	ti->private = s;
	ti->split_io = s->chunk_size;

	return 0;

bad_load_and_register:
	mempool_destroy(s->tracked_chunk_pool);

bad_tracked_chunk_pool:
	mempool_destroy(s->pending_pool);

bad6:
	dm_kcopyd_client_destroy(s->kcopyd_client);

bad5:
	s->store.destroy(&s->store);

bad4:
	exit_exception_table(&s->pending, pending_cache);
	exit_exception_table(&s->complete, exception_cache);

bad3:
	dm_put_device(ti, s->cow);
	dm_put_device(ti, s->origin);

bad2:
	kfree(s);

bad1:
	return r;
}

static void __free_exceptions(struct dm_snapshot *s)
{
	dm_kcopyd_client_destroy(s->kcopyd_client);
	s->kcopyd_client = NULL;

	exit_exception_table(&s->pending, pending_cache);
	exit_exception_table(&s->complete, exception_cache);

	s->store.destroy(&s->store);
}

static void snapshot_dtr(struct dm_target *ti)
{
#ifdef CONFIG_DM_DEBUG
	int i;
#endif
	struct dm_snapshot *s = ti->private;

	flush_workqueue(ksnapd);

	/* Prevent further origin writes from using this snapshot. */
	/* After this returns there can be no new kcopyd jobs. */
	unregister_snapshot(s);

	while (atomic_read(&s->pending_exceptions_count))
		msleep(1);
	/*
	 * Ensure instructions in mempool_destroy aren't reordered
	 * before atomic_read.
	 */
	smp_mb();

#ifdef CONFIG_DM_DEBUG
	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
#endif

	mempool_destroy(s->tracked_chunk_pool);

	__free_exceptions(s);

	mempool_destroy(s->pending_pool);

	dm_put_device(ti, s->origin);
	dm_put_device(ti, s->cow);

	kfree(s);
}

/*
 * Flush a list of buffers.
 */
static void flush_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		generic_make_request(bio);
		bio = n;
	}
}

static void flush_queued_bios(struct work_struct *work)
{
	struct dm_snapshot *s =
		container_of(work, struct dm_snapshot, queued_bios_work);
	struct bio *queued_bios;
	unsigned long flags;

	spin_lock_irqsave(&s->pe_lock, flags);
	queued_bios = bio_list_get(&s->queued_bios);
	spin_unlock_irqrestore(&s->pe_lock, flags);

	flush_bios(queued_bios);
}

/*
 * Error a list of buffers.
 */
static void error_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		bio_io_error(bio);
		bio = n;
	}
}

static void __invalidate_snapshot(struct dm_snapshot *s, int err)
{
	if (!s->valid)
		return;

	if (err == -EIO)
		DMERR("Invalidating snapshot: Error reading/writing.");
	else if (err == -ENOMEM)
		DMERR("Invalidating snapshot: Unable to allocate exception.");

	if (s->store.drop_snapshot)
		s->store.drop_snapshot(&s->store);

	s->valid = 0;

	dm_table_event(s->ti->table);
}

static void get_pending_exception(struct dm_snap_pending_exception *pe)
{
	atomic_inc(&pe->ref_count);
}

static struct bio *put_pending_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snap_pending_exception *primary_pe;
	struct bio *origin_bios = NULL;

	primary_pe = pe->primary_pe;

	/*
	 * If this pe is involved in a write to the origin and
	 * it is the last sibling to complete then release
	 * the bios for the original write to the origin.
	 */
	if (primary_pe &&
	    atomic_dec_and_test(&primary_pe->ref_count)) {
		origin_bios = bio_list_get(&primary_pe->origin_bios);
		free_pending_exception(primary_pe);
	}

	/*
	 * Free the pe if it's not linked to an origin write or if
	 * it's not itself a primary pe.
	 */
	if (!primary_pe || primary_pe != pe)
		free_pending_exception(pe);

	return origin_bios;
}

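/*
 * Completion path: copy_callback() runs when kcopyd has copied the chunk
 * to the COW device; for persistent snapshots the exception store then
 * commits the metadata and invokes commit_callback(), which lands here.
 * pending_complete() moves the exception from the pending to the
 * complete table and releases the bios that were queued on it.
 */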
static void pending_complete(struct dm_snap_pending_exception *pe, int success)
{
	struct dm_snap_exception *e;
	struct dm_snapshot *s = pe->snap;
	struct bio *origin_bios = NULL;
	struct bio *snapshot_bios = NULL;
	int error = 0;

	if (!success) {
		/* Read/write error - snapshot is unusable */
		down_write(&s->lock);
		__invalidate_snapshot(s, -EIO);
		error = 1;
		goto out;
	}

	e = alloc_exception();
	if (!e) {
		down_write(&s->lock);
		__invalidate_snapshot(s, -ENOMEM);
		error = 1;
		goto out;
	}
	*e = pe->e;

	down_write(&s->lock);
	if (!s->valid) {
		free_exception(e);
		error = 1;
		goto out;
	}

	/*
	 * Check for conflicting reads. This is extremely improbable,
	 * so msleep(1) is sufficient and there is no need for a wait queue.
	 */
	while (__chunk_is_tracked(s, pe->e.old_chunk))
		msleep(1);

	/*
	 * Add a proper exception, and remove the
	 * in-flight exception from the list.
	 */
	insert_completed_exception(s, e);

out:
	remove_exception(&pe->e);
	snapshot_bios = bio_list_get(&pe->snapshot_bios);
	origin_bios = put_pending_exception(pe);

	up_write(&s->lock);

	/* Submit any pending write bios */
	if (error)
		error_bios(snapshot_bios);
	else
		flush_bios(snapshot_bios);

	flush_bios(origin_bios);
}

static void commit_callback(void *context, int success)
{
	struct dm_snap_pending_exception *pe = context;

	pending_complete(pe, success);
}

/*
 * Called when the copy I/O has finished.  kcopyd actually runs
 * this code so don't block.
 */
static void copy_callback(int read_err, unsigned long write_err, void *context)
{
	struct dm_snap_pending_exception *pe = context;
	struct dm_snapshot *s = pe->snap;

	if (read_err || write_err)
		pending_complete(pe, 0);

	else
		/* Update the metadata if we are persistent */
		s->store.commit_exception(&s->store, &pe->e, commit_callback,
					  pe);
}

/*
 * Dispatches the copy operation to kcopyd.
 */
static void start_copy(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;
	struct dm_io_region src, dest;
	struct block_device *bdev = s->origin->bdev;
	sector_t dev_size;

	dev_size = get_dev_size(bdev);

	src.bdev = bdev;
	src.sector = chunk_to_sector(s, pe->e.old_chunk);
	src.count = min(s->chunk_size, dev_size - src.sector);

	dest.bdev = s->cow->bdev;
	dest.sector = chunk_to_sector(s, pe->e.new_chunk);
	dest.count = src.count;

	/* Hand over to kcopyd */
	dm_kcopyd_copy(s->kcopyd_client,
		       &src, 1, &dest, 0, copy_callback, pe);
}

/*
 * Looks to see if this snapshot already has a pending exception
 * for this chunk, otherwise it allocates a new one and inserts
 * it into the pending table.
 *
 * NOTE: a write lock must be held on snap->lock before calling
 * this.
 */
static struct dm_snap_pending_exception *
__find_pending_exception(struct dm_snapshot *s, struct bio *bio)
{
	struct dm_snap_exception *e;
	struct dm_snap_pending_exception *pe;
	chunk_t chunk = sector_to_chunk(s, bio->bi_sector);

	/*
	 * Is there a pending exception for this already ?
	 */
	e = lookup_exception(&s->pending, chunk);
	if (e) {
		/* cast the exception to a pending exception */
		pe = container_of(e, struct dm_snap_pending_exception, e);
		goto out;
	}

	/*
	 * Create a new pending exception, we don't want
	 * to hold the lock while we do this.
	 */
	up_write(&s->lock);
	pe = alloc_pending_exception(s);
	down_write(&s->lock);

	if (!s->valid) {
		free_pending_exception(pe);
		return NULL;
	}

	e = lookup_exception(&s->pending, chunk);
	if (e) {
		free_pending_exception(pe);
		pe = container_of(e, struct dm_snap_pending_exception, e);
		goto out;
	}

	pe->e.old_chunk = chunk;
	bio_list_init(&pe->origin_bios);
	bio_list_init(&pe->snapshot_bios);
	pe->primary_pe = NULL;
	atomic_set(&pe->ref_count, 0);
	pe->started = 0;

	if (s->store.prepare_exception(&s->store, &pe->e)) {
		free_pending_exception(pe);
		return NULL;
	}

	get_pending_exception(pe);
	insert_exception(&s->pending, &pe->e);

out:
	return pe;
}

static void remap_exception(struct dm_snapshot *s, struct dm_snap_exception *e,
			    struct bio *bio, chunk_t chunk)
{
	bio->bi_bdev = s->cow->bdev;
	bio->bi_sector = chunk_to_sector(s, dm_chunk_number(e->new_chunk) +
			 (chunk - e->old_chunk)) +
			 (bio->bi_sector & s->chunk_mask);
}

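/*
 * snapshot_map() decides per bio: chunks that already have a completed
 * exception are remapped to the COW device; reads of chunks still shared
 * with the origin go to the origin device and are tracked so that a
 * completing copy can wait for them; a write to a shared chunk is queued
 * on a pending exception until kcopyd has copied the chunk out.
 */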
static int snapshot_map(struct dm_target *ti, struct bio *bio,
			union map_info *map_context)
{
	struct dm_snap_exception *e;
	struct dm_snapshot *s = ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;
	struct dm_snap_pending_exception *pe = NULL;

	chunk = sector_to_chunk(s, bio->bi_sector);

	/* Full snapshots are not usable */
	/* To get here the table must be live so s->active is always set. */
	if (!s->valid)
		return -EIO;

	/* FIXME: should only take write lock if we need
	 * to copy an exception */
	down_write(&s->lock);

	if (!s->valid) {
		r = -EIO;
		goto out_unlock;
	}

	/* If the block is already remapped - use that, else remap it */
	e = lookup_exception(&s->complete, chunk);
	if (e) {
		remap_exception(s, e, bio, chunk);
		goto out_unlock;
	}

	/*
	 * Write to snapshot - higher level takes care of RW/RO
	 * flags so we should only get this if we are
	 * writeable.
	 */
	if (bio_rw(bio) == WRITE) {
		pe = __find_pending_exception(s, bio);
		if (!pe) {
			__invalidate_snapshot(s, -ENOMEM);
			r = -EIO;
			goto out_unlock;
		}

		remap_exception(s, &pe->e, bio, chunk);
		bio_list_add(&pe->snapshot_bios, bio);

		r = DM_MAPIO_SUBMITTED;

		if (!pe->started) {
			/* this is protected by snap->lock */
			pe->started = 1;
			up_write(&s->lock);
			start_copy(pe);
			goto out;
		}
	} else {
		bio->bi_bdev = s->origin->bdev;
		map_context->ptr = track_chunk(s, chunk);
	}

out_unlock:
	up_write(&s->lock);
out:
	return r;
}

static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
			   int error, union map_info *map_context)
{
	struct dm_snapshot *s = ti->private;
	struct dm_snap_tracked_chunk *c = map_context->ptr;

	if (c)
		stop_tracking_chunk(s, c);

	return 0;
}

static void snapshot_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	down_write(&s->lock);
	s->active = 1;
	up_write(&s->lock);
}

static int snapshot_status(struct dm_target *ti, status_type_t type,
			   char *result, unsigned int maxlen)
{
	struct dm_snapshot *snap = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		if (!snap->valid)
			snprintf(result, maxlen, "Invalid");
		else {
			if (snap->store.fraction_full) {
				sector_t numerator, denominator;
				snap->store.fraction_full(&snap->store,
							  &numerator,
							  &denominator);
				snprintf(result, maxlen, "%llu/%llu",
					 (unsigned long long)numerator,
					 (unsigned long long)denominator);
			}
			else
				snprintf(result, maxlen, "Unknown");
		}
		break;

	case STATUSTYPE_TABLE:
		/*
		 * kdevname returns a static pointer so we need
		 * to make private copies if the output is to
		 * make sense.
		 */
		snprintf(result, maxlen, "%s %s %c %llu",
			 snap->origin->name, snap->cow->name,
			 snap->type,
			 (unsigned long long)snap->chunk_size);
		break;
	}

	return 0;
}

/*-----------------------------------------------------------------
 * Origin methods
 *---------------------------------------------------------------*/
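/*
 * A write to the origin must not hit the disk until every valid, active
 * snapshot of that origin has an exception pending for the chunk being
 * overwritten.  The original bio is therefore queued on a single
 * "primary" pending exception shared by all the snapshots and is only
 * resubmitted once the last outstanding copy completes.
 */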
static int __origin_write(struct list_head *snapshots, struct bio *bio)
{
	int r = DM_MAPIO_REMAPPED, first = 0;
	struct dm_snapshot *snap;
	struct dm_snap_exception *e;
	struct dm_snap_pending_exception *pe, *next_pe, *primary_pe = NULL;
	chunk_t chunk;
	LIST_HEAD(pe_queue);

	/* Do all the snapshots on this origin */
	list_for_each_entry (snap, snapshots, list) {

		down_write(&snap->lock);

		/* Only deal with valid and active snapshots */
		if (!snap->valid || !snap->active)
			goto next_snapshot;

		/* Nothing to do if writing beyond end of snapshot */
		if (bio->bi_sector >= dm_table_get_size(snap->ti->table))
			goto next_snapshot;

		/*
		 * Remember, different snapshots can have
		 * different chunk sizes.
		 */
		chunk = sector_to_chunk(snap, bio->bi_sector);

		/*
		 * Check exception table to see if block
		 * is already remapped in this snapshot
		 * and trigger an exception if not.
		 *
		 * ref_count is initialised to 1 so pending_complete()
		 * won't destroy the primary_pe while we're inside this loop.
		 */
		e = lookup_exception(&snap->complete, chunk);
		if (e)
			goto next_snapshot;

		pe = __find_pending_exception(snap, bio);
		if (!pe) {
			__invalidate_snapshot(snap, -ENOMEM);
			goto next_snapshot;
		}

		if (!primary_pe) {
			/*
			 * Either every pe here has same
			 * primary_pe or none has one yet.
			 */
			if (pe->primary_pe)
				primary_pe = pe->primary_pe;
			else {
				primary_pe = pe;
				first = 1;
			}

			bio_list_add(&primary_pe->origin_bios, bio);

			r = DM_MAPIO_SUBMITTED;
		}

		if (!pe->primary_pe) {
			pe->primary_pe = primary_pe;
			get_pending_exception(primary_pe);
		}

		if (!pe->started) {
			pe->started = 1;
			list_add_tail(&pe->list, &pe_queue);
		}

next_snapshot:
		up_write(&snap->lock);
	}

	if (!primary_pe)
		return r;

	/*
	 * If this is the first time we're processing this chunk and
	 * ref_count is now 1 it means all the pending exceptions
	 * got completed while we were in the loop above, so it falls to
	 * us here to remove the primary_pe and submit any origin_bios.
	 */

	if (first && atomic_dec_and_test(&primary_pe->ref_count)) {
		flush_bios(bio_list_get(&primary_pe->origin_bios));
		free_pending_exception(primary_pe);
		/* If we got here, pe_queue is necessarily empty. */
		return r;
	}

	/*
	 * Now that we have a complete pe list we can start the copying.
	 */
	list_for_each_entry_safe(pe, next_pe, &pe_queue, list)
		start_copy(pe);

	return r;
}

/*
 * Called on a write from the origin driver.
 */
static int do_origin(struct dm_dev *origin, struct bio *bio)
{
	struct origin *o;
	int r = DM_MAPIO_REMAPPED;

	down_read(&_origins_lock);
	o = __lookup_origin(origin->bdev);
	if (o)
		r = __origin_write(&o->snapshots, bio);
	up_read(&_origins_lock);

	return r;
}

/*
 * Origin: maps a linear range of a device, with hooks for snapshotting.
 */

/*
 * Construct an origin mapping: <dev_path>
 * The context for an origin is merely a 'struct dm_dev *'
 * pointing to the real device.
 */
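/*
 * Example table line (hypothetical device name):
 *
 *   dmsetup create lv0-origin --table \
 *     "0 `blockdev --getsz /dev/vg0/lv0` snapshot-origin /dev/vg0/lv0"
 *
 * Writes through the resulting map trigger copy-out to every snapshot
 * registered against /dev/vg0/lv0 before being passed down.
 */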
static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	struct dm_dev *dev;

	if (argc != 1) {
		ti->error = "origin: incorrect number of arguments";
		return -EINVAL;
	}

	r = dm_get_device(ti, argv[0], 0, ti->len,
			  dm_table_get_mode(ti->table), &dev);
	if (r) {
		ti->error = "Cannot get target device";
		return r;
	}

	ti->private = dev;
	return 0;
}

static void origin_dtr(struct dm_target *ti)
{
	struct dm_dev *dev = ti->private;
	dm_put_device(ti, dev);
}

static int origin_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	struct dm_dev *dev = ti->private;
	bio->bi_bdev = dev->bdev;

	/* Only tell snapshots if this is a write */
	return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
}

#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r))

/*
 * Set the target "split_io" field to the minimum of all the snapshots'
 * chunk sizes.
 */
static void origin_resume(struct dm_target *ti)
{
	struct dm_dev *dev = ti->private;
	struct dm_snapshot *snap;
	struct origin *o;
	chunk_t chunk_size = 0;

	down_read(&_origins_lock);
	o = __lookup_origin(dev->bdev);
	if (o)
		list_for_each_entry (snap, &o->snapshots, list)
			chunk_size = min_not_zero(chunk_size, snap->chunk_size);
	up_read(&_origins_lock);

	ti->split_io = chunk_size;
}

static int origin_status(struct dm_target *ti, status_type_t type, char *result,
			 unsigned int maxlen)
{
	struct dm_dev *dev = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		snprintf(result, maxlen, "%s", dev->name);
		break;
	}

	return 0;
}

static struct target_type origin_target = {
	.name    = "snapshot-origin",
	.version = {1, 6, 0},
	.module  = THIS_MODULE,
	.ctr     = origin_ctr,
	.dtr     = origin_dtr,
	.map     = origin_map,
	.resume  = origin_resume,
	.status  = origin_status,
};

static struct target_type snapshot_target = {
	.name    = "snapshot",
	.version = {1, 6, 0},
	.module  = THIS_MODULE,
	.ctr     = snapshot_ctr,
	.dtr     = snapshot_dtr,
	.map     = snapshot_map,
	.end_io  = snapshot_end_io,
	.resume  = snapshot_resume,
	.status  = snapshot_status,
};

static int __init dm_snapshot_init(void)
{
	int r;

	r = dm_exception_store_init();
	if (r) {
		DMERR("Failed to initialize exception stores");
		return r;
	}

	r = dm_register_target(&snapshot_target);
	if (r) {
		DMERR("snapshot target register failed %d", r);
		return r;
	}

	r = dm_register_target(&origin_target);
	if (r < 0) {
		DMERR("Origin target register failed %d", r);
		goto bad1;
	}

	r = init_origin_hash();
	if (r) {
		DMERR("init_origin_hash failed.");
		goto bad2;
	}

	exception_cache = KMEM_CACHE(dm_snap_exception, 0);
	if (!exception_cache) {
		DMERR("Couldn't create exception cache.");
		r = -ENOMEM;
		goto bad3;
	}

	pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
	if (!pending_cache) {
		DMERR("Couldn't create pending cache.");
		r = -ENOMEM;
		goto bad4;
	}

	tracked_chunk_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0);
	if (!tracked_chunk_cache) {
		DMERR("Couldn't create cache to track chunks in use.");
		r = -ENOMEM;
		goto bad5;
	}

	ksnapd = create_singlethread_workqueue("ksnapd");
	if (!ksnapd) {
		DMERR("Failed to create ksnapd workqueue.");
		r = -ENOMEM;
		goto bad_pending_pool;
	}

	return 0;

bad_pending_pool:
	kmem_cache_destroy(tracked_chunk_cache);
bad5:
	kmem_cache_destroy(pending_cache);
bad4:
	kmem_cache_destroy(exception_cache);
bad3:
	exit_origin_hash();
bad2:
	dm_unregister_target(&origin_target);
bad1:
	dm_unregister_target(&snapshot_target);
	return r;
}

static void __exit dm_snapshot_exit(void)
{
	destroy_workqueue(ksnapd);

	dm_unregister_target(&snapshot_target);
	dm_unregister_target(&origin_target);

	exit_origin_hash();
	kmem_cache_destroy(pending_cache);
	kmem_cache_destroy(exception_cache);
	kmem_cache_destroy(tracked_chunk_cache);

	dm_exception_store_exit();
}

/* Module hooks */
module_init(dm_snapshot_init);
module_exit(dm_snapshot_exit);

MODULE_DESCRIPTION(DM_NAME " snapshot target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");