/*
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 * Copyright (C) 2006-2008 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm-exception-store.h"

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "persistent snapshot"
#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32	/* 16KB */

/*-----------------------------------------------------------------
 * Persistent snapshots: by "persistent" we mean that the snapshot
 * will survive a reboot.
 *---------------------------------------------------------------*/

/*
 * We need to store a record of which parts of the origin have
 * been copied to the snapshot device.  The snapshot code
 * requires that we copy exception chunks to chunk-aligned areas
 * of the COW store.  It therefore makes sense to store the
 * metadata in chunk-sized blocks.
 *
 * There is no backward or forward compatibility implemented:
 * snapshots with a disk version different from the kernel's will
 * not be usable.  It is expected that "lvcreate" will blank out
 * the start of a fresh COW device before calling the snapshot
 * constructor.
 *
 * The first chunk of the COW device just contains the header.
 * After this there is a chunk filled with exception metadata,
 * followed by as many exception chunks as can fit in the
 * metadata areas.
 *
 * All on-disk structures are in little-endian format.  The end
 * of the exceptions info is indicated by an exception with a
 * new_chunk of 0, which is invalid since it would point to the
 * header chunk.
 */
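
/*
 * The resulting on-disk layout, with E = exceptions_per_area
 * (the number of data chunks per metadata area), looks like this:
 *
 *	chunk 0		header
 *	chunk 1		metadata area 0
 *	chunks 2..E+1	data chunks for area 0
 *	chunk E+2	metadata area 1
 *	...and so on, one metadata chunk per E data chunks.
 */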

/*
 * Magic for persistent snapshots: "SnAp" - feeble, isn't it?
 */
#define SNAP_MAGIC 0x70416e53
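
/*
 * The value is the bytes 'S' 'n' 'A' 'p' (0x53 0x6e 0x41 0x70) read
 * as a little-endian 32-bit integer, matching the on-disk byte order
 * used throughout this store.
 */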

/*
 * The on-disk version of the metadata.
 */
#define SNAPSHOT_DISK_VERSION 1

#define NUM_SNAPSHOT_HDR_CHUNKS 1

struct disk_header {
	uint32_t magic;

	/*
	 * Is this snapshot valid?  There is no way of recovering
	 * an invalid snapshot.
	 */
	uint32_t valid;

	/*
	 * Simple, incrementing version.  No backward
	 * compatibility.
	 */
	uint32_t version;

	/* In sectors */
	uint32_t chunk_size;
};

struct disk_exception {
	uint64_t old_chunk;
	uint64_t new_chunk;
};

struct commit_callback {
	void (*callback)(void *, int success);
	void *context;
};

/*
 * The top level structure for a persistent exception store.
 */
struct pstore {
	struct dm_exception_store *store;
	int version;
	int valid;
	uint32_t exceptions_per_area;

	/*
	 * Now that we have an asynchronous kcopyd there is no
	 * need for large chunk sizes, so it won't hurt to have a
	 * whole chunk's worth of metadata in memory at once.
	 */
	void *area;

	/*
	 * An area of zeros used to clear the next area.
	 */
	void *zero_area;

	/*
	 * An area used for the header.  The header can be written
	 * concurrently with metadata (when invalidating the snapshot),
	 * so it needs a separate buffer.
	 */
	void *header_area;

	/*
	 * Used to keep track of which metadata area the data in
	 * 'chunk' refers to.
	 */
	chunk_t current_area;

	/*
	 * The next free chunk for an exception.
	 *
	 * When creating exceptions, all the chunks here and above are
	 * free.  It holds the next chunk to be allocated.  On rare
	 * occasions (e.g. after a system crash) holes can be left in
	 * the exception store because chunks can be committed out of
	 * order.
	 *
	 * When merging exceptions, it does not necessarily mean all the
	 * chunks here and above are free.  It holds the value it would
	 * have held if all chunks had been committed in order of
	 * allocation.  Consequently the value may occasionally be
	 * slightly too low, but since it's only used for 'status' and
	 * it can never reach its minimum value too early this doesn't
	 * matter.
	 */
	chunk_t next_free;

	/*
	 * The index of the next free exception in the current
	 * metadata area.
	 */
	uint32_t current_committed;

	atomic_t pending_count;
	uint32_t callback_count;
	struct commit_callback *callbacks;
	struct dm_io_client *io_client;

	struct workqueue_struct *metadata_wq;
};
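
/*
 * alloc_area()/free_area() manage the three one-chunk buffers above
 * (area, zero_area, header_area), each chunk_size << SECTOR_SHIFT
 * bytes, with the usual goto-based unwind on allocation failure.
 */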
static int alloc_area(struct pstore *ps)
{
	int r = -ENOMEM;
	size_t len;

	len = ps->store->chunk_size << SECTOR_SHIFT;

	/*
	 * Allocate the chunk_size block of memory that will hold
	 * a single metadata area.
	 */
	ps->area = vmalloc(len);
	if (!ps->area)
		goto err_area;

	ps->zero_area = vmalloc(len);
	if (!ps->zero_area)
		goto err_zero_area;
	memset(ps->zero_area, 0, len);

	ps->header_area = vmalloc(len);
	if (!ps->header_area)
		goto err_header_area;

	return 0;

err_header_area:
	vfree(ps->zero_area);

err_zero_area:
	vfree(ps->area);

err_area:
	return r;
}

static void free_area(struct pstore *ps)
{
	if (ps->area)
		vfree(ps->area);
	ps->area = NULL;

	if (ps->zero_area)
		vfree(ps->zero_area);
	ps->zero_area = NULL;

	if (ps->header_area)
		vfree(ps->header_area);
	ps->header_area = NULL;
}

struct mdata_req {
	struct dm_io_region *where;
	struct dm_io_request *io_req;
	struct work_struct work;
	int result;
};

static void do_metadata(struct work_struct *work)
{
	struct mdata_req *req = container_of(work, struct mdata_req, work);

	req->result = dm_io(req->io_req, 1, req->where, NULL);
}

/*
 * Read or write a chunk-aligned and chunk-sized block of data from a device.
 */
static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
		    int metadata)
{
	struct dm_io_region where = {
		.bdev = dm_snap_cow(ps->store->snap)->bdev,
		.sector = ps->store->chunk_size * chunk,
		.count = ps->store->chunk_size,
	};
	struct dm_io_request io_req = {
		.bi_rw = rw,
		.mem.type = DM_IO_VMA,
		.mem.ptr.vma = area,
		.client = ps->io_client,
		.notify.fn = NULL,
	};
	struct mdata_req req;

	if (!metadata)
		return dm_io(&io_req, 1, &where, NULL);

	req.where = &where;
	req.io_req = &io_req;

	/*
	 * Issue the synchronous I/O from a different thread
	 * to avoid generic_make_request recursion.
	 */
	INIT_WORK_ONSTACK(&req.work, do_metadata);
	queue_work(ps->metadata_wq, &req.work);
	flush_work(&req.work);

	return req.result;
}
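
/*
 * Note that flush_work() does not return until do_metadata() has
 * finished running on the workqueue, so chunk_io() remains fully
 * synchronous and the on-stack mdata_req stays valid for the whole
 * request.
 */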

/*
 * Convert a metadata area index to a chunk index.
 */
static chunk_t area_location(struct pstore *ps, chunk_t area)
{
	return NUM_SNAPSHOT_HDR_CHUNKS + ((ps->exceptions_per_area + 1) * area);
}
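
/*
 * For example, with the default 16KB chunks and 16-byte
 * disk_exceptions, exceptions_per_area is 1024, so the metadata
 * areas live at chunks 1, 1026, 2051, ... - one header chunk, then
 * a stride of 1025 chunks (1 metadata + 1024 data) per area.
 */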

/*
 * Read or write a metadata area, remembering to skip the first
 * chunk, which holds the header.
 */
static int area_io(struct pstore *ps, int rw)
{
	int r;
	chunk_t chunk;

	chunk = area_location(ps, ps->current_area);

	r = chunk_io(ps, ps->area, chunk, rw, 0);
	if (r)
		return r;

	return 0;
}

static void zero_memory_area(struct pstore *ps)
{
	memset(ps->area, 0, ps->store->chunk_size << SECTOR_SHIFT);
}

static int zero_disk_area(struct pstore *ps, chunk_t area)
{
	return chunk_io(ps, ps->zero_area, area_location(ps, area), WRITE, 0);
}
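
/*
 * Read and validate the on-disk header.  Sets *new_snapshot when the
 * first chunk is still zeroed (a freshly blanked COW device).  If the
 * header carries a chunk size different from the one in the table,
 * the buffers are reallocated to match the on-disk value.
 */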
static int read_header(struct pstore *ps, int *new_snapshot)
{
	int r;
	struct disk_header *dh;
	unsigned chunk_size;
	int chunk_size_supplied = 1;
	char *chunk_err;

	/*
	 * Use default chunk size (or logical_block_size, if larger)
	 * if none supplied
	 */
	if (!ps->store->chunk_size) {
		ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS,
		    bdev_logical_block_size(dm_snap_cow(ps->store->snap)->
					    bdev) >> 9);
		ps->store->chunk_mask = ps->store->chunk_size - 1;
		ps->store->chunk_shift = ffs(ps->store->chunk_size) - 1;
		chunk_size_supplied = 0;
	}

	ps->io_client = dm_io_client_create();
	if (IS_ERR(ps->io_client))
		return PTR_ERR(ps->io_client);

	r = alloc_area(ps);
	if (r)
		return r;

	r = chunk_io(ps, ps->header_area, 0, READ, 1);
	if (r)
		goto bad;

	dh = ps->header_area;

	if (le32_to_cpu(dh->magic) == 0) {
		*new_snapshot = 1;
		return 0;
	}

	if (le32_to_cpu(dh->magic) != SNAP_MAGIC) {
		DMWARN("Invalid or corrupt snapshot");
		r = -ENXIO;
		goto bad;
	}

	*new_snapshot = 0;
	ps->valid = le32_to_cpu(dh->valid);
	ps->version = le32_to_cpu(dh->version);
	chunk_size = le32_to_cpu(dh->chunk_size);

	if (ps->store->chunk_size == chunk_size)
		return 0;

	if (chunk_size_supplied)
		DMWARN("chunk size %u in device metadata overrides "
		       "table chunk size of %u.",
		       chunk_size, ps->store->chunk_size);

	/* We had a bogus chunk_size. Fix stuff up. */
	free_area(ps);

	r = dm_exception_store_set_chunk_size(ps->store, chunk_size,
					      &chunk_err);
	if (r) {
		DMERR("invalid on-disk chunk size %u: %s.",
		      chunk_size, chunk_err);
		return r;
	}

	r = alloc_area(ps);
	return r;

bad:
	free_area(ps);
	return r;
}

static int write_header(struct pstore *ps)
{
	struct disk_header *dh;

	memset(ps->header_area, 0, ps->store->chunk_size << SECTOR_SHIFT);

	dh = ps->header_area;
	dh->magic = cpu_to_le32(SNAP_MAGIC);
	dh->valid = cpu_to_le32(ps->valid);
	dh->version = cpu_to_le32(ps->version);
	dh->chunk_size = cpu_to_le32(ps->store->chunk_size);

	return chunk_io(ps, ps->header_area, 0, WRITE, 1);
}

/*
 * Access functions for the disk exceptions; these do the endian conversions.
 */
static struct disk_exception *get_exception(struct pstore *ps, uint32_t index)
{
	BUG_ON(index >= ps->exceptions_per_area);

	return ((struct disk_exception *) ps->area) + index;
}

static void read_exception(struct pstore *ps,
			   uint32_t index, struct disk_exception *result)
{
	struct disk_exception *e = get_exception(ps, index);

	/* copy it */
	result->old_chunk = le64_to_cpu(e->old_chunk);
	result->new_chunk = le64_to_cpu(e->new_chunk);
}

static void write_exception(struct pstore *ps,
			    uint32_t index, struct disk_exception *de)
{
	struct disk_exception *e = get_exception(ps, index);

	/* copy it */
	e->old_chunk = cpu_to_le64(de->old_chunk);
	e->new_chunk = cpu_to_le64(de->new_chunk);
}

static void clear_exception(struct pstore *ps, uint32_t index)
{
	struct disk_exception *e = get_exception(ps, index);

	/* clear it */
	e->old_chunk = 0;
	e->new_chunk = 0;
}

/*
 * Registers the exceptions that are present in the current area.
 * 'full' is set to indicate whether the area has been
 * completely filled.
 */
static int insert_exceptions(struct pstore *ps,
			     int (*callback)(void *callback_context,
					     chunk_t old, chunk_t new),
			     void *callback_context,
			     int *full)
{
	int r;
	unsigned int i;
	struct disk_exception de;

	/* presume the area is full */
	*full = 1;

	for (i = 0; i < ps->exceptions_per_area; i++) {
		read_exception(ps, i, &de);

		/*
		 * If the new_chunk is pointing at the start of
		 * the COW device, where the first metadata area
		 * is, we know that we've hit the end of the
		 * exceptions.  Therefore the area is not full.
		 */
		if (de.new_chunk == 0LL) {
			ps->current_committed = i;
			*full = 0;
			break;
		}

		/*
		 * Keep track of the start of the free chunks.
		 */
		if (ps->next_free <= de.new_chunk)
			ps->next_free = de.new_chunk + 1;

		/*
		 * Otherwise we add the exception to the snapshot.
		 */
		r = callback(callback_context, de.old_chunk, de.new_chunk);
		if (r)
			return r;
	}

	return 0;
}

476 | ||
a159c1ac JB |
477 | static int read_exceptions(struct pstore *ps, |
478 | int (*callback)(void *callback_context, chunk_t old, | |
479 | chunk_t new), | |
480 | void *callback_context) | |
4db6bfe0 AK |
481 | { |
482 | int r, full = 1; | |
483 | ||
484 | /* | |
485 | * Keeping reading chunks and inserting exceptions until | |
486 | * we find a partially full area. | |
487 | */ | |
488 | for (ps->current_area = 0; full; ps->current_area++) { | |
489 | r = area_io(ps, READ); | |
490 | if (r) | |
491 | return r; | |
492 | ||
a159c1ac | 493 | r = insert_exceptions(ps, callback, callback_context, &full); |
4db6bfe0 AK |
494 | if (r) |
495 | return r; | |
496 | } | |
497 | ||
498 | ps->current_area--; | |
499 | ||
500 | return 0; | |
501 | } | |
502 | ||
static struct pstore *get_info(struct dm_exception_store *store)
{
	return (struct pstore *) store->context;
}

static void persistent_usage(struct dm_exception_store *store,
			     sector_t *total_sectors,
			     sector_t *sectors_allocated,
			     sector_t *metadata_sectors)
{
	struct pstore *ps = get_info(store);

	*sectors_allocated = ps->next_free * store->chunk_size;
	*total_sectors = get_dev_size(dm_snap_cow(store->snap)->bdev);

	/*
	 * First chunk is the fixed header.
	 * Then there are (ps->current_area + 1) metadata chunks, each one
	 * separated from the next by ps->exceptions_per_area data chunks.
	 */
	*metadata_sectors = (ps->current_area + 1 + NUM_SNAPSHOT_HDR_CHUNKS) *
			    store->chunk_size;
}
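
/*
 * For instance, a store that has filled three metadata areas
 * (current_area == 2) counts 1 header chunk + 3 metadata chunks,
 * so metadata_sectors comes out as 4 * chunk_size.
 */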

static void persistent_dtr(struct dm_exception_store *store)
{
	struct pstore *ps = get_info(store);

	destroy_workqueue(ps->metadata_wq);

	/* Created in read_header */
	if (ps->io_client)
		dm_io_client_destroy(ps->io_client);
	free_area(ps);

	/* Allocated in persistent_read_metadata */
	if (ps->callbacks)
		vfree(ps->callbacks);

	kfree(ps);
}
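
/*
 * Read the header, then either initialise a brand new store (write a
 * fresh header and zero the first metadata area) or replay the
 * existing exceptions into the snapshot via 'callback'.  Returns 1 if
 * the metadata is readable but the snapshot has been invalidated.
 */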
static int persistent_read_metadata(struct dm_exception_store *store,
				    int (*callback)(void *callback_context,
						    chunk_t old, chunk_t new),
				    void *callback_context)
{
	int r, uninitialized_var(new_snapshot);
	struct pstore *ps = get_info(store);

	/*
	 * Read the snapshot header.
	 */
	r = read_header(ps, &new_snapshot);
	if (r)
		return r;

	/*
	 * Now we know the correct chunk_size, complete the initialisation.
	 */
	ps->exceptions_per_area = (ps->store->chunk_size << SECTOR_SHIFT) /
				  sizeof(struct disk_exception);
	ps->callbacks = dm_vcalloc(ps->exceptions_per_area,
				   sizeof(*ps->callbacks));
	if (!ps->callbacks)
		return -ENOMEM;

	/*
	 * Do we need to set up a new snapshot?
	 */
	if (new_snapshot) {
		r = write_header(ps);
		if (r) {
			DMWARN("write_header failed");
			return r;
		}

		ps->current_area = 0;
		zero_memory_area(ps);
		r = zero_disk_area(ps, 0);
		if (r)
			DMWARN("zero_disk_area(0) failed");
		return r;
	}
	/*
	 * Sanity checks.
	 */
	if (ps->version != SNAPSHOT_DISK_VERSION) {
		DMWARN("unable to handle snapshot disk version %d",
		       ps->version);
		return -EINVAL;
	}

	/*
	 * The metadata is valid, but the snapshot has been invalidated.
	 */
	if (!ps->valid)
		return 1;

	/*
	 * Read the metadata.
	 */
	r = read_exceptions(ps, callback, callback_context);

	return r;
}
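
/*
 * Allocation must skip over the metadata chunks interleaved with the
 * data: every chunk whose index modulo (exceptions_per_area + 1)
 * equals 1 holds metadata.  With the example figures above (stride
 * 1025), next_free jumps from 1025 straight to 1027, skipping the
 * metadata chunk at 1026.
 */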
static int persistent_prepare_exception(struct dm_exception_store *store,
					struct dm_exception *e)
{
	struct pstore *ps = get_info(store);
	uint32_t stride;
	chunk_t next_free;
	sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev);

	/* Is there enough room? */
	if (size < ((ps->next_free + 1) * store->chunk_size))
		return -ENOSPC;

	e->new_chunk = ps->next_free;

	/*
	 * Move on to the next free pending, making sure to take
	 * into account the location of the metadata chunks.
	 */
	stride = (ps->exceptions_per_area + 1);
	next_free = ++ps->next_free;
	if (sector_div(next_free, stride) == 1)
		ps->next_free++;

	atomic_inc(&ps->pending_count);
	return 0;
}

static void persistent_commit_exception(struct dm_exception_store *store,
					struct dm_exception *e,
					void (*callback) (void *, int success),
					void *callback_context)
{
	unsigned int i;
	struct pstore *ps = get_info(store);
	struct disk_exception de;
	struct commit_callback *cb;

	de.old_chunk = e->old_chunk;
	de.new_chunk = e->new_chunk;
	write_exception(ps, ps->current_committed++, &de);

	/*
	 * Add the callback to the back of the array.  This code
	 * is the only place where the callback array is
	 * manipulated, and we know that it will never be called
	 * multiple times concurrently.
	 */
	cb = ps->callbacks + ps->callback_count++;
	cb->callback = callback;
	cb->context = callback_context;

	/*
	 * If there are exceptions in flight and we have not yet
	 * filled this metadata area there's nothing more to do.
	 */
	if (!atomic_dec_and_test(&ps->pending_count) &&
	    (ps->current_committed != ps->exceptions_per_area))
		return;

	/*
	 * If we completely filled the current area, then wipe the next one.
	 */
	if ((ps->current_committed == ps->exceptions_per_area) &&
	    zero_disk_area(ps, ps->current_area + 1))
		ps->valid = 0;

	/*
	 * Commit exceptions to disk.
	 */
	if (ps->valid && area_io(ps, WRITE_FLUSH_FUA))
		ps->valid = 0;

	/*
	 * Advance to the next area if this one is full.
	 */
	if (ps->current_committed == ps->exceptions_per_area) {
		ps->current_committed = 0;
		ps->current_area++;
		zero_memory_area(ps);
	}

	for (i = 0; i < ps->callback_count; i++) {
		cb = ps->callbacks + i;
		cb->callback(cb->context, ps->valid);
	}

	ps->callback_count = 0;
}
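
/*
 * The metadata area above is written with WRITE_FLUSH_FUA, so the
 * committed exceptions reach stable storage before any of the queued
 * callbacks report success.
 *
 * Merging works in the opposite direction to allocation: the two
 * functions below walk backwards from the most recently committed
 * exception, batching runs whose old_chunk and new_chunk values both
 * decrease by one per step.
 */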
static int persistent_prepare_merge(struct dm_exception_store *store,
				    chunk_t *last_old_chunk,
				    chunk_t *last_new_chunk)
{
	struct pstore *ps = get_info(store);
	struct disk_exception de;
	int nr_consecutive;
	int r;

	/*
	 * When current area is empty, move back to preceding area.
	 */
	if (!ps->current_committed) {
		/*
		 * Have we finished?
		 */
		if (!ps->current_area)
			return 0;

		ps->current_area--;
		r = area_io(ps, READ);
		if (r < 0)
			return r;
		ps->current_committed = ps->exceptions_per_area;
	}

	read_exception(ps, ps->current_committed - 1, &de);
	*last_old_chunk = de.old_chunk;
	*last_new_chunk = de.new_chunk;

	/*
	 * Find number of consecutive chunks within the current area,
	 * working backwards.
	 */
	for (nr_consecutive = 1; nr_consecutive < ps->current_committed;
	     nr_consecutive++) {
		read_exception(ps, ps->current_committed - 1 - nr_consecutive,
			       &de);
		if (de.old_chunk != *last_old_chunk - nr_consecutive ||
		    de.new_chunk != *last_new_chunk - nr_consecutive)
			break;
	}

	return nr_consecutive;
}

static int persistent_commit_merge(struct dm_exception_store *store,
				   int nr_merged)
{
	int r, i;
	struct pstore *ps = get_info(store);

	BUG_ON(nr_merged > ps->current_committed);

	for (i = 0; i < nr_merged; i++)
		clear_exception(ps, ps->current_committed - 1 - i);

	r = area_io(ps, WRITE);
	if (r < 0)
		return r;

	ps->current_committed -= nr_merged;

	/*
	 * At this stage, only persistent_usage() uses ps->next_free, so
	 * we make no attempt to keep ps->next_free strictly accurate
	 * as exceptions may have been committed out-of-order originally.
	 * Once a snapshot has become merging, we set it to the value it
	 * would have held had all the exceptions been committed in order.
	 *
	 * ps->current_area does not get reduced by prepare_merge() until
	 * after commit_merge() has removed the nr_merged previous exceptions.
	 */
	ps->next_free = area_location(ps, ps->current_area) +
			ps->current_committed + 1;

	return 0;
}

static void persistent_drop_snapshot(struct dm_exception_store *store)
{
	struct pstore *ps = get_info(store);

	ps->valid = 0;
	if (write_header(ps))
		DMWARN("write header failed");
}

static int persistent_ctr(struct dm_exception_store *store,
			  unsigned argc, char **argv)
{
	struct pstore *ps;

	/* allocate the pstore */
	ps = kzalloc(sizeof(*ps), GFP_KERNEL);
	if (!ps)
		return -ENOMEM;

	ps->store = store;
	ps->valid = 1;
	ps->version = SNAPSHOT_DISK_VERSION;
	ps->area = NULL;
	ps->zero_area = NULL;
	ps->header_area = NULL;
	ps->next_free = NUM_SNAPSHOT_HDR_CHUNKS + 1; /* header and 1st area */
	ps->current_committed = 0;

	ps->callback_count = 0;
	atomic_set(&ps->pending_count, 0);
	ps->callbacks = NULL;

	ps->metadata_wq = alloc_workqueue("ksnaphd", WQ_MEM_RECLAIM, 0);
	if (!ps->metadata_wq) {
		kfree(ps);
		DMERR("couldn't start header metadata update thread");
		return -ENOMEM;
	}

	store->context = ps;

	return 0;
}
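
/*
 * For STATUSTYPE_TABLE this emits the store type and chunk size in
 * sectors, e.g. " P 32" for the default 16KB chunks.
 */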
static unsigned persistent_status(struct dm_exception_store *store,
				  status_type_t status, char *result,
				  unsigned maxlen)
{
	unsigned sz = 0;

	switch (status) {
	case STATUSTYPE_INFO:
		break;
	case STATUSTYPE_TABLE:
		DMEMIT(" P %llu", (unsigned long long)store->chunk_size);
	}

	return sz;
}

static struct dm_exception_store_type _persistent_type = {
	.name = "persistent",
	.module = THIS_MODULE,
	.ctr = persistent_ctr,
	.dtr = persistent_dtr,
	.read_metadata = persistent_read_metadata,
	.prepare_exception = persistent_prepare_exception,
	.commit_exception = persistent_commit_exception,
	.prepare_merge = persistent_prepare_merge,
	.commit_merge = persistent_commit_merge,
	.drop_snapshot = persistent_drop_snapshot,
	.usage = persistent_usage,
	.status = persistent_status,
};

static struct dm_exception_store_type _persistent_compat_type = {
	.name = "P",
	.module = THIS_MODULE,
	.ctr = persistent_ctr,
	.dtr = persistent_dtr,
	.read_metadata = persistent_read_metadata,
	.prepare_exception = persistent_prepare_exception,
	.commit_exception = persistent_commit_exception,
	.prepare_merge = persistent_prepare_merge,
	.commit_merge = persistent_commit_merge,
	.drop_snapshot = persistent_drop_snapshot,
	.usage = persistent_usage,
	.status = persistent_status,
};

int dm_persistent_snapshot_init(void)
{
	int r;

	r = dm_exception_store_type_register(&_persistent_type);
	if (r) {
		DMERR("Unable to register persistent exception store type");
		return r;
	}

	r = dm_exception_store_type_register(&_persistent_compat_type);
	if (r) {
		DMERR("Unable to register old-style persistent exception "
		      "store type");
		dm_exception_store_type_unregister(&_persistent_type);
		return r;
	}

	return r;
}

void dm_persistent_snapshot_exit(void)
{
	dm_exception_store_type_unregister(&_persistent_type);
	dm_exception_store_type_unregister(&_persistent_compat_type);
}