/*
 * Copyright (C) 2014 Facebook. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>

#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/freezer.h>

#define DM_MSG_PREFIX "log-writes"

/*
 * This target will sequentially log all writes to the target device onto the
 * log device. This is helpful for replaying writes to check for fs consistency
 * at all times. This target provides a mechanism to mark specific events to
 * check data at a later time. So for example you would:
 *
 * write data
 * fsync
 * dmsetup message /dev/whatever mark mymark
 * unmount /mnt/test
 *
 * Then replay the log up to mymark and check the contents of the replay to
 * verify it matches what was written.
 *
 * We only log writes after they have been flushed; this makes the log describe
 * close to the order in which the data hits the actual disk, not its cache. So
 * for example the following sequence (W means write, C means complete)
 *
 * Wa,Wb,Wc,Cc,Ca,FLUSH,FUAd,Cb,CFLUSH,CFUAd
 *
 * would result in the log looking like this:
 *
 * c,a,flush,fuad,b,<other writes>,<next flush>
 *
 * This is meant to help expose problems where file systems do not properly
 * wait on data being written before invoking a FLUSH. FUA bypasses cache, so
 * once it completes it is added to the log as it should be on disk.
 *
 * We treat DISCARDs as if they don't bypass cache so that they are logged in
 * order of completion along with the normal writes. If we didn't do it this
 * way we would process all the discards first and then all the data writes,
 * when in fact we want to log the data and the discards in the order in which
 * they completed.
 */
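
/*
 * An illustrative setup (device names are examples only, not part of this
 * target): log every write to /dev/sdb onto /dev/sdc, then drop a mark
 * after an fsync:
 *
 *   dmsetup create log --table \
 *       "0 $(blockdev --getsz /dev/sdb) log-writes /dev/sdb /dev/sdc"
 *   mount /dev/mapper/log /mnt/test
 *   ... write data, fsync ...
 *   dmsetup message log 0 mark mymark
 */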
#define LOG_FLUSH_FLAG (1 << 0)
#define LOG_FUA_FLAG (1 << 1)
#define LOG_DISCARD_FLAG (1 << 2)
#define LOG_MARK_FLAG (1 << 3)

#define WRITE_LOG_VERSION 1ULL
#define WRITE_LOG_MAGIC 0x6a736677736872ULL

/*
 * The disk format for this is braindead simple.
 *
 * At byte 0 we have our super, followed by the following sequence for
 * nr_entries:
 *
 * [ 1 sector ][ entry->nr_sectors ]
 * [log_write_entry][ data written ]
 *
 * The log_write_entry takes up a full sector so we can have arbitrary length
 * marks and it leaves us room for extra content in the future.
 */
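
/*
 * As a concrete (purely illustrative) example, on a log device with
 * 4096-byte logical sectors, a log containing one 4KiB data write would
 * be laid out as:
 *
 *   byte     0: log_write_super, zero-padded to 4096 bytes
 *   byte  4096: log_write_entry (nr_sectors == 1), zero-padded to 4096 bytes
 *   byte  8192: the 4096 bytes of data that were written
 *
 * and the next entry would begin at byte 12288.
 */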

/*
 * Basic info about the log for userspace.
 */
struct log_write_super {
	__le64 magic;
	__le64 version;
	__le64 nr_entries;
	__le32 sectorsize;
};

/*
 * sector - the sector we wrote.
 * nr_sectors - the number of sectors we wrote.
 * flags - flags for this log entry.
 * data_len - the size of the data in this log entry; this is for private log
 *            entry data, e.g. the MARK string provided by userspace.
 */
struct log_write_entry {
	__le64 sector;
	__le64 nr_sectors;
	__le64 flags;
	__le64 data_len;
};

struct log_writes_c {
	struct dm_dev *dev;
	struct dm_dev *logdev;
	u64 logged_entries;
	u32 sectorsize;
	u32 sectorshift;
	atomic_t io_blocks;
	atomic_t pending_blocks;
	sector_t next_sector;
	sector_t end_sector;
	bool logging_enabled;
	bool device_supports_discard;
	spinlock_t blocks_lock;
	struct list_head unflushed_blocks;
	struct list_head logging_blocks;
	wait_queue_head_t wait;
	struct task_struct *log_kthread;
};

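/*
 * One pending_block is allocated per logged write in log_writes_map() (and
 * per mark in log_mark()). For data writes, vecs[] holds private copies of
 * the bio's pages; the block is freed once it has been logged, or on error.
 */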
struct pending_block {
	int vec_cnt;
	u64 flags;
	sector_t sector;
	sector_t nr_sectors;
	char *data;
	u32 datalen;
	struct list_head list;
	struct bio_vec vecs[0];
};

struct per_bio_data {
	struct pending_block *block;
};

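/*
 * bi_sector is always in 512-byte units (SECTOR_SHIFT == 9), while log
 * entries store sector counts in units of the device's logical sector size.
 * These helpers convert between the two; e.g. with 4096-byte logical
 * sectors (sectorshift == 12) the shift is 3, i.e. a factor of 8.
 */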
static inline sector_t bio_to_dev_sectors(struct log_writes_c *lc,
					  sector_t sectors)
{
	return sectors >> (lc->sectorshift - SECTOR_SHIFT);
}

static inline sector_t dev_to_bio_sectors(struct log_writes_c *lc,
					  sector_t sectors)
{
	return sectors << (lc->sectorshift - SECTOR_SHIFT);
}

static void put_pending_block(struct log_writes_c *lc)
{
	if (atomic_dec_and_test(&lc->pending_blocks)) {
		smp_mb__after_atomic();
		if (waitqueue_active(&lc->wait))
			wake_up(&lc->wait);
	}
}

static void put_io_block(struct log_writes_c *lc)
{
	if (atomic_dec_and_test(&lc->io_blocks)) {
		smp_mb__after_atomic();
		if (waitqueue_active(&lc->wait))
			wake_up(&lc->wait);
	}
}

static void log_end_io(struct bio *bio)
{
	struct log_writes_c *lc = bio->bi_private;

	if (bio->bi_status) {
		unsigned long flags;

		DMERR("Error writing log block, error=%d", bio->bi_status);
		spin_lock_irqsave(&lc->blocks_lock, flags);
		lc->logging_enabled = false;
		spin_unlock_irqrestore(&lc->blocks_lock, flags);
	}

	bio_free_pages(bio);
	put_io_block(lc);
	bio_put(bio);
}

/*
 * Meant to be called if there is an error; it will free all the pages
 * associated with the block.
 */
static void free_pending_block(struct log_writes_c *lc,
			       struct pending_block *block)
{
	int i;

	for (i = 0; i < block->vec_cnt; i++) {
		if (block->vecs[i].bv_page)
			__free_page(block->vecs[i].bv_page);
	}
	kfree(block->data);
	kfree(block);
	put_pending_block(lc);
}

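/*
 * Write a single metadata sector to the log device at @sector: @entry is
 * copied to the start of a fresh page, followed by @datalen bytes of @data
 * (e.g. mark text), and the rest of the sector is zeroed. The caller's
 * io_block reference is dropped in log_end_io() on success, or via
 * put_io_block() here on failure.
 */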
static int write_metadata(struct log_writes_c *lc, void *entry,
			  size_t entrylen, void *data, size_t datalen,
			  sector_t sector)
{
	struct bio *bio;
	struct page *page;
	void *ptr;
	size_t ret;

	bio = bio_alloc(GFP_KERNEL, 1);
	if (!bio) {
		DMERR("Couldn't alloc log bio");
		goto error;
	}
	bio->bi_iter.bi_size = 0;
	bio->bi_iter.bi_sector = sector;
	bio_set_dev(bio, lc->logdev->bdev);
	bio->bi_end_io = log_end_io;
	bio->bi_private = lc;
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

	page = alloc_page(GFP_KERNEL);
	if (!page) {
		DMERR("Couldn't alloc log page");
		bio_put(bio);
		goto error;
	}

	ptr = kmap_atomic(page);
	memcpy(ptr, entry, entrylen);
	if (datalen)
		memcpy(ptr + entrylen, data, datalen);
	memset(ptr + entrylen + datalen, 0,
	       lc->sectorsize - entrylen - datalen);
	kunmap_atomic(ptr);

	ret = bio_add_page(bio, page, lc->sectorsize, 0);
	if (ret != lc->sectorsize) {
		DMERR("Couldn't add page to the log block");
		goto error_bio;
	}
	submit_bio(bio);
	return 0;
error_bio:
	bio_put(bio);
	__free_page(page);
error:
	put_io_block(lc);
	return -1;
}

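/*
 * Write one pending block to the log: first the log_write_entry metadata
 * sector (plus any inline mark data) via write_metadata(), then the saved
 * data pages, splitting into additional bios whenever bio_add_page() can't
 * take another page. Consumes the block in all cases.
 */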
static int log_one_block(struct log_writes_c *lc,
			 struct pending_block *block, sector_t sector)
{
	struct bio *bio;
	struct log_write_entry entry;
	size_t ret;
	int i;

	entry.sector = cpu_to_le64(block->sector);
	entry.nr_sectors = cpu_to_le64(block->nr_sectors);
	entry.flags = cpu_to_le64(block->flags);
	entry.data_len = cpu_to_le64(block->datalen);
	if (write_metadata(lc, &entry, sizeof(entry), block->data,
			   block->datalen, sector)) {
		free_pending_block(lc, block);
		return -1;
	}

	if (!block->vec_cnt)
		goto out;
	sector += dev_to_bio_sectors(lc, 1);

	atomic_inc(&lc->io_blocks);
	bio = bio_alloc(GFP_KERNEL, min(block->vec_cnt, BIO_MAX_PAGES));
	if (!bio) {
		DMERR("Couldn't alloc log bio");
		goto error;
	}
	bio->bi_iter.bi_size = 0;
	bio->bi_iter.bi_sector = sector;
	bio_set_dev(bio, lc->logdev->bdev);
	bio->bi_end_io = log_end_io;
	bio->bi_private = lc;
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

	for (i = 0; i < block->vec_cnt; i++) {
		/*
		 * The page offset is always 0 because we allocate a new page
		 * for every bvec in the original bio for simplicity's sake.
		 */
		ret = bio_add_page(bio, block->vecs[i].bv_page,
				   block->vecs[i].bv_len, 0);
		if (ret != block->vecs[i].bv_len) {
			atomic_inc(&lc->io_blocks);
			submit_bio(bio);
			bio = bio_alloc(GFP_KERNEL, min(block->vec_cnt - i, BIO_MAX_PAGES));
			if (!bio) {
				DMERR("Couldn't alloc log bio");
				goto error;
			}
			bio->bi_iter.bi_size = 0;
			bio->bi_iter.bi_sector = sector;
			bio_set_dev(bio, lc->logdev->bdev);
			bio->bi_end_io = log_end_io;
			bio->bi_private = lc;
			bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

			ret = bio_add_page(bio, block->vecs[i].bv_page,
					   block->vecs[i].bv_len, 0);
			if (ret != block->vecs[i].bv_len) {
				DMERR("Couldn't add page on new bio?");
				bio_put(bio);
				goto error;
			}
		}
		sector += block->vecs[i].bv_len >> SECTOR_SHIFT;
	}
	submit_bio(bio);
out:
	kfree(block->data);
	kfree(block);
	put_pending_block(lc);
	return 0;
error:
	free_pending_block(lc, block);
	put_io_block(lc);
	return -1;
}

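/*
 * Rewrite the super at sector 0 of the log so userspace sees an up-to-date
 * nr_entries. The kthread does this after logging FUA and MARK entries.
 */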
static int log_super(struct log_writes_c *lc)
{
	struct log_write_super super;

	super.magic = cpu_to_le64(WRITE_LOG_MAGIC);
	super.version = cpu_to_le64(WRITE_LOG_VERSION);
	super.nr_entries = cpu_to_le64(lc->logged_entries);
	super.sectorsize = cpu_to_le32(lc->sectorsize);

	if (write_metadata(lc, &super, sizeof(super), NULL, 0, 0)) {
		DMERR("Couldn't write super");
		return -1;
	}

	return 0;
}

static inline sector_t logdev_last_sector(struct log_writes_c *lc)
{
	return i_size_read(lc->logdev->bdev->bd_inode) >> SECTOR_SHIFT;
}

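/*
 * The logging kthread: pulls blocks off logging_blocks one at a time,
 * allocates them space on the log device (a metadata sector plus the data
 * sectors, except for discards which log no data), and writes them out.
 * Any failure disables logging rather than failing the target.
 */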
static int log_writes_kthread(void *arg)
{
	struct log_writes_c *lc = (struct log_writes_c *)arg;
	sector_t sector = 0;

	while (!kthread_should_stop()) {
		bool super = false;
		bool logging_enabled;
		struct pending_block *block = NULL;
		int ret;

		spin_lock_irq(&lc->blocks_lock);
		if (!list_empty(&lc->logging_blocks)) {
			block = list_first_entry(&lc->logging_blocks,
						 struct pending_block, list);
			list_del_init(&block->list);
			if (!lc->logging_enabled)
				goto next;

			sector = lc->next_sector;
			if (!(block->flags & LOG_DISCARD_FLAG))
				lc->next_sector += dev_to_bio_sectors(lc, block->nr_sectors);
			lc->next_sector += dev_to_bio_sectors(lc, 1);

			/*
			 * Apparently the size of the device may not be known
			 * right away, so handle this properly.
			 */
			if (!lc->end_sector)
				lc->end_sector = logdev_last_sector(lc);
			if (lc->end_sector &&
			    lc->next_sector >= lc->end_sector) {
				DMERR("Ran out of space on the logdev");
				lc->logging_enabled = false;
				goto next;
			}
			lc->logged_entries++;
			atomic_inc(&lc->io_blocks);

			super = (block->flags & (LOG_FUA_FLAG | LOG_MARK_FLAG));
			if (super)
				atomic_inc(&lc->io_blocks);
		}
next:
		logging_enabled = lc->logging_enabled;
		spin_unlock_irq(&lc->blocks_lock);
		if (block) {
			if (logging_enabled) {
				ret = log_one_block(lc, block, sector);
				if (!ret && super)
					ret = log_super(lc);
				if (ret) {
					spin_lock_irq(&lc->blocks_lock);
					lc->logging_enabled = false;
					spin_unlock_irq(&lc->blocks_lock);
				}
			} else
				free_pending_block(lc, block);
			continue;
		}

		if (!try_to_freeze()) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (!kthread_should_stop() &&
			    list_empty(&lc->logging_blocks))
				schedule();
			__set_current_state(TASK_RUNNING);
		}
	}
	return 0;
}

/*
 * Construct a log-writes mapping:
 * log-writes <dev_path> <log_dev_path>
 */
static int log_writes_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct log_writes_c *lc;
	struct dm_arg_set as;
	const char *devname, *logdevname;
	int ret;

	as.argc = argc;
	as.argv = argv;

	if (argc < 2) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	lc = kzalloc(sizeof(struct log_writes_c), GFP_KERNEL);
	if (!lc) {
		ti->error = "Cannot allocate context";
		return -ENOMEM;
	}
	spin_lock_init(&lc->blocks_lock);
	INIT_LIST_HEAD(&lc->unflushed_blocks);
	INIT_LIST_HEAD(&lc->logging_blocks);
	init_waitqueue_head(&lc->wait);
	atomic_set(&lc->io_blocks, 0);
	atomic_set(&lc->pending_blocks, 0);

	devname = dm_shift_arg(&as);
	ret = dm_get_device(ti, devname, dm_table_get_mode(ti->table), &lc->dev);
	if (ret) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	logdevname = dm_shift_arg(&as);
	ret = dm_get_device(ti, logdevname, dm_table_get_mode(ti->table),
			    &lc->logdev);
	if (ret) {
		ti->error = "Log device lookup failed";
		dm_put_device(ti, lc->dev);
		goto bad;
	}

	lc->sectorsize = bdev_logical_block_size(lc->dev->bdev);
	lc->sectorshift = ilog2(lc->sectorsize);
	lc->log_kthread = kthread_run(log_writes_kthread, lc, "log-write");
	if (IS_ERR(lc->log_kthread)) {
		ret = PTR_ERR(lc->log_kthread);
		ti->error = "Couldn't alloc kthread";
		dm_put_device(ti, lc->dev);
		dm_put_device(ti, lc->logdev);
		goto bad;
	}

	/*
	 * next_sector is in 512b sectors to correspond to what bi_sector expects.
	 * The super starts at sector 0, and the next_sector is the next logical
	 * one based on the sectorsize of the device.
	 */
	lc->next_sector = lc->sectorsize >> SECTOR_SHIFT;
	lc->logging_enabled = true;
	lc->end_sector = logdev_last_sector(lc);
	lc->device_supports_discard = true;

	ti->num_flush_bios = 1;
	ti->flush_supported = true;
	ti->num_discard_bios = 1;
	ti->discards_supported = true;
	ti->per_io_data_size = sizeof(struct per_bio_data);
	ti->private = lc;
	return 0;

bad:
	kfree(lc);
	return ret;
}

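/*
 * Queue a MARK entry carrying a userspace-supplied string (truncated to
 * what fits in the entry's sector) so the spot can be found at replay time.
 */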
static int log_mark(struct log_writes_c *lc, char *data)
{
	struct pending_block *block;
	size_t maxsize = lc->sectorsize - sizeof(struct log_write_entry);

	block = kzalloc(sizeof(struct pending_block), GFP_KERNEL);
	if (!block) {
		DMERR("Error allocating pending block");
		return -ENOMEM;
	}

	block->data = kstrndup(data, maxsize, GFP_KERNEL);
	if (!block->data) {
		DMERR("Error copying mark data");
		kfree(block);
		return -ENOMEM;
	}
	atomic_inc(&lc->pending_blocks);
	block->datalen = strlen(block->data);
	block->flags |= LOG_MARK_FLAG;
	spin_lock_irq(&lc->blocks_lock);
	list_add_tail(&block->list, &lc->logging_blocks);
	spin_unlock_irq(&lc->blocks_lock);
	wake_up_process(lc->log_kthread);
	return 0;
}

static void log_writes_dtr(struct dm_target *ti)
{
	struct log_writes_c *lc = ti->private;

	spin_lock_irq(&lc->blocks_lock);
	list_splice_init(&lc->unflushed_blocks, &lc->logging_blocks);
	spin_unlock_irq(&lc->blocks_lock);

	/*
	 * This is just nice to have since it'll update the super to include
	 * the unflushed blocks; if it fails we don't really care.
	 */
	log_mark(lc, "dm-log-writes-end");
	wake_up_process(lc->log_kthread);
	wait_event(lc->wait, !atomic_read(&lc->io_blocks) &&
			     !atomic_read(&lc->pending_blocks));
	kthread_stop(lc->log_kthread);

	WARN_ON(!list_empty(&lc->logging_blocks));
	WARN_ON(!list_empty(&lc->unflushed_blocks));
	dm_put_device(ti, lc->dev);
	dm_put_device(ti, lc->logdev);
	kfree(lc);
}

static void normal_map_bio(struct dm_target *ti, struct bio *bio)
{
	struct log_writes_c *lc = ti->private;

	bio_set_dev(bio, lc->dev->bdev);
}

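/*
 * Decide how each incoming bio gets logged: reads and empty writes pass
 * straight through, discards are logged without data, empty flushes pick
 * up the list of completed-but-unflushed blocks, and data writes have
 * their payload copied into private pages so it can be logged after the
 * write completes on the real device.
 */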
static int log_writes_map(struct dm_target *ti, struct bio *bio)
{
	struct log_writes_c *lc = ti->private;
	struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
	struct pending_block *block;
	struct bvec_iter iter;
	struct bio_vec bv;
	size_t alloc_size;
	int i = 0;
	bool flush_bio = (bio->bi_opf & REQ_PREFLUSH);
	bool fua_bio = (bio->bi_opf & REQ_FUA);
	bool discard_bio = (bio_op(bio) == REQ_OP_DISCARD);

	pb->block = NULL;

	/* Don't bother doing anything if logging has been disabled */
	if (!lc->logging_enabled)
		goto map_bio;

	/*
	 * Map reads as normal.
	 */
	if (bio_data_dir(bio) == READ)
		goto map_bio;

	/* No sectors and not a flush? Don't care */
	if (!bio_sectors(bio) && !flush_bio)
		goto map_bio;

	/*
	 * Discards will have bi_size set but there's no actual data, so just
	 * allocate the size of the pending block.
	 */
	if (discard_bio)
		alloc_size = sizeof(struct pending_block);
	else
		alloc_size = sizeof(struct pending_block) + sizeof(struct bio_vec) * bio_segments(bio);

	block = kzalloc(alloc_size, GFP_NOIO);
	if (!block) {
		DMERR("Error allocating pending block");
		spin_lock_irq(&lc->blocks_lock);
		lc->logging_enabled = false;
		spin_unlock_irq(&lc->blocks_lock);
		return DM_MAPIO_KILL;
	}
	INIT_LIST_HEAD(&block->list);
	pb->block = block;
	atomic_inc(&lc->pending_blocks);

	if (flush_bio)
		block->flags |= LOG_FLUSH_FLAG;
	if (fua_bio)
		block->flags |= LOG_FUA_FLAG;
	if (discard_bio)
		block->flags |= LOG_DISCARD_FLAG;

	block->sector = bio_to_dev_sectors(lc, bio->bi_iter.bi_sector);
	block->nr_sectors = bio_to_dev_sectors(lc, bio_sectors(bio));

	/* We don't need the data, just submit */
	if (discard_bio) {
		WARN_ON(flush_bio || fua_bio);
		if (lc->device_supports_discard)
			goto map_bio;
		bio_endio(bio);
		return DM_MAPIO_SUBMITTED;
	}

	/* Flush bio, splice the unflushed blocks onto this list and submit */
	if (flush_bio && !bio_sectors(bio)) {
		spin_lock_irq(&lc->blocks_lock);
		list_splice_init(&lc->unflushed_blocks, &block->list);
		spin_unlock_irq(&lc->blocks_lock);
		goto map_bio;
	}

	/*
	 * We will write this bio somewhere else way later, so we need to copy
	 * the actual contents into new pages so we know the data will always
	 * be there.
	 *
	 * We do this because this could be a bio from O_DIRECT, in which case
	 * we can't just hold onto the page until some later point; we have to
	 * manually copy the contents.
	 */
	bio_for_each_segment(bv, bio, iter) {
		struct page *page;
		void *src, *dst;

		page = alloc_page(GFP_NOIO);
		if (!page) {
			DMERR("Error allocating page");
			free_pending_block(lc, block);
			spin_lock_irq(&lc->blocks_lock);
			lc->logging_enabled = false;
			spin_unlock_irq(&lc->blocks_lock);
			return DM_MAPIO_KILL;
		}

		src = kmap_atomic(bv.bv_page);
		dst = kmap_atomic(page);
		memcpy(dst, src + bv.bv_offset, bv.bv_len);
		kunmap_atomic(dst);
		kunmap_atomic(src);
		block->vecs[i].bv_page = page;
		block->vecs[i].bv_len = bv.bv_len;
		block->vec_cnt++;
		i++;
	}

	/* Had a flush with data in it, weird */
	if (flush_bio) {
		spin_lock_irq(&lc->blocks_lock);
		list_splice_init(&lc->unflushed_blocks, &block->list);
		spin_unlock_irq(&lc->blocks_lock);
	}
map_bio:
	normal_map_bio(ti, bio);
	return DM_MAPIO_REMAPPED;
}

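/*
 * Completion hook for bios on the real device. Once a write has actually
 * completed, its pending block moves on: FLUSH blocks drag their spliced
 * unflushed list with them and FUA blocks go straight to logging_blocks
 * (waking the kthread); plain writes sit on unflushed_blocks until the
 * next flush completes.
 */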
static int normal_end_io(struct dm_target *ti, struct bio *bio,
			 blk_status_t *error)
{
	struct log_writes_c *lc = ti->private;
	struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));

	if (bio_data_dir(bio) == WRITE && pb->block) {
		struct pending_block *block = pb->block;
		unsigned long flags;

		spin_lock_irqsave(&lc->blocks_lock, flags);
		if (block->flags & LOG_FLUSH_FLAG) {
			list_splice_tail_init(&block->list, &lc->logging_blocks);
			list_add_tail(&block->list, &lc->logging_blocks);
			wake_up_process(lc->log_kthread);
		} else if (block->flags & LOG_FUA_FLAG) {
			list_add_tail(&block->list, &lc->logging_blocks);
			wake_up_process(lc->log_kthread);
		} else
			list_add_tail(&block->list, &lc->unflushed_blocks);
		spin_unlock_irqrestore(&lc->blocks_lock, flags);
	}

	return DM_ENDIO_DONE;
}

/*
 * INFO format: <logged entries> <highest allocated sector>
 */
static void log_writes_status(struct dm_target *ti, status_type_t type,
			      unsigned status_flags, char *result,
			      unsigned maxlen)
{
	unsigned sz = 0;
	struct log_writes_c *lc = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%llu %llu", lc->logged_entries,
		       (unsigned long long)lc->next_sector - 1);
		if (!lc->logging_enabled)
			DMEMIT(" logging_disabled");
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s %s", lc->dev->name, lc->logdev->name);
		break;
	}
}

static int log_writes_prepare_ioctl(struct dm_target *ti,
				    struct block_device **bdev, fmode_t *mode)
{
	struct log_writes_c *lc = ti->private;
	struct dm_dev *dev = lc->dev;

	*bdev = dev->bdev;
	/*
	 * Only pass ioctls through if the device sizes match exactly.
	 */
	if (ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
		return 1;
	return 0;
}

static int log_writes_iterate_devices(struct dm_target *ti,
				      iterate_devices_callout_fn fn,
				      void *data)
{
	struct log_writes_c *lc = ti->private;

	return fn(ti, lc->dev, 0, ti->len, data);
}

/*
 * Messages supported:
 *   mark <mark data> - specify the marked data.
 */
static int log_writes_message(struct dm_target *ti, unsigned argc, char **argv)
{
	int r = -EINVAL;
	struct log_writes_c *lc = ti->private;

	if (argc != 2) {
		DMWARN("Invalid log-writes message arguments, expect 2 arguments, got %d", argc);
		return r;
	}

	if (!strcasecmp(argv[0], "mark"))
		r = log_mark(lc, argv[1]);
	else
		DMWARN("Unrecognised log writes target message received: %s", argv[0]);

	return r;
}

static void log_writes_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct log_writes_c *lc = ti->private;
	struct request_queue *q = bdev_get_queue(lc->dev->bdev);

	if (!q || !blk_queue_discard(q)) {
		lc->device_supports_discard = false;
		limits->discard_granularity = lc->sectorsize;
		limits->max_discard_sectors = (UINT_MAX >> SECTOR_SHIFT);
	}
	limits->logical_block_size = bdev_logical_block_size(lc->dev->bdev);
	limits->physical_block_size = bdev_physical_block_size(lc->dev->bdev);
	limits->io_min = limits->physical_block_size;
}

static struct target_type log_writes_target = {
	.name = "log-writes",
	.version = {1, 0, 0},
	.module = THIS_MODULE,
	.ctr = log_writes_ctr,
	.dtr = log_writes_dtr,
	.map = log_writes_map,
	.end_io = normal_end_io,
	.status = log_writes_status,
	.prepare_ioctl = log_writes_prepare_ioctl,
	.message = log_writes_message,
	.iterate_devices = log_writes_iterate_devices,
	.io_hints = log_writes_io_hints,
};

static int __init dm_log_writes_init(void)
{
	int r = dm_register_target(&log_writes_target);

	if (r < 0)
		DMERR("register failed %d", r);

	return r;
}

static void __exit dm_log_writes_exit(void)
{
	dm_unregister_target(&log_writes_target);
}

module_init(dm_log_writes_init);
module_exit(dm_log_writes_exit);

MODULE_DESCRIPTION(DM_NAME " log writes target");
MODULE_AUTHOR("Josef Bacik <jbacik@fb.com>");
MODULE_LICENSE("GPL");