/*
 * Copyright (C) 2016-2017 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2016-2017 Milan Broz
 * Copyright (C) 2016-2017 Mikulas Patocka
 *
 * This file is released under the GPL.
 */

#include "dm-bio-record.h"

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/vmalloc.h>
#include <linux/sort.h>
#include <linux/rbtree.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/async_tx.h>
#include <linux/dm-bufio.h>

#define DM_MSG_PREFIX "integrity"

#define DEFAULT_INTERLEAVE_SECTORS	32768
#define DEFAULT_JOURNAL_SIZE_FACTOR	7
#define DEFAULT_SECTORS_PER_BITMAP_BIT	32768
#define DEFAULT_BUFFER_SECTORS		128
#define DEFAULT_JOURNAL_WATERMARK	50
#define DEFAULT_SYNC_MSEC		10000
#define DEFAULT_MAX_JOURNAL_SECTORS	131072
#define MIN_LOG2_INTERLEAVE_SECTORS	3
#define MAX_LOG2_INTERLEAVE_SECTORS	31
#define METADATA_WORKQUEUE_MAX_ACTIVE	16
#define RECALC_SECTORS			32768
#define RECALC_WRITE_SUPER		16
#define BITMAP_BLOCK_SIZE		4096	/* don't change it */
#define BITMAP_FLUSH_INTERVAL		(10 * HZ)
#define DISCARD_FILLER			0xf6

/*
 * Warning - DEBUG_PRINT prints security-sensitive data to the log,
 * so it should not be enabled in the official kernel.
 */
//#define INTERNAL_VERIFY

#define SB_MAGIC			"integrt"
#define SB_VERSION_1			1
#define SB_VERSION_2			2
#define SB_VERSION_3			3
#define SB_VERSION_4			4
#define SB_VERSION_5			5
#define SB_SECTORS			8
#define MAX_SECTORS_PER_BLOCK		8
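
/*
 * A "block" is the unit protected by one tag: it spans from one to
 * MAX_SECTORS_PER_BLOCK (8) 512-byte sectors, i.e. up to 4KiB of data
 * per integrity tag.
 */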

#define SALT_SIZE			16

struct superblock {
	__u8 magic[8];
	__u8 version;
	__u8 log2_interleave_sectors;
	__le16 integrity_tag_size;
	__le32 journal_sections;
	__le64 provided_data_sectors;	/* userspace uses this value */
	__le32 flags;
	__u8 log2_sectors_per_block;
	__u8 log2_blocks_per_bitmap_bit;
	__u8 salt[SALT_SIZE];
};

#define SB_FLAG_HAVE_JOURNAL_MAC	0x1
#define SB_FLAG_RECALCULATING		0x2
#define SB_FLAG_DIRTY_BITMAP		0x4
#define SB_FLAG_FIXED_PADDING		0x8
#define SB_FLAG_FIXED_HMAC		0x10

#define JOURNAL_ENTRY_ROUNDUP		8

typedef __le64 commit_id_t;

#define JOURNAL_MAC_PER_SECTOR		8

struct journal_entry {
	union {
		struct {
			__le32 sector_lo;
			__le32 sector_hi;
		} s;
		__le64 sector;
	} u;
	commit_id_t last_bytes[];
};

#define journal_entry_tag(ic, je)	((__u8 *)&(je)->last_bytes[(ic)->sectors_per_block])

#if BITS_PER_LONG == 64
#define journal_entry_set_sector(je, x)		do { smp_wmb(); WRITE_ONCE((je)->u.sector, cpu_to_le64(x)); } while (0)
#else
#define journal_entry_set_sector(je, x)		do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); WRITE_ONCE((je)->u.s.sector_hi, cpu_to_le32((x) >> 32)); } while (0)
#endif
#define journal_entry_get_sector(je)		le64_to_cpu((je)->u.sector)
#define journal_entry_is_unused(je)		((je)->u.s.sector_hi == cpu_to_le32(-1))
#define journal_entry_set_unused(je)		do { ((je)->u.s.sector_hi = cpu_to_le32(-1)); } while (0)
#define journal_entry_is_inprogress(je)		((je)->u.s.sector_hi == cpu_to_le32(-2))
#define journal_entry_set_inprogress(je)	do { ((je)->u.s.sector_hi = cpu_to_le32(-2)); } while (0)
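
/*
 * On 32-bit targets the 64-bit sector number cannot be stored atomically,
 * so the low half is written first and smp_wmb() orders it before the
 * WRITE_ONCE() of the high half. The high half also doubles as a state
 * marker: all-ones means "unused" and -2 means "in progress"; both values
 * lie far outside any realistic device size, so they cannot collide with
 * a real sector number.
 */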

#define JOURNAL_BLOCK_SECTORS		8
#define JOURNAL_SECTOR_DATA		((1 << SECTOR_SHIFT) - sizeof(commit_id_t))
#define JOURNAL_MAC_SIZE		(JOURNAL_MAC_PER_SECTOR * JOURNAL_BLOCK_SECTORS)

struct journal_sector {
	__u8 entries[JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR];
	__u8 mac[JOURNAL_MAC_PER_SECTOR];
	commit_id_t commit_id;
};

#define MAX_TAG_SIZE			(JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR - offsetof(struct journal_entry, last_bytes[MAX_SECTORS_PER_BLOCK]))
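
/*
 * A journal entry (sector number, per-sector commit IDs and the tag) must
 * fit in the entry area of a single journal sector; MAX_TAG_SIZE expresses
 * exactly that bound.
 */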

#define METADATA_PADDING_SECTORS	8

#define N_COMMIT_IDS			4

static unsigned char prev_commit_seq(unsigned char seq)
{
	return (seq + N_COMMIT_IDS - 1) % N_COMMIT_IDS;
}

static unsigned char next_commit_seq(unsigned char seq)
{
	return (seq + 1) % N_COMMIT_IDS;
}

/*
 * In-memory structures
 */

struct journal_node {
	struct rb_node node;
	sector_t sector;
};

struct dm_integrity_c {
	struct dm_dev *dev;
	struct dm_dev *meta_dev;
	unsigned tag_size;
	__s8 log2_tag_size;
	sector_t start;
	mempool_t journal_io_mempool;
	struct dm_io_client *io;
	struct dm_bufio_client *bufio;
	struct workqueue_struct *metadata_wq;
	struct superblock *sb;
	unsigned journal_pages;
	unsigned n_bitmap_blocks;

	struct page_list *journal;
	struct page_list *journal_io;
	struct page_list *journal_xor;
	struct page_list *recalc_bitmap;
	struct page_list *may_write_bitmap;
	struct bitmap_block_status *bbs;
	unsigned bitmap_flush_interval;
	int synchronous_mode;
	struct bio_list synchronous_bios;
	struct delayed_work bitmap_flush_work;

	struct crypto_skcipher *journal_crypt;
	struct scatterlist **journal_scatterlist;
	struct scatterlist **journal_io_scatterlist;
	struct skcipher_request **sk_requests;

	struct crypto_shash *journal_mac;

	struct journal_node *journal_tree;
	struct rb_root journal_tree_root;

	sector_t provided_data_sectors;

	unsigned short journal_entry_size;
	unsigned char journal_entries_per_sector;
	unsigned char journal_section_entries;
	unsigned short journal_section_sectors;
	unsigned journal_sections;
	unsigned journal_entries;
	sector_t data_device_sectors;
	sector_t meta_device_sectors;
	unsigned initial_sectors;
	unsigned metadata_run;
	__s8 log2_metadata_run;
	__u8 log2_buffer_sectors;
	__u8 sectors_per_block;
	__u8 log2_blocks_per_bitmap_bit;

	unsigned char mode;

	int failed;

	struct crypto_shash *internal_hash;

	struct dm_target *ti;

	/* these variables are locked with endio_wait.lock */
	struct rb_root in_progress;
	struct list_head wait_list;
	wait_queue_head_t endio_wait;
	struct workqueue_struct *wait_wq;
	struct workqueue_struct *offload_wq;

	unsigned char commit_seq;
	commit_id_t commit_ids[N_COMMIT_IDS];

	unsigned committed_section;
	unsigned n_committed_sections;

	unsigned uncommitted_section;
	unsigned n_uncommitted_sections;

	unsigned free_section;
	unsigned char free_section_entry;
	unsigned free_sectors;

	unsigned free_sectors_threshold;

	struct workqueue_struct *commit_wq;
	struct work_struct commit_work;

	struct workqueue_struct *writer_wq;
	struct work_struct writer_work;

	struct workqueue_struct *recalc_wq;
	struct work_struct recalc_work;

	struct bio_list flush_bio_list;

	unsigned long autocommit_jiffies;
	struct timer_list autocommit_timer;
	unsigned autocommit_msec;

	wait_queue_head_t copy_to_journal_wait;

	struct completion crypto_backoff;

	bool journal_uptodate;

	bool recalculate_flag;
	bool reset_recalculate_flag;
	bool discard;

	bool legacy_recalculate;

	struct alg_spec internal_hash_alg;
	struct alg_spec journal_crypt_alg;
	struct alg_spec journal_mac_alg;

	atomic64_t number_of_mismatches;

	struct notifier_block reboot_notifier;
};

struct dm_integrity_range {
	sector_t logical_sector;
	sector_t n_sectors;
	bool waiting;
	union {
		struct rb_node node;
		struct {
			struct task_struct *task;
			struct list_head wait_entry;
		};
	};
};

struct dm_integrity_io {
	struct work_struct work;

	struct dm_integrity_c *ic;
	enum req_opf op;
	bool fua;

	struct dm_integrity_range range;

	sector_t metadata_block;
	unsigned metadata_offset;

	atomic_t in_flight;
	blk_status_t bi_status;

	struct completion *completion;

	struct dm_bio_details bio_details;
};

struct journal_completion {
	struct dm_integrity_c *ic;
	atomic_t in_flight;
	struct completion comp;
};

struct journal_io {
	struct dm_integrity_range range;
	struct journal_completion *comp;
};

struct bitmap_block_status {
	struct work_struct work;
	struct dm_integrity_c *ic;
	unsigned long *bitmap;
	struct bio_list bio_queue;
	spinlock_t bio_queue_lock;
};

static struct kmem_cache *journal_io_cache;

#define JOURNAL_IO_MEMPOOL	32

#ifdef DEBUG_PRINT
#define DEBUG_print(x, ...)			printk(KERN_DEBUG x, ##__VA_ARGS__)
static void __DEBUG_bytes(__u8 *bytes, size_t len, const char *msg, ...)
{
	va_list args;

	va_start(args, msg);
	vprintk(msg, args);
	va_end(args);
	if (len)
		pr_cont(":");
	while (len) {
		pr_cont(" %02x", *bytes);
		bytes++;
		len--;
	}
	pr_cont("\n");
}
#define DEBUG_bytes(bytes, len, msg, ...)	__DEBUG_bytes(bytes, len, KERN_DEBUG msg, ##__VA_ARGS__)
#else
#define DEBUG_print(x, ...)			do { } while (0)
#define DEBUG_bytes(bytes, len, msg, ...)	do { } while (0)
#endif

static void dm_integrity_prepare(struct request *rq)
{
}

static void dm_integrity_complete(struct request *rq, unsigned int nr_bytes)
{
}

/*
 * DM Integrity profile, protection is performed a layer above (dm-crypt).
 */
static const struct blk_integrity_profile dm_integrity_profile = {
	.name		= "DM-DIF-EXT-TAG",
	.prepare_fn	= dm_integrity_prepare,
	.complete_fn	= dm_integrity_complete,
};

static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map);
static void integrity_bio_wait(struct work_struct *w);
static void dm_integrity_dtr(struct dm_target *ti);

static void dm_integrity_io_error(struct dm_integrity_c *ic, const char *msg, int err)
{
	if (err == -EILSEQ)
		atomic64_inc(&ic->number_of_mismatches);
	if (!cmpxchg(&ic->failed, 0, err))
		DMERR("Error on %s: %d", msg, err);
}

static int dm_integrity_failed(struct dm_integrity_c *ic)
{
	return READ_ONCE(ic->failed);
}

static bool dm_integrity_disable_recalculate(struct dm_integrity_c *ic)
{
	if (ic->legacy_recalculate)
		return false;
	if (!(ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) ?
	    ic->internal_hash_alg.key || ic->journal_mac_alg.key :
	    ic->internal_hash_alg.key && !ic->journal_mac_alg.key)
		return true;
	return false;
}

static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i,
					  unsigned j, unsigned char seq)
{
	/*
	 * Xor the number with section and sector, so that if a piece of
	 * journal is written at wrong place, it is detected.
	 */
	return ic->commit_ids[seq] ^ cpu_to_le64(((__u64)i << 32) ^ j);
}

static void get_area_and_offset(struct dm_integrity_c *ic, sector_t data_sector,
				sector_t *area, sector_t *offset)
{
	if (!ic->meta_dev) {
		__u8 log2_interleave_sectors = ic->sb->log2_interleave_sectors;
		*area = data_sector >> log2_interleave_sectors;
		*offset = (unsigned)data_sector & ((1U << log2_interleave_sectors) - 1);
	} else {
		*area = 0;
		*offset = data_sector;
	}
}
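
/*
 * With interleaved metadata (no meta_dev), the data device is split into
 * areas of 2^log2_interleave_sectors sectors, each with its own metadata
 * run: "area" selects the chunk and "offset" the sector inside it. With a
 * separate metadata device there is only one area.
 */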

#define sector_to_block(ic, n)						\
do {									\
	BUG_ON((n) & (unsigned)((ic)->sectors_per_block - 1));		\
	(n) >>= (ic)->sb->log2_sectors_per_block;			\
} while (0)

static __u64 get_metadata_sector_and_offset(struct dm_integrity_c *ic, sector_t area,
					    sector_t offset, unsigned *metadata_offset)
{
	__u64 ms;
	unsigned mo;

	ms = area << ic->sb->log2_interleave_sectors;
	if (likely(ic->log2_metadata_run >= 0))
		ms += area << ic->log2_metadata_run;
	else
		ms += area * ic->metadata_run;
	ms >>= ic->log2_buffer_sectors;

	sector_to_block(ic, offset);

	if (likely(ic->log2_tag_size >= 0)) {
		ms += offset >> (SECTOR_SHIFT + ic->log2_buffer_sectors - ic->log2_tag_size);
		mo = (offset << ic->log2_tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
	} else {
		ms += (__u64)offset * ic->tag_size >> (SECTOR_SHIFT + ic->log2_buffer_sectors);
		mo = (offset * ic->tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
	}
	*metadata_offset = mo;
	return ms;
}

static sector_t get_data_sector(struct dm_integrity_c *ic, sector_t area, sector_t offset)
{
	sector_t result;

	if (ic->meta_dev)
		return offset;

	result = area << ic->sb->log2_interleave_sectors;
	if (likely(ic->log2_metadata_run >= 0))
		result += (area + 1) << ic->log2_metadata_run;
	else
		result += (area + 1) * ic->metadata_run;

	result += (sector_t)ic->initial_sectors + offset;
	result += ic->start;

	return result;
}

static void wraparound_section(struct dm_integrity_c *ic, unsigned *sec_ptr)
{
	if (unlikely(*sec_ptr >= ic->journal_sections))
		*sec_ptr -= ic->journal_sections;
}

static void sb_set_version(struct dm_integrity_c *ic)
{
	if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC))
		ic->sb->version = SB_VERSION_5;
	else if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING))
		ic->sb->version = SB_VERSION_4;
	else if (ic->mode == 'B' || ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP))
		ic->sb->version = SB_VERSION_3;
	else if (ic->meta_dev || ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
		ic->sb->version = SB_VERSION_2;
	else
		ic->sb->version = SB_VERSION_1;
}

static int sb_mac(struct dm_integrity_c *ic, bool wr)
{
	SHASH_DESC_ON_STACK(desc, ic->journal_mac);
	int r;
	unsigned size = crypto_shash_digestsize(ic->journal_mac);

	if (sizeof(struct superblock) + size > 1 << SECTOR_SHIFT) {
		dm_integrity_io_error(ic, "digest is too long", -EINVAL);
		return -EINVAL;
	}

	desc->tfm = ic->journal_mac;

	r = crypto_shash_init(desc);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_init", r);
		return r;
	}

	r = crypto_shash_update(desc, (__u8 *)ic->sb, (1 << SECTOR_SHIFT) - size);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_update", r);
		return r;
	}

	if (likely(wr)) {
		r = crypto_shash_final(desc, (__u8 *)ic->sb + (1 << SECTOR_SHIFT) - size);
		if (unlikely(r < 0)) {
			dm_integrity_io_error(ic, "crypto_shash_final", r);
			return r;
		}
	} else {
		__u8 result[HASH_MAX_DIGESTSIZE];
		r = crypto_shash_final(desc, result);
		if (unlikely(r < 0)) {
			dm_integrity_io_error(ic, "crypto_shash_final", r);
			return r;
		}
		if (memcmp((__u8 *)ic->sb + (1 << SECTOR_SHIFT) - size, result, size)) {
			dm_integrity_io_error(ic, "superblock mac", -EILSEQ);
			return -EILSEQ;
		}
	}

	return 0;
}
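
/*
 * The superblock MAC occupies the last crypto_shash_digestsize() bytes of
 * the 512-byte superblock sector; everything before it is covered by the
 * MAC computation above.
 */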

static int sync_rw_sb(struct dm_integrity_c *ic, int op, int op_flags)
{
	struct dm_io_request io_req;
	struct dm_io_region io_loc;
	int r;

	io_req.bi_op = op;
	io_req.bi_op_flags = op_flags;
	io_req.mem.type = DM_IO_KMEM;
	io_req.mem.ptr.addr = ic->sb;
	io_req.notify.fn = NULL;
	io_req.client = ic->io;
	io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev;
	io_loc.sector = ic->start;
	io_loc.count = SB_SECTORS;

	if (op == REQ_OP_WRITE) {
		sb_set_version(ic);
		if (ic->journal_mac && ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) {
			r = sb_mac(ic, true);
			if (unlikely(r))
				return r;
		}
	}

	r = dm_io(&io_req, 1, &io_loc, NULL);
	if (unlikely(r))
		return r;

	if (op == REQ_OP_READ) {
		if (ic->mode != 'R' && ic->journal_mac && ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) {
			r = sb_mac(ic, false);
			if (unlikely(r))
				return r;
		}
	}

	return 0;
}

#define BITMAP_OP_TEST_ALL_SET		0
#define BITMAP_OP_TEST_ALL_CLEAR	1
#define BITMAP_OP_SET			2
#define BITMAP_OP_CLEAR			3
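
/*
 * block_bitmap_op() applies one of the four operations above to the bit
 * range covering sectors [sector, sector + n_sectors): the TEST_ALL modes
 * report whether every bit in the range is set (or clear), while SET and
 * CLEAR modify the range and always return true.
 */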

static bool block_bitmap_op(struct dm_integrity_c *ic, struct page_list *bitmap,
			    sector_t sector, sector_t n_sectors, int mode)
{
	unsigned long bit, end_bit, this_end_bit, page, end_page;
	unsigned long *data;

	if (unlikely(((sector | n_sectors) & ((1 << ic->sb->log2_sectors_per_block) - 1)) != 0)) {
		DMCRIT("invalid bitmap access (%llx,%llx,%d,%d,%d)",
			sector,
			n_sectors,
			ic->sb->log2_sectors_per_block,
			ic->log2_blocks_per_bitmap_bit,
			mode);
		BUG();
	}

	if (unlikely(!n_sectors))
		return true;

	bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
	end_bit = (sector + n_sectors - 1) >>
		(ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);

	page = bit / (PAGE_SIZE * 8);
	bit %= PAGE_SIZE * 8;

	end_page = end_bit / (PAGE_SIZE * 8);
	end_bit %= PAGE_SIZE * 8;

repeat:
	if (page < end_page) {
		this_end_bit = PAGE_SIZE * 8 - 1;
	} else {
		this_end_bit = end_bit;
	}

	data = lowmem_page_address(bitmap[page].page);

	if (mode == BITMAP_OP_TEST_ALL_SET) {
		while (bit <= this_end_bit) {
			if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
				do {
					if (data[bit / BITS_PER_LONG] != -1)
						return false;
					bit += BITS_PER_LONG;
				} while (this_end_bit >= bit + BITS_PER_LONG - 1);
				continue;
			}
			if (!test_bit(bit, data))
				return false;
			bit++;
		}
	} else if (mode == BITMAP_OP_TEST_ALL_CLEAR) {
		while (bit <= this_end_bit) {
			if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
				do {
					if (data[bit / BITS_PER_LONG] != 0)
						return false;
					bit += BITS_PER_LONG;
				} while (this_end_bit >= bit + BITS_PER_LONG - 1);
				continue;
			}
			if (test_bit(bit, data))
				return false;
			bit++;
		}
	} else if (mode == BITMAP_OP_SET) {
		while (bit <= this_end_bit) {
			if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
				do {
					data[bit / BITS_PER_LONG] = -1;
					bit += BITS_PER_LONG;
				} while (this_end_bit >= bit + BITS_PER_LONG - 1);
				continue;
			}
			__set_bit(bit, data);
			bit++;
		}
	} else if (mode == BITMAP_OP_CLEAR) {
		if (!bit && this_end_bit == PAGE_SIZE * 8 - 1)
			clear_page(data);
		else while (bit <= this_end_bit) {
			if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
				do {
					data[bit / BITS_PER_LONG] = 0;
					bit += BITS_PER_LONG;
				} while (this_end_bit >= bit + BITS_PER_LONG - 1);
				continue;
			}
			__clear_bit(bit, data);
			bit++;
		}
	} else {
		BUG();
	}

	if (unlikely(page < end_page)) {
		bit = 0;
		page++;
		goto repeat;
	}

	return true;
}

static void block_bitmap_copy(struct dm_integrity_c *ic, struct page_list *dst, struct page_list *src)
{
	unsigned n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
	unsigned i;

	for (i = 0; i < n_bitmap_pages; i++) {
		unsigned long *dst_data = lowmem_page_address(dst[i].page);
		unsigned long *src_data = lowmem_page_address(src[i].page);
		copy_page(dst_data, src_data);
	}
}

static struct bitmap_block_status *sector_to_bitmap_block(struct dm_integrity_c *ic, sector_t sector)
{
	unsigned bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
	unsigned bitmap_block = bit / (BITMAP_BLOCK_SIZE * 8);

	BUG_ON(bitmap_block >= ic->n_bitmap_blocks);
	return &ic->bbs[bitmap_block];
}

static void access_journal_check(struct dm_integrity_c *ic, unsigned section, unsigned offset,
				 bool e, const char *function)
{
#if defined(CONFIG_DM_DEBUG) || defined(INTERNAL_VERIFY)
	unsigned limit = e ? ic->journal_section_entries : ic->journal_section_sectors;

	if (unlikely(section >= ic->journal_sections) ||
	    unlikely(offset >= limit)) {
		DMCRIT("%s: invalid access at (%u,%u), limit (%u,%u)",
		       function, section, offset, ic->journal_sections, limit);
		BUG();
	}
#endif
}

static void page_list_location(struct dm_integrity_c *ic, unsigned section, unsigned offset,
			       unsigned *pl_index, unsigned *pl_offset)
{
	unsigned sector;

	access_journal_check(ic, section, offset, false, "page_list_location");

	sector = section * ic->journal_section_sectors + offset;

	*pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	*pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
}

static struct journal_sector *access_page_list(struct dm_integrity_c *ic, struct page_list *pl,
					       unsigned section, unsigned offset, unsigned *n_sectors)
{
	unsigned pl_index, pl_offset;
	char *va;

	page_list_location(ic, section, offset, &pl_index, &pl_offset);

	if (n_sectors)
		*n_sectors = (PAGE_SIZE - pl_offset) >> SECTOR_SHIFT;

	va = lowmem_page_address(pl[pl_index].page);

	return (struct journal_sector *)(va + pl_offset);
}

static struct journal_sector *access_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset)
{
	return access_page_list(ic, ic->journal, section, offset, NULL);
}
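
/*
 * Each journal section starts with JOURNAL_BLOCK_SECTORS sectors that hold
 * the journal entries (and per-sector MACs), followed by the journaled
 * data blocks themselves; access_journal_entry() and access_journal_data()
 * map an entry index into those two regions.
 */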

static struct journal_entry *access_journal_entry(struct dm_integrity_c *ic, unsigned section, unsigned n)
{
	unsigned rel_sector, offset;
	struct journal_sector *js;

	access_journal_check(ic, section, n, true, "access_journal_entry");

	rel_sector = n % JOURNAL_BLOCK_SECTORS;
	offset = n / JOURNAL_BLOCK_SECTORS;

	js = access_journal(ic, section, rel_sector);
	return (struct journal_entry *)((char *)js + offset * ic->journal_entry_size);
}

static struct journal_sector *access_journal_data(struct dm_integrity_c *ic, unsigned section, unsigned n)
{
	n <<= ic->sb->log2_sectors_per_block;

	n += JOURNAL_BLOCK_SECTORS;

	access_journal_check(ic, section, n, false, "access_journal_data");

	return access_journal(ic, section, n);
}

static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result[JOURNAL_MAC_SIZE])
{
	SHASH_DESC_ON_STACK(desc, ic->journal_mac);
	int r;
	unsigned j, size;

	desc->tfm = ic->journal_mac;

	r = crypto_shash_init(desc);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_init", r);
		goto err;
	}

	if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) {
		__le64 section_le;

		r = crypto_shash_update(desc, (__u8 *)&ic->sb->salt, SALT_SIZE);
		if (unlikely(r < 0)) {
			dm_integrity_io_error(ic, "crypto_shash_update", r);
			goto err;
		}

		section_le = cpu_to_le64(section);
		r = crypto_shash_update(desc, (__u8 *)&section_le, sizeof section_le);
		if (unlikely(r < 0)) {
			dm_integrity_io_error(ic, "crypto_shash_update", r);
			goto err;
		}
	}

	for (j = 0; j < ic->journal_section_entries; j++) {
		struct journal_entry *je = access_journal_entry(ic, section, j);
		r = crypto_shash_update(desc, (__u8 *)&je->u.sector, sizeof je->u.sector);
		if (unlikely(r < 0)) {
			dm_integrity_io_error(ic, "crypto_shash_update", r);
			goto err;
		}
	}

	size = crypto_shash_digestsize(ic->journal_mac);

	if (likely(size <= JOURNAL_MAC_SIZE)) {
		r = crypto_shash_final(desc, result);
		if (unlikely(r < 0)) {
			dm_integrity_io_error(ic, "crypto_shash_final", r);
			goto err;
		}
		memset(result + size, 0, JOURNAL_MAC_SIZE - size);
	} else {
		__u8 digest[HASH_MAX_DIGESTSIZE];

		if (WARN_ON(size > sizeof(digest))) {
			dm_integrity_io_error(ic, "digest_size", -EINVAL);
			goto err;
		}
		r = crypto_shash_final(desc, digest);
		if (unlikely(r < 0)) {
			dm_integrity_io_error(ic, "crypto_shash_final", r);
			goto err;
		}
		memcpy(result, digest, JOURNAL_MAC_SIZE);
	}

	return;
err:
	memset(result, 0, JOURNAL_MAC_SIZE);
}

static void rw_section_mac(struct dm_integrity_c *ic, unsigned section, bool wr)
{
	__u8 result[JOURNAL_MAC_SIZE];
	unsigned j;

	if (!ic->journal_mac)
		return;

	section_mac(ic, section, result);

	for (j = 0; j < JOURNAL_BLOCK_SECTORS; j++) {
		struct journal_sector *js = access_journal(ic, section, j);

		if (likely(wr))
			memcpy(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR);
		else {
			if (memcmp(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR))
				dm_integrity_io_error(ic, "journal mac", -EILSEQ);
		}
	}
}

static void complete_journal_op(void *context)
{
	struct journal_completion *comp = context;
	BUG_ON(!atomic_read(&comp->in_flight));
	if (likely(atomic_dec_and_test(&comp->in_flight)))
		complete(&comp->comp);
}
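
/*
 * Journal operations are tracked with a simple in_flight counter: each
 * submitted sub-operation (XOR step, cipher request or disk write) is
 * accounted for in comp->in_flight and retired through
 * complete_journal_op(); the completion fires when the counter reaches
 * zero.
 */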

static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
			unsigned n_sections, struct journal_completion *comp)
{
	struct async_submit_ctl submit;
	size_t n_bytes = (size_t)(n_sections * ic->journal_section_sectors) << SECTOR_SHIFT;
	unsigned pl_index, pl_offset, section_index;
	struct page_list *source_pl, *target_pl;

	if (likely(encrypt)) {
		source_pl = ic->journal;
		target_pl = ic->journal_io;
	} else {
		source_pl = ic->journal_io;
		target_pl = ic->journal;
	}

	page_list_location(ic, section, 0, &pl_index, &pl_offset);

	atomic_add(roundup(pl_offset + n_bytes, PAGE_SIZE) >> PAGE_SHIFT, &comp->in_flight);

	init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, complete_journal_op, comp, NULL);

	section_index = pl_index;

	do {
		size_t this_step;
		struct page *src_pages[2];
		struct page *dst_page;

		while (unlikely(pl_index == section_index)) {
			unsigned dummy;
			if (likely(encrypt))
				rw_section_mac(ic, section, true);
			section++;
			n_sections--;
			page_list_location(ic, section, 0, &section_index, &dummy);
		}

		this_step = min(n_bytes, (size_t)PAGE_SIZE - pl_offset);
		dst_page = target_pl[pl_index].page;
		src_pages[0] = source_pl[pl_index].page;
		src_pages[1] = ic->journal_xor[pl_index].page;

		async_xor(dst_page, src_pages, pl_offset, 2, this_step, &submit);

		pl_index++;
		pl_offset = 0;
		n_bytes -= this_step;
	} while (n_bytes);

	BUG_ON(n_sections);

	async_tx_issue_pending_all();
}

static void complete_journal_encrypt(struct crypto_async_request *req, int err)
{
	struct journal_completion *comp = req->data;
	if (unlikely(err)) {
		if (likely(err == -EINPROGRESS)) {
			complete(&comp->ic->crypto_backoff);
			return;
		}
		dm_integrity_io_error(comp->ic, "asynchronous encrypt", err);
	}
	complete_journal_op(comp);
}

static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_completion *comp)
{
	int r;

	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      complete_journal_encrypt, comp);
	if (likely(encrypt))
		r = crypto_skcipher_encrypt(req);
	else
		r = crypto_skcipher_decrypt(req);
	if (likely(!r))
		return false;
	if (likely(r == -EINPROGRESS))
		return true;
	if (likely(r == -EBUSY)) {
		wait_for_completion(&comp->ic->crypto_backoff);
		reinit_completion(&comp->ic->crypto_backoff);
		return true;
	}
	dm_integrity_io_error(comp->ic, "encrypt", r);
	return false;
}
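
/*
 * do_crypt() returns true when the request was queued asynchronously (the
 * completion callback will account for it in in_flight) and false when it
 * finished synchronously or failed. -EBUSY means the request sat on the
 * backlog: we wait on crypto_backoff until it is accepted and still treat
 * it as asynchronous.
 */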

static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
			  unsigned n_sections, struct journal_completion *comp)
{
	struct scatterlist **source_sg;
	struct scatterlist **target_sg;

	atomic_add(2, &comp->in_flight);

	if (likely(encrypt)) {
		source_sg = ic->journal_scatterlist;
		target_sg = ic->journal_io_scatterlist;
	} else {
		source_sg = ic->journal_io_scatterlist;
		target_sg = ic->journal_scatterlist;
	}

	do {
		struct skcipher_request *req;
		unsigned ivsize;
		char *iv;

		if (likely(encrypt))
			rw_section_mac(ic, section, true);

		req = ic->sk_requests[section];
		ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
		iv = req->iv;

		memcpy(iv, iv + ivsize, ivsize);

		req->src = source_sg[section];
		req->dst = target_sg[section];

		if (unlikely(do_crypt(encrypt, req, comp)))
			atomic_inc(&comp->in_flight);

		section++;
		n_sections--;
	} while (n_sections);

	atomic_dec(&comp->in_flight);
	complete_journal_op(comp);
}

static void encrypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
			    unsigned n_sections, struct journal_completion *comp)
{
	if (ic->journal_xor)
		return xor_journal(ic, encrypt, section, n_sections, comp);
	else
		return crypt_journal(ic, encrypt, section, n_sections, comp);
}

static void complete_journal_io(unsigned long error, void *context)
{
	struct journal_completion *comp = context;
	if (unlikely(error != 0))
		dm_integrity_io_error(comp->ic, "writing journal", -EIO);
	complete_journal_op(comp);
}

static void rw_journal_sectors(struct dm_integrity_c *ic, int op, int op_flags,
			       unsigned sector, unsigned n_sectors, struct journal_completion *comp)
{
	struct dm_io_request io_req;
	struct dm_io_region io_loc;
	unsigned pl_index, pl_offset;
	int r;

	if (unlikely(dm_integrity_failed(ic))) {
		if (comp)
			complete_journal_io(-1UL, comp);
		return;
	}

	pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);

	io_req.bi_op = op;
	io_req.bi_op_flags = op_flags;
	io_req.mem.type = DM_IO_PAGE_LIST;
	if (ic->journal_io)
		io_req.mem.ptr.pl = &ic->journal_io[pl_index];
	else
		io_req.mem.ptr.pl = &ic->journal[pl_index];
	io_req.mem.offset = pl_offset;
	if (likely(comp != NULL)) {
		io_req.notify.fn = complete_journal_io;
		io_req.notify.context = comp;
	} else {
		io_req.notify.fn = NULL;
	}
	io_req.client = ic->io;
	io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev;
	io_loc.sector = ic->start + SB_SECTORS + sector;
	io_loc.count = n_sectors;

	r = dm_io(&io_req, 1, &io_loc, NULL);
	if (unlikely(r)) {
		dm_integrity_io_error(ic, op == REQ_OP_READ ? "reading journal" : "writing journal", r);
		if (comp) {
			WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
			complete_journal_io(-1UL, comp);
		}
	}
}

static void rw_journal(struct dm_integrity_c *ic, int op, int op_flags, unsigned section,
		       unsigned n_sections, struct journal_completion *comp)
{
	unsigned sector, n_sectors;

	sector = section * ic->journal_section_sectors;
	n_sectors = n_sections * ic->journal_section_sectors;

	rw_journal_sectors(ic, op, op_flags, sector, n_sectors, comp);
}
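
/*
 * write_journal() commits sections [commit_start, commit_start +
 * commit_sections). If the range wraps past the end of the circular
 * journal it is written as two pieces (hence the in_flight count of 2),
 * and encryption of the second piece is overlapped with the write of the
 * first when possible.
 */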

static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsigned commit_sections)
{
	struct journal_completion io_comp;
	struct journal_completion crypt_comp_1;
	struct journal_completion crypt_comp_2;
	unsigned i;

	io_comp.ic = ic;
	init_completion(&io_comp.comp);

	if (commit_start + commit_sections <= ic->journal_sections) {
		io_comp.in_flight = (atomic_t)ATOMIC_INIT(1);
		if (ic->journal_io) {
			crypt_comp_1.ic = ic;
			init_completion(&crypt_comp_1.comp);
			crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
			encrypt_journal(ic, true, commit_start, commit_sections, &crypt_comp_1);
			wait_for_completion_io(&crypt_comp_1.comp);
		} else {
			for (i = 0; i < commit_sections; i++)
				rw_section_mac(ic, commit_start + i, true);
		}
		rw_journal(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, commit_start,
			   commit_sections, &io_comp);
	} else {
		unsigned to_end;
		io_comp.in_flight = (atomic_t)ATOMIC_INIT(2);
		to_end = ic->journal_sections - commit_start;
		if (ic->journal_io) {
			crypt_comp_1.ic = ic;
			init_completion(&crypt_comp_1.comp);
			crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
			encrypt_journal(ic, true, commit_start, to_end, &crypt_comp_1);
			if (try_wait_for_completion(&crypt_comp_1.comp)) {
				rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
				reinit_completion(&crypt_comp_1.comp);
				crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
				encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_1);
				wait_for_completion_io(&crypt_comp_1.comp);
			} else {
				crypt_comp_2.ic = ic;
				init_completion(&crypt_comp_2.comp);
				crypt_comp_2.in_flight = (atomic_t)ATOMIC_INIT(0);
				encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_2);
				wait_for_completion_io(&crypt_comp_1.comp);
				rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
				wait_for_completion_io(&crypt_comp_2.comp);
			}
		} else {
			for (i = 0; i < to_end; i++)
				rw_section_mac(ic, commit_start + i, true);
			rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
			for (i = 0; i < commit_sections - to_end; i++)
				rw_section_mac(ic, i, true);
		}
		rw_journal(ic, REQ_OP_WRITE, REQ_FUA, 0, commit_sections - to_end, &io_comp);
	}

	wait_for_completion_io(&io_comp.comp);
}

static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset,
			      unsigned n_sectors, sector_t target, io_notify_fn fn, void *data)
{
	struct dm_io_request io_req;
	struct dm_io_region io_loc;
	int r;
	unsigned sector, pl_index, pl_offset;

	BUG_ON((target | n_sectors | offset) & (unsigned)(ic->sectors_per_block - 1));

	if (unlikely(dm_integrity_failed(ic))) {
		fn(-1UL, data);
		return;
	}

	sector = section * ic->journal_section_sectors + JOURNAL_BLOCK_SECTORS + offset;

	pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);

	io_req.bi_op = REQ_OP_WRITE;
	io_req.bi_op_flags = 0;
	io_req.mem.type = DM_IO_PAGE_LIST;
	io_req.mem.ptr.pl = &ic->journal[pl_index];
	io_req.mem.offset = pl_offset;
	io_req.notify.fn = fn;
	io_req.notify.context = data;
	io_req.client = ic->io;
	io_loc.bdev = ic->dev->bdev;
	io_loc.sector = target;
	io_loc.count = n_sectors;

	r = dm_io(&io_req, 1, &io_loc, NULL);
	if (unlikely(r)) {
		WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
		fn(-1UL, data);
	}
}

static bool ranges_overlap(struct dm_integrity_range *range1, struct dm_integrity_range *range2)
{
	return range1->logical_sector < range2->logical_sector + range2->n_sectors &&
	       range1->logical_sector + range1->n_sectors > range2->logical_sector;
}
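
/*
 * In-flight I/O ranges are kept in the in_progress rb-tree, keyed by
 * logical sector. An overlapping new range is refused (add_new_range()
 * returns false) and its submitter parks on wait_list until the
 * conflicting range is removed.
 */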

static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool check_waiting)
{
	struct rb_node **n = &ic->in_progress.rb_node;
	struct rb_node *parent;

	BUG_ON((new_range->logical_sector | new_range->n_sectors) & (unsigned)(ic->sectors_per_block - 1));

	if (likely(check_waiting)) {
		struct dm_integrity_range *range;
		list_for_each_entry(range, &ic->wait_list, wait_entry) {
			if (unlikely(ranges_overlap(range, new_range)))
				return false;
		}
	}

	parent = NULL;

	while (*n) {
		struct dm_integrity_range *range = container_of(*n, struct dm_integrity_range, node);

		parent = *n;
		if (new_range->logical_sector + new_range->n_sectors <= range->logical_sector) {
			n = &range->node.rb_left;
		} else if (new_range->logical_sector >= range->logical_sector + range->n_sectors) {
			n = &range->node.rb_right;
		} else {
			return false;
		}
	}

	rb_link_node(&new_range->node, parent, n);
	rb_insert_color(&new_range->node, &ic->in_progress);

	return true;
}

static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity_range *range)
{
	rb_erase(&range->node, &ic->in_progress);
	while (unlikely(!list_empty(&ic->wait_list))) {
		struct dm_integrity_range *last_range =
			list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry);
		struct task_struct *last_range_task;
		last_range_task = last_range->task;
		list_del(&last_range->wait_entry);
		if (!add_new_range(ic, last_range, false)) {
			last_range->task = last_range_task;
			list_add(&last_range->wait_entry, &ic->wait_list);
			break;
		}
		last_range->waiting = false;
		wake_up_process(last_range_task);
	}
}

static void remove_range(struct dm_integrity_c *ic, struct dm_integrity_range *range)
{
	unsigned long flags;

	spin_lock_irqsave(&ic->endio_wait.lock, flags);
	remove_range_unlocked(ic, range);
	spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
}

static void wait_and_add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
{
	new_range->waiting = true;
	list_add_tail(&new_range->wait_entry, &ic->wait_list);
	new_range->task = current;
	do {
		__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&ic->endio_wait.lock);
		io_schedule();
		spin_lock_irq(&ic->endio_wait.lock);
	} while (unlikely(new_range->waiting));
}

static void add_new_range_and_wait(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
{
	if (unlikely(!add_new_range(ic, new_range, true)))
		wait_and_add_new_range(ic, new_range);
}

static void init_journal_node(struct journal_node *node)
{
	RB_CLEAR_NODE(&node->node);
	node->sector = (sector_t)-1;
}

static void add_journal_node(struct dm_integrity_c *ic, struct journal_node *node, sector_t sector)
{
	struct rb_node **link;
	struct rb_node *parent;

	node->sector = sector;
	BUG_ON(!RB_EMPTY_NODE(&node->node));

	link = &ic->journal_tree_root.rb_node;
	parent = NULL;

	while (*link) {
		struct journal_node *j;
		parent = *link;
		j = container_of(parent, struct journal_node, node);
		if (sector < j->sector)
			link = &j->node.rb_left;
		else
			link = &j->node.rb_right;
	}

	rb_link_node(&node->node, parent, link);
	rb_insert_color(&node->node, &ic->journal_tree_root);
}

static void remove_journal_node(struct dm_integrity_c *ic, struct journal_node *node)
{
	BUG_ON(RB_EMPTY_NODE(&node->node));
	rb_erase(&node->node, &ic->journal_tree_root);
	init_journal_node(node);
}

#define NOT_FOUND	(-1U)

static unsigned find_journal_node(struct dm_integrity_c *ic, sector_t sector, sector_t *next_sector)
{
	struct rb_node *n = ic->journal_tree_root.rb_node;
	unsigned found = NOT_FOUND;
	*next_sector = (sector_t)-1;
	while (n) {
		struct journal_node *j = container_of(n, struct journal_node, node);
		if (sector == j->sector) {
			found = j - ic->journal_tree;
		}
		if (sector < j->sector) {
			*next_sector = j->sector;
			n = j->node.rb_left;
		} else {
			n = j->node.rb_right;
		}
	}

	return found;
}

static bool test_journal_node(struct dm_integrity_c *ic, unsigned pos, sector_t sector)
{
	struct journal_node *node, *next_node;
	struct rb_node *next;

	if (unlikely(pos >= ic->journal_entries))
		return false;
	node = &ic->journal_tree[pos];
	if (unlikely(RB_EMPTY_NODE(&node->node)))
		return false;
	if (unlikely(node->sector != sector))
		return false;

	next = rb_next(&node->node);
	if (unlikely(!next))
		return true;

	next_node = container_of(next, struct journal_node, node);
	return next_node->sector != sector;
}

static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_node *node)
{
	struct rb_node *next;
	struct journal_node *next_node;
	unsigned next_section;

	BUG_ON(RB_EMPTY_NODE(&node->node));

	next = rb_next(&node->node);
	if (unlikely(!next))
		return false;

	next_node = container_of(next, struct journal_node, node);

	if (next_node->sector != node->sector)
		return false;

	next_section = (unsigned)(next_node - ic->journal_tree) / ic->journal_section_entries;
	if (next_section >= ic->committed_section &&
	    next_section < ic->committed_section + ic->n_committed_sections)
		return true;
	if (next_section + ic->journal_sections < ic->committed_section + ic->n_committed_sections)
		return true;

	return false;
}
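
/*
 * dm_integrity_rw_tag() walks the metadata tags for a run of blocks,
 * reading, writing or comparing up to total_size bytes. In TAG_CMP mode a
 * mismatch is tolerated only if the tag area still holds the
 * DISCARD_FILLER pattern (the block was discarded and never rewritten);
 * the MAY_BE_HASH/MAY_BE_FILLER bits track which interpretation is still
 * possible, and a positive return value reports how many bytes remained
 * when a genuine mismatch was detected.
 */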

#define TAG_READ	0
#define TAG_WRITE	1
#define TAG_CMP		2

static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, sector_t *metadata_block,
			       unsigned *metadata_offset, unsigned total_size, int op)
{
#define MAY_BE_FILLER		1
#define MAY_BE_HASH		2
	unsigned hash_offset = 0;
	unsigned may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0);

	do {
		unsigned char *data, *dp;
		struct dm_buffer *b;
		unsigned to_copy;
		int r;

		r = dm_integrity_failed(ic);
		if (unlikely(r))
			return r;

		data = dm_bufio_read(ic->bufio, *metadata_block, &b);
		if (IS_ERR(data))
			return PTR_ERR(data);

		to_copy = min((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - *metadata_offset, total_size);
		dp = data + *metadata_offset;
		if (op == TAG_READ) {
			memcpy(tag, dp, to_copy);
		} else if (op == TAG_WRITE) {
			if (memcmp(dp, tag, to_copy)) {
				memcpy(dp, tag, to_copy);
				dm_bufio_mark_partial_buffer_dirty(b, *metadata_offset, *metadata_offset + to_copy);
			}
		} else {
			/* e.g.: op == TAG_CMP */

			if (likely(is_power_of_2(ic->tag_size))) {
				if (unlikely(memcmp(dp, tag, to_copy)))
					if (unlikely(!ic->discard) ||
					    unlikely(memchr_inv(dp, DISCARD_FILLER, to_copy) != NULL)) {
						goto thorough_test;
					}
			} else {
				unsigned i, ts;
thorough_test:
				ts = total_size;

				for (i = 0; i < to_copy; i++, ts--) {
					if (unlikely(dp[i] != tag[i]))
						may_be &= ~MAY_BE_HASH;
					if (likely(dp[i] != DISCARD_FILLER))
						may_be &= ~MAY_BE_FILLER;
					hash_offset++;
					if (unlikely(hash_offset == ic->tag_size)) {
						if (unlikely(!may_be)) {
							dm_bufio_release(b);
							return ts;
						}
						hash_offset = 0;
						may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0);
					}
				}
			}
		}
		dm_bufio_release(b);

		tag += to_copy;
		*metadata_offset += to_copy;
		if (unlikely(*metadata_offset == 1U << SECTOR_SHIFT << ic->log2_buffer_sectors)) {
			(*metadata_block)++;
			*metadata_offset = 0;
		}

		if (unlikely(!is_power_of_2(ic->tag_size))) {
			hash_offset = (hash_offset + to_copy) % ic->tag_size;
		}

		total_size -= to_copy;
	} while (unlikely(total_size));

	return 0;
#undef MAY_BE_FILLER
#undef MAY_BE_HASH
}

struct flush_request {
	struct dm_io_request io_req;
	struct dm_io_region io_reg;
	struct dm_integrity_c *ic;
	struct completion comp;
};

static void flush_notify(unsigned long error, void *fr_)
{
	struct flush_request *fr = fr_;
	if (unlikely(error != 0))
		dm_integrity_io_error(fr->ic, "flushing disk cache", -EIO);
	complete(&fr->comp);
}

static void dm_integrity_flush_buffers(struct dm_integrity_c *ic, bool flush_data)
{
	int r;
	struct flush_request fr;

	if (!ic->meta_dev)
		flush_data = false;
	if (flush_data) {
		fr.io_req.bi_op = REQ_OP_WRITE,
		fr.io_req.bi_op_flags = REQ_PREFLUSH | REQ_SYNC,
		fr.io_req.mem.type = DM_IO_KMEM,
		fr.io_req.mem.ptr.addr = NULL,
		fr.io_req.notify.fn = flush_notify,
		fr.io_req.notify.context = &fr;
		fr.io_req.client = dm_bufio_get_dm_io_client(ic->bufio),
		fr.io_reg.bdev = ic->dev->bdev,
		fr.io_reg.sector = 0,
		fr.io_reg.count = 0,
		fr.ic = ic;
		init_completion(&fr.comp);
		r = dm_io(&fr.io_req, 1, &fr.io_reg, NULL);
		BUG_ON(r);
	}

	r = dm_bufio_write_dirty_buffers(ic->bufio);
	if (unlikely(r))
		dm_integrity_io_error(ic, "writing tags", r);

	if (flush_data)
		wait_for_completion(&fr.comp);
}

static void sleep_on_endio_wait(struct dm_integrity_c *ic)
{
	DECLARE_WAITQUEUE(wait, current);
	__add_wait_queue(&ic->endio_wait, &wait);
	__set_current_state(TASK_UNINTERRUPTIBLE);
	spin_unlock_irq(&ic->endio_wait.lock);
	io_schedule();
	spin_lock_irq(&ic->endio_wait.lock);
	__remove_wait_queue(&ic->endio_wait, &wait);
}

static void autocommit_fn(struct timer_list *t)
{
	struct dm_integrity_c *ic = from_timer(ic, t, autocommit_timer);

	if (likely(!dm_integrity_failed(ic)))
		queue_work(ic->commit_wq, &ic->commit_work);
}

static void schedule_autocommit(struct dm_integrity_c *ic)
{
	if (!timer_pending(&ic->autocommit_timer))
		mod_timer(&ic->autocommit_timer, jiffies + ic->autocommit_jiffies);
}

static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
{
	struct bio *bio;
	unsigned long flags;

	spin_lock_irqsave(&ic->endio_wait.lock, flags);
	bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
	bio_list_add(&ic->flush_bio_list, bio);
	spin_unlock_irqrestore(&ic->endio_wait.lock, flags);

	queue_work(ic->commit_wq, &ic->commit_work);
}

static void do_endio(struct dm_integrity_c *ic, struct bio *bio)
{
	int r = dm_integrity_failed(ic);
	if (unlikely(r) && !bio->bi_status)
		bio->bi_status = errno_to_blk_status(r);
	if (unlikely(ic->synchronous_mode) && bio_op(bio) == REQ_OP_WRITE) {
		unsigned long flags;
		spin_lock_irqsave(&ic->endio_wait.lock, flags);
		bio_list_add(&ic->synchronous_bios, bio);
		queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
		spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
		return;
	}
	bio_endio(bio);
}

static void do_endio_flush(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
{
	struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));

	if (unlikely(dio->fua) && likely(!bio->bi_status) && likely(!dm_integrity_failed(ic)))
		submit_flush_bio(ic, dio);
	else
		do_endio(ic, bio);
}

static void dec_in_flight(struct dm_integrity_io *dio)
{
	if (atomic_dec_and_test(&dio->in_flight)) {
		struct dm_integrity_c *ic = dio->ic;
		struct bio *bio;

		remove_range(ic, &dio->range);

		if (dio->op == REQ_OP_WRITE || unlikely(dio->op == REQ_OP_DISCARD))
			schedule_autocommit(ic);

		bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));

		if (unlikely(dio->bi_status) && !bio->bi_status)
			bio->bi_status = dio->bi_status;
		if (likely(!bio->bi_status) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) {
			dio->range.logical_sector += dio->range.n_sectors;
			bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT);
			INIT_WORK(&dio->work, integrity_bio_wait);
			queue_work(ic->offload_wq, &dio->work);
			return;
		}
		do_endio_flush(ic, dio);
	}
}

static void integrity_end_io(struct bio *bio)
{
	struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));

	dm_bio_restore(&dio->bio_details, bio);
	if (bio->bi_integrity)
		bio->bi_opf |= REQ_INTEGRITY;

	if (dio->completion)
		complete(dio->completion);

	dec_in_flight(dio);
}

static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector,
				      const char *data, char *result)
{
	__le64 sector_le = cpu_to_le64(sector);
	SHASH_DESC_ON_STACK(req, ic->internal_hash);
	int r;
	unsigned digest_size;

	req->tfm = ic->internal_hash;

	r = crypto_shash_init(req);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_init", r);
		goto failed;
	}

	if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) {
		r = crypto_shash_update(req, (__u8 *)&ic->sb->salt, SALT_SIZE);
		if (unlikely(r < 0)) {
			dm_integrity_io_error(ic, "crypto_shash_update", r);
			goto failed;
		}
	}

	r = crypto_shash_update(req, (const __u8 *)&sector_le, sizeof sector_le);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_update", r);
		goto failed;
	}

	r = crypto_shash_update(req, data, ic->sectors_per_block << SECTOR_SHIFT);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_update", r);
		goto failed;
	}

	r = crypto_shash_final(req, result);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_final", r);
		goto failed;
	}

	digest_size = crypto_shash_digestsize(ic->internal_hash);
	if (unlikely(digest_size < ic->tag_size))
		memset(result + digest_size, 0, ic->tag_size - digest_size);

	return;

failed:
	/* this shouldn't happen anyway, the hash functions have no reason to fail */
	get_random_bytes(result, ic->tag_size);
}

static void integrity_metadata(struct work_struct *w)
{
	struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
	struct dm_integrity_c *ic = dio->ic;

	int r;

	if (ic->internal_hash) {
		struct bvec_iter iter;
		struct bio_vec bv;
		unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
		struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
		char *checksums;
		unsigned extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
		char checksums_onstack[max((size_t)HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
		sector_t sector;
		unsigned sectors_to_process;

		if (unlikely(ic->mode == 'R'))
			goto skip_io;

		if (likely(dio->op != REQ_OP_DISCARD))
			checksums = kmalloc((PAGE_SIZE >> SECTOR_SHIFT >> ic->sb->log2_sectors_per_block) * ic->tag_size + extra_space,
					    GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
		else
			checksums = kmalloc(PAGE_SIZE, GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
		if (!checksums) {
			checksums = checksums_onstack;
			if (WARN_ON(extra_space &&
				    digest_size > sizeof(checksums_onstack))) {
				r = -EINVAL;
				goto error;
			}
		}

		if (unlikely(dio->op == REQ_OP_DISCARD)) {
			sector_t bi_sector = dio->bio_details.bi_iter.bi_sector;
			unsigned bi_size = dio->bio_details.bi_iter.bi_size;
			unsigned max_size = likely(checksums != checksums_onstack) ? PAGE_SIZE : HASH_MAX_DIGESTSIZE;
			unsigned max_blocks = max_size / ic->tag_size;
			memset(checksums, DISCARD_FILLER, max_size);

			while (bi_size) {
				unsigned this_step_blocks = bi_size >> (SECTOR_SHIFT + ic->sb->log2_sectors_per_block);
				this_step_blocks = min(this_step_blocks, max_blocks);
				r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
							this_step_blocks * ic->tag_size, TAG_WRITE);
				if (unlikely(r)) {
					if (likely(checksums != checksums_onstack))
						kfree(checksums);
					goto error;
				}

				/*if (bi_size < this_step_blocks << (SECTOR_SHIFT + ic->sb->log2_sectors_per_block)) {
					printk("BUGG: bi_sector: %llx, bi_size: %u\n", bi_sector, bi_size);
					printk("BUGG: this_step_blocks: %u\n", this_step_blocks);
					BUG();
				}*/

				bi_size -= this_step_blocks << (SECTOR_SHIFT + ic->sb->log2_sectors_per_block);
				bi_sector += this_step_blocks << ic->sb->log2_sectors_per_block;
			}

			if (likely(checksums != checksums_onstack))
				kfree(checksums);
			goto skip_io;
		}

		sector = dio->range.logical_sector;
		sectors_to_process = dio->range.n_sectors;

		__bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
			unsigned pos;
			char *mem, *checksums_ptr;

again:
			mem = (char *)kmap_atomic(bv.bv_page) + bv.bv_offset;
			pos = 0;
			checksums_ptr = checksums;
			do {
				integrity_sector_checksum(ic, sector, mem + pos, checksums_ptr);
				checksums_ptr += ic->tag_size;
				sectors_to_process -= ic->sectors_per_block;
				pos += ic->sectors_per_block << SECTOR_SHIFT;
				sector += ic->sectors_per_block;
			} while (pos < bv.bv_len && sectors_to_process && checksums != checksums_onstack);
			kunmap_atomic(mem);

			r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
						checksums_ptr - checksums, dio->op == REQ_OP_READ ? TAG_CMP : TAG_WRITE);
			if (unlikely(r)) {
				if (r > 0) {
					char b[BDEVNAME_SIZE];
					DMERR_LIMIT("%s: Checksum failed at sector 0x%llx", bio_devname(bio, b),
						    (sector - ((r + ic->tag_size - 1) / ic->tag_size)));
					r = -EILSEQ;
					atomic64_inc(&ic->number_of_mismatches);
				}
				if (likely(checksums != checksums_onstack))
					kfree(checksums);
				goto error;
			}

			if (!sectors_to_process)
				break;

			if (unlikely(pos < bv.bv_len)) {
				bv.bv_offset += pos;
				bv.bv_len -= pos;
				goto again;
			}
		}

		if (likely(checksums != checksums_onstack))
			kfree(checksums);
	} else {
		struct bio_integrity_payload *bip = dio->bio_details.bi_integrity;

		if (bip) {
			struct bio_vec biv;
			struct bvec_iter iter;
			unsigned data_to_process = dio->range.n_sectors;
			sector_to_block(ic, data_to_process);
			data_to_process *= ic->tag_size;

			bip_for_each_vec(biv, bip, iter) {
				unsigned char *tag;
				unsigned this_len;

				BUG_ON(PageHighMem(biv.bv_page));
				tag = bvec_virt(&biv);
				this_len = min(biv.bv_len, data_to_process);
				r = dm_integrity_rw_tag(ic, tag, &dio->metadata_block, &dio->metadata_offset,
							this_len, dio->op == REQ_OP_READ ? TAG_READ : TAG_WRITE);
				if (unlikely(r))
					goto error;
				data_to_process -= this_len;
				if (!data_to_process)
					break;
			}
		}
	}
skip_io:
	dec_in_flight(dio);
	return;
error:
	dio->bi_status = errno_to_blk_status(r);
	dec_in_flight(dio);
}

static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_integrity_c *ic = ti->private;
	struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
	struct bio_integrity_payload *bip;

	sector_t area, offset;

	dio->ic = ic;
	dio->bi_status = 0;
	dio->op = bio_op(bio);

	if (unlikely(dio->op == REQ_OP_DISCARD)) {
		if (ti->max_io_len) {
			sector_t sec = dm_target_offset(ti, bio->bi_iter.bi_sector);
			unsigned log2_max_io_len = __fls(ti->max_io_len);
			sector_t start_boundary = sec >> log2_max_io_len;
			sector_t end_boundary = (sec + bio_sectors(bio) - 1) >> log2_max_io_len;
			if (start_boundary < end_boundary) {
				sector_t len = ti->max_io_len - (sec & (ti->max_io_len - 1));
				dm_accept_partial_bio(bio, len);
			}
		}
	}

	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
		submit_flush_bio(ic, dio);
		return DM_MAPIO_SUBMITTED;
	}

	dio->range.logical_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
	dio->fua = dio->op == REQ_OP_WRITE && bio->bi_opf & REQ_FUA;
	if (unlikely(dio->fua)) {
		/*
		 * Don't pass down the FUA flag because we have to flush
		 * disk cache anyway.
		 */
		bio->bi_opf &= ~REQ_FUA;
	}
	if (unlikely(dio->range.logical_sector + bio_sectors(bio) > ic->provided_data_sectors)) {
		DMERR("Too big sector number: 0x%llx + 0x%x > 0x%llx",
		      dio->range.logical_sector, bio_sectors(bio),
		      ic->provided_data_sectors);
		return DM_MAPIO_KILL;
	}
	if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned)(ic->sectors_per_block - 1))) {
		DMERR("Bio not aligned on %u sectors: 0x%llx, 0x%x",
		      ic->sectors_per_block,
		      dio->range.logical_sector, bio_sectors(bio));
		return DM_MAPIO_KILL;
	}

	if (ic->sectors_per_block > 1 && likely(dio->op != REQ_OP_DISCARD)) {
		struct bvec_iter iter;
		struct bio_vec bv;
		bio_for_each_segment(bv, bio, iter) {
			if (unlikely(bv.bv_len & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) {
				DMERR("Bio vector (%u,%u) is not aligned on %u-sector boundary",
				      bv.bv_offset, bv.bv_len, ic->sectors_per_block);
				return DM_MAPIO_KILL;
			}
		}
	}

	bip = bio_integrity(bio);
	if (!ic->internal_hash) {
		if (bip) {
			unsigned wanted_tag_size = bio_sectors(bio) >> ic->sb->log2_sectors_per_block;
			if (ic->log2_tag_size >= 0)
				wanted_tag_size <<= ic->log2_tag_size;
			else
				wanted_tag_size *= ic->tag_size;
			if (unlikely(wanted_tag_size != bip->bip_iter.bi_size)) {
				DMERR("Invalid integrity data size %u, expected %u",
				      bip->bip_iter.bi_size, wanted_tag_size);
				return DM_MAPIO_KILL;
			}
		}
	} else {
		if (unlikely(bip != NULL)) {
			DMERR("Unexpected integrity data when using internal hash");
			return DM_MAPIO_KILL;
		}
	}

	if (unlikely(ic->mode == 'R') && unlikely(dio->op != REQ_OP_READ))
		return DM_MAPIO_KILL;

	get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
	dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
	bio->bi_iter.bi_sector = get_data_sector(ic, area, offset);

	dm_integrity_map_continue(dio, true);
	return DM_MAPIO_SUBMITTED;
}

static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
				 unsigned journal_section, unsigned journal_entry)
{
	struct dm_integrity_c *ic = dio->ic;
	sector_t logical_sector;
	unsigned n_sectors;

	logical_sector = dio->range.logical_sector;
	n_sectors = dio->range.n_sectors;
	do {
		struct bio_vec bv = bio_iovec(bio);
		char *mem;

		if (unlikely(bv.bv_len >> SECTOR_SHIFT > n_sectors))
			bv.bv_len = n_sectors << SECTOR_SHIFT;
		n_sectors -= bv.bv_len >> SECTOR_SHIFT;
		bio_advance_iter(bio, &bio->bi_iter, bv.bv_len);
retry_kmap:
		mem = kmap_atomic(bv.bv_page);
		if (likely(dio->op == REQ_OP_WRITE))
			flush_dcache_page(bv.bv_page);

		do {
			struct journal_entry *je = access_journal_entry(ic, journal_section, journal_entry);

			if (unlikely(dio->op == REQ_OP_READ)) {
				struct journal_sector *js;
				char *mem_ptr;
				unsigned s;

				if (unlikely(journal_entry_is_inprogress(je))) {
					flush_dcache_page(bv.bv_page);
					kunmap_atomic(mem);

					__io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
					goto retry_kmap;
				}
				smp_rmb();
				BUG_ON(journal_entry_get_sector(je) != logical_sector);
				js = access_journal_data(ic, journal_section, journal_entry);
				mem_ptr = mem + bv.bv_offset;
				s = 0;
				do {
					memcpy(mem_ptr, js, JOURNAL_SECTOR_DATA);
					*(commit_id_t *)(mem_ptr + JOURNAL_SECTOR_DATA) = je->last_bytes[s];
					js++;
					mem_ptr += 1 << SECTOR_SHIFT;
				} while (++s < ic->sectors_per_block);
#ifdef INTERNAL_VERIFY
				if (ic->internal_hash) {
					char checksums_onstack[max((size_t)HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];

					integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
					if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
						DMERR_LIMIT("Checksum failed when reading from journal, at sector 0x%llx",
							    logical_sector);
					}
				}
#endif
			}

			if (!ic->internal_hash) {
				struct bio_integrity_payload *bip = bio_integrity(bio);
				unsigned tag_todo = ic->tag_size;
				char *tag_ptr = journal_entry_tag(ic, je);

				if (bip) do {
					struct bio_vec biv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
					unsigned tag_now = min(biv.bv_len, tag_todo);
					char *tag_addr;
					BUG_ON(PageHighMem(biv.bv_page));
					tag_addr = bvec_virt(&biv);
					if (likely(dio->op == REQ_OP_WRITE))
						memcpy(tag_ptr, tag_addr, tag_now);
					else
						memcpy(tag_addr, tag_ptr, tag_now);
					bvec_iter_advance(bip->bip_vec, &bip->bip_iter, tag_now);
					tag_ptr += tag_now;
					tag_todo -= tag_now;
				} while (unlikely(tag_todo)); else {
					if (likely(dio->op == REQ_OP_WRITE))
						memset(tag_ptr, 0, tag_todo);
				}
			}

			if (likely(dio->op == REQ_OP_WRITE)) {
				struct journal_sector *js;
				unsigned s;

				js = access_journal_data(ic, journal_section, journal_entry);
				memcpy(js, mem + bv.bv_offset, ic->sectors_per_block << SECTOR_SHIFT);

				s = 0;
				do {
					je->last_bytes[s] = js[s].commit_id;
				} while (++s < ic->sectors_per_block);

				if (ic->internal_hash) {
					unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
					if (unlikely(digest_size > ic->tag_size)) {
						char checksums_onstack[HASH_MAX_DIGESTSIZE];
						integrity_sector_checksum(ic, logical_sector, (char *)js, checksums_onstack);
						memcpy(journal_entry_tag(ic, je), checksums_onstack, ic->tag_size);
					} else
						integrity_sector_checksum(ic, logical_sector, (char *)js, journal_entry_tag(ic, je));
				}

				journal_entry_set_sector(je, logical_sector);
			}
			logical_sector += ic->sectors_per_block;

			journal_entry++;
			if (unlikely(journal_entry == ic->journal_section_entries)) {
				journal_entry = 0;
				journal_section++;
				wraparound_section(ic, &journal_section);
			}

			bv.bv_offset += ic->sectors_per_block << SECTOR_SHIFT;
		} while (bv.bv_len -= ic->sectors_per_block << SECTOR_SHIFT);

		if (unlikely(dio->op == REQ_OP_READ))
			flush_dcache_page(bv.bv_page);
		kunmap_atomic(mem);
	} while (n_sectors);

	if (likely(dio->op == REQ_OP_WRITE)) {
		smp_mb();
		if (unlikely(waitqueue_active(&ic->copy_to_journal_wait)))
			wake_up(&ic->copy_to_journal_wait);
		if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) {
			queue_work(ic->commit_wq, &ic->commit_work);
		} else {
			schedule_autocommit(ic);
		}
	} else {
		remove_range(ic, &dio->range);
	}

	if (unlikely(bio->bi_iter.bi_size)) {
		sector_t area, offset;

		dio->range.logical_sector = logical_sector;
		get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
		dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
		return true;
	}

	return false;
}

static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map)
{
	struct dm_integrity_c *ic = dio->ic;
	struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
	unsigned journal_section, journal_entry;
	unsigned journal_read_pos;
	struct completion read_comp;
	bool discard_retried = false;
	bool need_sync_io = ic->internal_hash && dio->op == REQ_OP_READ;

	if (unlikely(dio->op == REQ_OP_DISCARD) && ic->mode != 'D')
		need_sync_io = true;

	if (need_sync_io && from_map) {
		INIT_WORK(&dio->work, integrity_bio_wait);
		queue_work(ic->offload_wq, &dio->work);
		return;
	}

lock_retry:
	spin_lock_irq(&ic->endio_wait.lock);
retry:
	if (unlikely(dm_integrity_failed(ic))) {
		spin_unlock_irq(&ic->endio_wait.lock);
		do_endio(ic, bio);
		return;
	}
	dio->range.n_sectors = bio_sectors(bio);
	journal_read_pos = NOT_FOUND;
	if (ic->mode == 'J' && likely(dio->op != REQ_OP_DISCARD)) {
		if (dio->op == REQ_OP_WRITE) {
			unsigned next_entry, i, pos;
			unsigned ws, we, range_sectors;

			dio->range.n_sectors = min(dio->range.n_sectors,
						   (sector_t)ic->free_sectors << ic->sb->log2_sectors_per_block);
			if (unlikely(!dio->range.n_sectors)) {
				if (from_map)
					goto offload_to_thread;
				sleep_on_endio_wait(ic);
				goto retry;
			}
			range_sectors = dio->range.n_sectors >> ic->sb->log2_sectors_per_block;
			ic->free_sectors -= range_sectors;
			journal_section = ic->free_section;
			journal_entry = ic->free_section_entry;

			next_entry = ic->free_section_entry + range_sectors;
			ic->free_section_entry = next_entry % ic->journal_section_entries;
			ic->free_section += next_entry / ic->journal_section_entries;
			ic->n_uncommitted_sections += next_entry / ic->journal_section_entries;
			wraparound_section(ic, &ic->free_section);

			pos = journal_section * ic->journal_section_entries + journal_entry;
			ws = journal_section;
			we = journal_entry;
			i = 0;
			do {
				struct journal_entry *je;

				add_journal_node(ic, &ic->journal_tree[pos], dio->range.logical_sector + i);
				pos++;
				if (unlikely(pos >= ic->journal_entries))
					pos = 0;

				je = access_journal_entry(ic, ws, we);
				BUG_ON(!journal_entry_is_unused(je));
				journal_entry_set_inprogress(je);
				we++;
				if (unlikely(we == ic->journal_section_entries)) {
					we = 0;
					ws++;
					wraparound_section(ic, &ws);
				}
			} while ((i += ic->sectors_per_block) < dio->range.n_sectors);

			spin_unlock_irq(&ic->endio_wait.lock);
			goto journal_read_write;
		} else {
			sector_t next_sector;

			journal_read_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
			if (likely(journal_read_pos == NOT_FOUND)) {
				if (unlikely(dio->range.n_sectors > next_sector - dio->range.logical_sector))
					dio->range.n_sectors = next_sector - dio->range.logical_sector;
			} else {
				unsigned i;
				unsigned jp = journal_read_pos + 1;

				for (i = ic->sectors_per_block; i < dio->range.n_sectors; i += ic->sectors_per_block, jp++) {
					if (!test_journal_node(ic, jp, dio->range.logical_sector + i))
						break;
				}
				dio->range.n_sectors = i;
			}
		}
	}
	if (unlikely(!add_new_range(ic, &dio->range, true))) {
		/*
		 * We must not sleep in the request routine because it could
		 * stall bios on current->bio_list.
		 * So, we offload the bio to a workqueue if we have to sleep.
		 */
		if (from_map) {
offload_to_thread:
			spin_unlock_irq(&ic->endio_wait.lock);
			INIT_WORK(&dio->work, integrity_bio_wait);
			queue_work(ic->wait_wq, &dio->work);
			return;
		}
		if (journal_read_pos != NOT_FOUND)
			dio->range.n_sectors = ic->sectors_per_block;
		wait_and_add_new_range(ic, &dio->range);
		/*
		 * wait_and_add_new_range drops the spinlock, so the journal
		 * may have been changed arbitrarily. We need to recheck.
		 * To simplify the code, we restrict I/O size to just one block.
		 */
		if (journal_read_pos != NOT_FOUND) {
			sector_t next_sector;
			unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);

			if (unlikely(new_pos != journal_read_pos)) {
				remove_range_unlocked(ic, &dio->range);
				goto retry;
			}
		}
	}
	if (ic->mode == 'J' && likely(dio->op == REQ_OP_DISCARD) && !discard_retried) {
		sector_t next_sector;
		unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);

		if (unlikely(new_pos != NOT_FOUND) ||
		    unlikely(next_sector < dio->range.logical_sector - dio->range.n_sectors)) {
			remove_range_unlocked(ic, &dio->range);
			spin_unlock_irq(&ic->endio_wait.lock);
			queue_work(ic->commit_wq, &ic->commit_work);
			flush_workqueue(ic->commit_wq);
			queue_work(ic->writer_wq, &ic->writer_work);
			flush_workqueue(ic->writer_wq);
			discard_retried = true;
			goto lock_retry;
		}
	}
	spin_unlock_irq(&ic->endio_wait.lock);

	if (unlikely(journal_read_pos != NOT_FOUND)) {
		journal_section = journal_read_pos / ic->journal_section_entries;
		journal_entry = journal_read_pos % ic->journal_section_entries;
		goto journal_read_write;
	}

	if (ic->mode == 'B' && (dio->op == REQ_OP_WRITE || unlikely(dio->op == REQ_OP_DISCARD))) {
		if (!block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
				     dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
			struct bitmap_block_status *bbs;

			bbs = sector_to_bitmap_block(ic, dio->range.logical_sector);
			spin_lock(&bbs->bio_queue_lock);
			bio_list_add(&bbs->bio_queue, bio);
			spin_unlock(&bbs->bio_queue_lock);
			queue_work(ic->writer_wq, &bbs->work);
			return;
		}
	}

	dio->in_flight = (atomic_t)ATOMIC_INIT(2);

	if (need_sync_io) {
		init_completion(&read_comp);
		dio->completion = &read_comp;
	} else
		dio->completion = NULL;

	dm_bio_record(&dio->bio_details, bio);
	bio_set_dev(bio, ic->dev->bdev);
	bio->bi_integrity = NULL;
	bio->bi_opf &= ~REQ_INTEGRITY;
	bio->bi_end_io = integrity_end_io;
	bio->bi_iter.bi_size = dio->range.n_sectors << SECTOR_SHIFT;

	if (unlikely(dio->op == REQ_OP_DISCARD) && likely(ic->mode != 'D')) {
		integrity_metadata(&dio->work);
		dm_integrity_flush_buffers(ic, false);

		dio->in_flight = (atomic_t)ATOMIC_INIT(1);
		dio->completion = NULL;

		submit_bio_noacct(bio);

		return;
	}

	submit_bio_noacct(bio);

	if (need_sync_io) {
		wait_for_completion_io(&read_comp);
		if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
		    dio->range.logical_sector + dio->range.n_sectors > le64_to_cpu(ic->sb->recalc_sector))
			goto skip_check;
		if (ic->mode == 'B') {
			if (!block_bitmap_op(ic, ic->recalc_bitmap, dio->range.logical_sector,
					     dio->range.n_sectors, BITMAP_OP_TEST_ALL_CLEAR))
				goto skip_check;
		}

		if (likely(!bio->bi_status))
			integrity_metadata(&dio->work);
		else
skip_check:
			dec_in_flight(dio);
	} else {
		INIT_WORK(&dio->work, integrity_metadata);
		queue_work(ic->metadata_wq, &dio->work);
	}

	return;

journal_read_write:
	if (unlikely(__journal_read_write(dio, bio, journal_section, journal_entry)))
		goto lock_retry;

	do_endio_flush(ic, dio);
}
static void integrity_bio_wait(struct work_struct *w)
{
	struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);

	dm_integrity_map_continue(dio, false);
}
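
/*
 * Pad the current journal section with unused entries so that the next
 * write starts on a section boundary, and sanity-check the free-sector
 * accounting.
 */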
static void pad_uncommitted(struct dm_integrity_c *ic)
{
	if (ic->free_section_entry) {
		ic->free_sectors -= ic->journal_section_entries - ic->free_section_entry;
		ic->free_section_entry = 0;
		ic->free_section++;
		wraparound_section(ic, &ic->free_section);
		ic->n_uncommitted_sections++;
	}
	if (WARN_ON(ic->journal_sections * ic->journal_section_entries !=
		    (ic->n_uncommitted_sections + ic->n_committed_sections) *
		    ic->journal_section_entries + ic->free_sectors)) {
		DMCRIT("journal_sections %u, journal_section_entries %u, "
		       "n_uncommitted_sections %u, n_committed_sections %u, "
		       "journal_section_entries %u, free_sectors %u",
		       ic->journal_sections, ic->journal_section_entries,
		       ic->n_uncommitted_sections, ic->n_committed_sections,
		       ic->journal_section_entries, ic->free_sectors);
	}
}
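
/*
 * Commit work: wait until all in-progress journal entries are filled in,
 * stamp each journal sector with the current commit id, write the journal
 * out and then complete any queued flush bios.
 */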
static void integrity_commit(struct work_struct *w)
{
	struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, commit_work);
	unsigned commit_start, commit_sections;
	unsigned i, j, n;
	struct bio *flushes;

	del_timer(&ic->autocommit_timer);

	spin_lock_irq(&ic->endio_wait.lock);
	flushes = bio_list_get(&ic->flush_bio_list);
	if (unlikely(ic->mode != 'J')) {
		spin_unlock_irq(&ic->endio_wait.lock);
		dm_integrity_flush_buffers(ic, true);
		goto release_flush_bios;
	}

	pad_uncommitted(ic);
	commit_start = ic->uncommitted_section;
	commit_sections = ic->n_uncommitted_sections;
	spin_unlock_irq(&ic->endio_wait.lock);

	if (!commit_sections)
		goto release_flush_bios;

	i = commit_start;
	for (n = 0; n < commit_sections; n++) {
		for (j = 0; j < ic->journal_section_entries; j++) {
			struct journal_entry *je;

			je = access_journal_entry(ic, i, j);
			io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
		}
		for (j = 0; j < ic->journal_section_sectors; j++) {
			struct journal_sector *js;

			js = access_journal(ic, i, j);
			js->commit_id = dm_integrity_commit_id(ic, i, j, ic->commit_seq);
		}
		i++;
		if (unlikely(i >= ic->journal_sections))
			ic->commit_seq = next_commit_seq(ic->commit_seq);
		wraparound_section(ic, &i);
	}
	smp_rmb();

	write_journal(ic, commit_start, commit_sections);

	spin_lock_irq(&ic->endio_wait.lock);
	ic->uncommitted_section += commit_sections;
	wraparound_section(ic, &ic->uncommitted_section);
	ic->n_uncommitted_sections -= commit_sections;
	ic->n_committed_sections += commit_sections;
	spin_unlock_irq(&ic->endio_wait.lock);

	if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold)
		queue_work(ic->writer_wq, &ic->writer_work);

release_flush_bios:
	while (flushes) {
		struct bio *next = flushes->bi_next;

		flushes->bi_next = NULL;
		do_endio(ic, flushes);
		flushes = next;
	}
}
static void complete_copy_from_journal(unsigned long error, void *context)
{
	struct journal_io *io = context;
	struct journal_completion *comp = io->comp;
	struct dm_integrity_c *ic = comp->ic;

	remove_range(ic, &io->range);
	mempool_free(io, &ic->journal_io_mempool);
	if (unlikely(error != 0))
		dm_integrity_io_error(ic, "copying from journal", -EIO);
	complete_journal_op(comp);
}
static void restore_last_bytes(struct dm_integrity_c *ic, struct journal_sector *js,
			       struct journal_entry *je)
{
	unsigned s = 0;

	do {
		js->commit_id = je->last_bytes[s];
		js++;
	} while (++s < ic->sectors_per_block);
}
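
/*
 * Write committed journal sections back to the device. Consecutive journal
 * entries that map to contiguous device areas are coalesced into one copy
 * operation. With from_replay set (crash recovery), the tags are
 * additionally verified against the journaled data.
 */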
static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
			     unsigned write_sections, bool from_replay)
{
	unsigned i, j, n;
	struct journal_completion comp;
	struct blk_plug plug;

	blk_start_plug(&plug);

	comp.ic = ic;
	comp.in_flight = (atomic_t)ATOMIC_INIT(1);
	init_completion(&comp.comp);

	i = write_start;
	for (n = 0; n < write_sections; n++, i++, wraparound_section(ic, &i)) {
#ifndef INTERNAL_VERIFY
		if (unlikely(from_replay))
#endif
			rw_section_mac(ic, i, false);
		for (j = 0; j < ic->journal_section_entries; j++) {
			struct journal_entry *je = access_journal_entry(ic, i, j);
			sector_t sec, area, offset;
			unsigned k, l, next_loop;
			sector_t metadata_block;
			unsigned metadata_offset;
			struct journal_io *io;

			if (journal_entry_is_unused(je))
				continue;
			BUG_ON(unlikely(journal_entry_is_inprogress(je)) && !from_replay);
			sec = journal_entry_get_sector(je);
			if (unlikely(from_replay)) {
				if (unlikely(sec & (unsigned)(ic->sectors_per_block - 1))) {
					dm_integrity_io_error(ic, "invalid sector in journal", -EIO);
					sec &= ~(sector_t)(ic->sectors_per_block - 1);
				}
				if (unlikely(sec >= ic->provided_data_sectors)) {
					journal_entry_set_unused(je);
					continue;
				}
			}
			get_area_and_offset(ic, sec, &area, &offset);
			restore_last_bytes(ic, access_journal_data(ic, i, j), je);
			for (k = j + 1; k < ic->journal_section_entries; k++) {
				struct journal_entry *je2 = access_journal_entry(ic, i, k);
				sector_t sec2, area2, offset2;

				if (journal_entry_is_unused(je2))
					break;
				BUG_ON(unlikely(journal_entry_is_inprogress(je2)) && !from_replay);
				sec2 = journal_entry_get_sector(je2);
				if (unlikely(sec2 >= ic->provided_data_sectors))
					break;
				get_area_and_offset(ic, sec2, &area2, &offset2);
				if (area2 != area || offset2 != offset + ((k - j) << ic->sb->log2_sectors_per_block))
					break;
				restore_last_bytes(ic, access_journal_data(ic, i, k), je2);
			}
			next_loop = k - 1;

			io = mempool_alloc(&ic->journal_io_mempool, GFP_NOIO);
			io->comp = &comp;
			io->range.logical_sector = sec;
			io->range.n_sectors = (k - j) << ic->sb->log2_sectors_per_block;

			spin_lock_irq(&ic->endio_wait.lock);
			add_new_range_and_wait(ic, &io->range);

			if (likely(!from_replay)) {
				struct journal_node *section_node = &ic->journal_tree[i * ic->journal_section_entries];

				/* don't write if there is newer committed sector */
				while (j < k && find_newer_committed_node(ic, &section_node[j])) {
					struct journal_entry *je2 = access_journal_entry(ic, i, j);

					journal_entry_set_unused(je2);
					remove_journal_node(ic, &section_node[j]);
					j++;
					sec += ic->sectors_per_block;
					offset += ic->sectors_per_block;
				}
				while (j < k && find_newer_committed_node(ic, &section_node[k - 1])) {
					struct journal_entry *je2 = access_journal_entry(ic, i, k - 1);

					journal_entry_set_unused(je2);
					remove_journal_node(ic, &section_node[k - 1]);
					k--;
				}
				if (j == k) {
					remove_range_unlocked(ic, &io->range);
					spin_unlock_irq(&ic->endio_wait.lock);
					mempool_free(io, &ic->journal_io_mempool);
					goto skip_io;
				}
				for (l = j; l < k; l++) {
					remove_journal_node(ic, &section_node[l]);
				}
			}
			spin_unlock_irq(&ic->endio_wait.lock);

			metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
			for (l = j; l < k; l++) {
				int r;
				struct journal_entry *je2 = access_journal_entry(ic, i, l);

				if (
#ifndef INTERNAL_VERIFY
				    unlikely(from_replay) &&
#endif
				    ic->internal_hash) {
					char test_tag[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];

					integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block),
								  (char *)access_journal_data(ic, i, l), test_tag);
					if (unlikely(memcmp(test_tag, journal_entry_tag(ic, je2), ic->tag_size)))
						dm_integrity_io_error(ic, "tag mismatch when replaying journal", -EILSEQ);
				}

				journal_entry_set_unused(je2);
				r = dm_integrity_rw_tag(ic, journal_entry_tag(ic, je2), &metadata_block, &metadata_offset,
							ic->tag_size, TAG_WRITE);
				if (unlikely(r))
					dm_integrity_io_error(ic, "reading tags", r);
			}

			atomic_inc(&comp.in_flight);
			copy_from_journal(ic, i, j << ic->sb->log2_sectors_per_block,
					  (k - j) << ic->sb->log2_sectors_per_block,
					  get_data_sector(ic, area, offset),
					  complete_copy_from_journal, io);
skip_io:
			j = next_loop;
		}
	}

	dm_bufio_write_dirty_buffers_async(ic->bufio);

	blk_finish_plug(&plug);

	complete_journal_op(&comp);
	wait_for_completion_io(&comp.comp);

	dm_integrity_flush_buffers(ic, true);
}
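
/*
 * Writer work: flush committed journal sections to their final location
 * and return the corresponding journal space to the free pool.
 */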
static void integrity_writer(struct work_struct *w)
{
	struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, writer_work);
	unsigned write_start, write_sections;

	unsigned prev_free_sectors;

	/* the following test is not needed, but it tests the replay code */
	if (unlikely(dm_post_suspending(ic->ti)) && !ic->meta_dev)
		return;

	spin_lock_irq(&ic->endio_wait.lock);
	write_start = ic->committed_section;
	write_sections = ic->n_committed_sections;
	spin_unlock_irq(&ic->endio_wait.lock);

	if (!write_sections)
		return;

	do_journal_write(ic, write_start, write_sections, false);

	spin_lock_irq(&ic->endio_wait.lock);

	ic->committed_section += write_sections;
	wraparound_section(ic, &ic->committed_section);
	ic->n_committed_sections -= write_sections;

	prev_free_sectors = ic->free_sectors;
	ic->free_sectors += write_sections * ic->journal_section_entries;
	if (unlikely(!prev_free_sectors))
		wake_up_locked(&ic->endio_wait);

	spin_unlock_irq(&ic->endio_wait.lock);
}
static void recalc_write_super(struct dm_integrity_c *ic)
{
	int r;

	dm_integrity_flush_buffers(ic, false);
	if (dm_integrity_failed(ic))
		return;

	r = sync_rw_sb(ic, REQ_OP_WRITE, 0);
	if (unlikely(r))
		dm_integrity_io_error(ic, "writing superblock", r);
}
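
/*
 * Background recalculation: walk the device in RECALC_SECTORS chunks,
 * read the data, compute the integrity tags and write them to the
 * metadata area, periodically persisting the progress in the superblock.
 */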
static void integrity_recalc(struct work_struct *w)
{
	struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, recalc_work);
	struct dm_integrity_range range;
	struct dm_io_request io_req;
	struct dm_io_region io_loc;
	sector_t area, offset;
	sector_t metadata_block;
	unsigned metadata_offset;
	sector_t logical_sector, n_sectors;
	__u8 *t;
	unsigned i;
	int r;
	unsigned super_counter = 0;

	DEBUG_print("start recalculation... (position %llx)\n", le64_to_cpu(ic->sb->recalc_sector));

	spin_lock_irq(&ic->endio_wait.lock);

next_chunk:

	if (unlikely(dm_post_suspending(ic->ti)))
		goto unlock_ret;

	range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);
	if (unlikely(range.logical_sector >= ic->provided_data_sectors)) {
		if (ic->mode == 'B') {
			block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
			DEBUG_print("queue_delayed_work: bitmap_flush_work\n");
			queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
		}
		goto unlock_ret;
	}

	get_area_and_offset(ic, range.logical_sector, &area, &offset);
	range.n_sectors = min((sector_t)RECALC_SECTORS, ic->provided_data_sectors - range.logical_sector);
	if (!ic->meta_dev)
		range.n_sectors = min(range.n_sectors, ((sector_t)1U << ic->sb->log2_interleave_sectors) - (unsigned)offset);

	add_new_range_and_wait(ic, &range);
	spin_unlock_irq(&ic->endio_wait.lock);
	logical_sector = range.logical_sector;
	n_sectors = range.n_sectors;

	if (ic->mode == 'B') {
		if (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector, n_sectors, BITMAP_OP_TEST_ALL_CLEAR)) {
			goto advance_and_next;
		}
		while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector,
				       ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) {
			logical_sector += ic->sectors_per_block;
			n_sectors -= ic->sectors_per_block;
		}
		while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector + n_sectors - ic->sectors_per_block,
				       ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) {
			n_sectors -= ic->sectors_per_block;
		}
		get_area_and_offset(ic, logical_sector, &area, &offset);
	}

	DEBUG_print("recalculating: %llx, %llx\n", logical_sector, n_sectors);

	if (unlikely(++super_counter == RECALC_WRITE_SUPER)) {
		recalc_write_super(ic);
		if (ic->mode == 'B') {
			queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);
		}
		super_counter = 0;
	}

	if (unlikely(dm_integrity_failed(ic)))
		goto err;

	io_req.bi_op = REQ_OP_READ;
	io_req.bi_op_flags = 0;
	io_req.mem.type = DM_IO_VMA;
	io_req.mem.ptr.addr = ic->recalc_buffer;
	io_req.notify.fn = NULL;
	io_req.client = ic->io;
	io_loc.bdev = ic->dev->bdev;
	io_loc.sector = get_data_sector(ic, area, offset);
	io_loc.count = n_sectors;

	r = dm_io(&io_req, 1, &io_loc, NULL);
	if (unlikely(r)) {
		dm_integrity_io_error(ic, "reading data", r);
		goto err;
	}

	t = ic->recalc_tags;
	for (i = 0; i < n_sectors; i += ic->sectors_per_block) {
		integrity_sector_checksum(ic, logical_sector + i, ic->recalc_buffer + (i << SECTOR_SHIFT), t);
		t += ic->tag_size;
	}

	metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);

	r = dm_integrity_rw_tag(ic, ic->recalc_tags, &metadata_block, &metadata_offset, t - ic->recalc_tags, TAG_WRITE);
	if (unlikely(r)) {
		dm_integrity_io_error(ic, "writing tags", r);
		goto err;
	}

	if (ic->mode == 'B') {
		sector_t start, end;

		start = (range.logical_sector >>
			 (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) <<
			(ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
		end = ((range.logical_sector + range.n_sectors) >>
		       (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) <<
			(ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
		block_bitmap_op(ic, ic->recalc_bitmap, start, end - start, BITMAP_OP_CLEAR);
	}

advance_and_next:
	cond_resched();

	spin_lock_irq(&ic->endio_wait.lock);
	remove_range_unlocked(ic, &range);
	ic->sb->recalc_sector = cpu_to_le64(range.logical_sector + range.n_sectors);
	goto next_chunk;

err:
	remove_range(ic, &range);
	return;

unlock_ret:
	spin_unlock_irq(&ic->endio_wait.lock);

	recalc_write_super(ic);
}
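
/*
 * Bitmap mode: process the bios queued on one bitmap block. Bios whose
 * bits are already set proceed immediately; for the others the on-disk
 * bitmap is updated first and the bios are released only after the bitmap
 * write completes.
 */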
static void bitmap_block_work(struct work_struct *w)
{
	struct bitmap_block_status *bbs = container_of(w, struct bitmap_block_status, work);
	struct dm_integrity_c *ic = bbs->ic;
	struct bio *bio;
	struct bio_list bio_queue;
	struct bio_list waiting;

	bio_list_init(&waiting);

	spin_lock(&bbs->bio_queue_lock);
	bio_queue = bbs->bio_queue;
	bio_list_init(&bbs->bio_queue);
	spin_unlock(&bbs->bio_queue_lock);

	while ((bio = bio_list_pop(&bio_queue))) {
		struct dm_integrity_io *dio;

		dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));

		if (block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
				    dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
			remove_range(ic, &dio->range);
			INIT_WORK(&dio->work, integrity_bio_wait);
			queue_work(ic->offload_wq, &dio->work);
		} else {
			block_bitmap_op(ic, ic->journal, dio->range.logical_sector,
					dio->range.n_sectors, BITMAP_OP_SET);
			bio_list_add(&waiting, bio);
		}
	}

	if (bio_list_empty(&waiting))
		return;

	rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC,
			   bbs->idx * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT),
			   BITMAP_BLOCK_SIZE >> SECTOR_SHIFT, NULL);

	while ((bio = bio_list_pop(&waiting))) {
		struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));

		block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
				dio->range.n_sectors, BITMAP_OP_SET);

		remove_range(ic, &dio->range);
		INIT_WORK(&dio->work, integrity_bio_wait);
		queue_work(ic->offload_wq, &dio->work);
	}

	queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);
}
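
/*
 * Periodically clear the in-memory and on-disk bitmaps for regions that
 * are known to be clean, blocking all I/O by locking the whole device
 * range for the duration of the flush.
 */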
static void bitmap_flush_work(struct work_struct *work)
{
	struct dm_integrity_c *ic = container_of(work, struct dm_integrity_c, bitmap_flush_work.work);
	struct dm_integrity_range range;
	unsigned long limit;
	struct bio *bio;

	dm_integrity_flush_buffers(ic, false);

	range.logical_sector = 0;
	range.n_sectors = ic->provided_data_sectors;

	spin_lock_irq(&ic->endio_wait.lock);
	add_new_range_and_wait(ic, &range);
	spin_unlock_irq(&ic->endio_wait.lock);

	dm_integrity_flush_buffers(ic, true);

	limit = ic->provided_data_sectors;
	if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
		limit = le64_to_cpu(ic->sb->recalc_sector)
			>> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)
			<< (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
	}
	/*DEBUG_print("zeroing journal\n");*/
	block_bitmap_op(ic, ic->journal, 0, limit, BITMAP_OP_CLEAR);
	block_bitmap_op(ic, ic->may_write_bitmap, 0, limit, BITMAP_OP_CLEAR);

	rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
			   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);

	spin_lock_irq(&ic->endio_wait.lock);
	remove_range_unlocked(ic, &range);
	while (unlikely((bio = bio_list_pop(&ic->synchronous_bios)) != NULL)) {
		bio_endio(bio);
		spin_unlock_irq(&ic->endio_wait.lock);
		spin_lock_irq(&ic->endio_wait.lock);
	}
	spin_unlock_irq(&ic->endio_wait.lock);
}
static void init_journal(struct dm_integrity_c *ic, unsigned start_section,
			 unsigned n_sections, unsigned char commit_seq)
{
	unsigned i, j, n;

	if (!n_sections)
		return;

	for (n = 0; n < n_sections; n++) {
		i = start_section + n;
		wraparound_section(ic, &i);
		for (j = 0; j < ic->journal_section_sectors; j++) {
			struct journal_sector *js = access_journal(ic, i, j);

			memset(&js->entries, 0, JOURNAL_SECTOR_DATA);
			js->commit_id = dm_integrity_commit_id(ic, i, j, commit_seq);
		}
		for (j = 0; j < ic->journal_section_entries; j++) {
			struct journal_entry *je = access_journal_entry(ic, i, j);

			journal_entry_set_unused(je);
		}
	}

	write_journal(ic, start_section, n_sections);
}
static int find_commit_seq(struct dm_integrity_c *ic, unsigned i, unsigned j, commit_id_t id)
{
	unsigned char k;

	for (k = 0; k < N_COMMIT_IDS; k++) {
		if (dm_integrity_commit_id(ic, i, j, k) == id)
			return k;
	}
	dm_integrity_io_error(ic, "journal commit id", -EIO);
	return -1;
}
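
/*
 * Replay the journal after a crash: determine the newest consistent commit
 * sequence from the per-sector commit ids, write back the sections that
 * carry it and erase the rest of the journal.
 */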
static void replay_journal(struct dm_integrity_c *ic)
{
	unsigned i, j;
	bool used_commit_ids[N_COMMIT_IDS];
	unsigned max_commit_id_sections[N_COMMIT_IDS];
	unsigned write_start, write_sections;
	unsigned continue_section;
	bool journal_empty;
	unsigned char unused, last_used, want_commit_seq;

	if (ic->mode == 'R')
		return;

	if (ic->journal_uptodate)
		return;

	last_used = 0;
	write_start = 0;

	if (!ic->just_formatted) {
		DEBUG_print("reading journal\n");
		rw_journal(ic, REQ_OP_READ, 0, 0, ic->journal_sections, NULL);
		if (ic->journal_io)
			DEBUG_bytes(lowmem_page_address(ic->journal_io[0].page), 64, "read journal");
		if (ic->journal_io) {
			struct journal_completion crypt_comp;

			crypt_comp.ic = ic;
			init_completion(&crypt_comp.comp);
			crypt_comp.in_flight = (atomic_t)ATOMIC_INIT(0);
			encrypt_journal(ic, false, 0, ic->journal_sections, &crypt_comp);
			wait_for_completion(&crypt_comp.comp);
		}
		DEBUG_bytes(lowmem_page_address(ic->journal[0].page), 64, "decrypted journal");
	}

	if (dm_integrity_failed(ic))
		goto clear_journal;

	journal_empty = true;
	memset(used_commit_ids, 0, sizeof used_commit_ids);
	memset(max_commit_id_sections, 0, sizeof max_commit_id_sections);
	for (i = 0; i < ic->journal_sections; i++) {
		for (j = 0; j < ic->journal_section_sectors; j++) {
			int k;
			struct journal_sector *js = access_journal(ic, i, j);

			k = find_commit_seq(ic, i, j, js->commit_id);
			if (k < 0)
				goto clear_journal;
			used_commit_ids[k] = true;
			max_commit_id_sections[k] = i;
		}
		if (journal_empty) {
			for (j = 0; j < ic->journal_section_entries; j++) {
				struct journal_entry *je = access_journal_entry(ic, i, j);

				if (!journal_entry_is_unused(je)) {
					journal_empty = false;
					break;
				}
			}
		}
	}

	if (!used_commit_ids[N_COMMIT_IDS - 1]) {
		unused = N_COMMIT_IDS - 1;
		while (unused && !used_commit_ids[unused - 1])
			unused--;
	} else {
		for (unused = 0; unused < N_COMMIT_IDS; unused++)
			if (!used_commit_ids[unused])
				break;
		if (unused == N_COMMIT_IDS) {
			dm_integrity_io_error(ic, "journal commit ids", -EIO);
			goto clear_journal;
		}
	}
	DEBUG_print("first unused commit seq %d [%d,%d,%d,%d]\n",
		    unused, used_commit_ids[0], used_commit_ids[1],
		    used_commit_ids[2], used_commit_ids[3]);

	last_used = prev_commit_seq(unused);
	want_commit_seq = prev_commit_seq(last_used);

	if (!used_commit_ids[want_commit_seq] && used_commit_ids[prev_commit_seq(want_commit_seq)])
		journal_empty = true;

	write_start = max_commit_id_sections[last_used] + 1;
	if (unlikely(write_start >= ic->journal_sections))
		want_commit_seq = next_commit_seq(want_commit_seq);
	wraparound_section(ic, &write_start);

	i = write_start;
	for (write_sections = 0; write_sections < ic->journal_sections; write_sections++) {
		for (j = 0; j < ic->journal_section_sectors; j++) {
			struct journal_sector *js = access_journal(ic, i, j);

			if (js->commit_id != dm_integrity_commit_id(ic, i, j, want_commit_seq)) {
				/*
				 * This could be caused by crash during writing.
				 * We won't replay the inconsistent part of the
				 * journal.
				 */
				DEBUG_print("commit id mismatch at position (%u, %u): %d != %d\n",
					    i, j, find_commit_seq(ic, i, j, js->commit_id), want_commit_seq);
				goto brk;
			}
		}
		i++;
		if (unlikely(i >= ic->journal_sections))
			want_commit_seq = next_commit_seq(want_commit_seq);
		wraparound_section(ic, &i);
	}
brk:

	if (!journal_empty) {
		DEBUG_print("replaying %u sections, starting at %u, commit seq %d\n",
			    write_sections, write_start, want_commit_seq);
		do_journal_write(ic, write_start, write_sections, true);
	}

	if (write_sections == ic->journal_sections && (ic->mode == 'J' || journal_empty)) {
		continue_section = write_start;
		ic->commit_seq = want_commit_seq;
		DEBUG_print("continuing from section %u, commit seq %d\n", write_start, ic->commit_seq);
	} else {
		unsigned s;
		unsigned char erase_seq;
clear_journal:
		DEBUG_print("clearing journal\n");

		erase_seq = prev_commit_seq(prev_commit_seq(last_used));
		s = write_start;
		init_journal(ic, s, 1, erase_seq);
		s++;
		wraparound_section(ic, &s);
		if (ic->journal_sections >= 2) {
			init_journal(ic, s, ic->journal_sections - 2, erase_seq);
			s += ic->journal_sections - 2;
			wraparound_section(ic, &s);
			init_journal(ic, s, 1, erase_seq);
		}

		continue_section = 0;
		ic->commit_seq = next_commit_seq(erase_seq);
	}

	ic->committed_section = continue_section;
	ic->n_committed_sections = 0;

	ic->uncommitted_section = continue_section;
	ic->n_uncommitted_sections = 0;

	ic->free_section = continue_section;
	ic->free_section_entry = 0;
	ic->free_sectors = ic->journal_entries;

	ic->journal_tree_root = RB_ROOT;
	for (i = 0; i < ic->journal_entries; i++)
		init_journal_node(&ic->journal_tree[i]);
}
static void dm_integrity_enter_synchronous_mode(struct dm_integrity_c *ic)
{
	DEBUG_print("dm_integrity_enter_synchronous_mode\n");

	if (ic->mode == 'B') {
		ic->bitmap_flush_interval = msecs_to_jiffies(10) + 1;
		ic->synchronous_mode = 1;

		cancel_delayed_work_sync(&ic->bitmap_flush_work);
		queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
		flush_workqueue(ic->commit_wq);
	}
}
static int dm_integrity_reboot(struct notifier_block *n, unsigned long code, void *x)
{
	struct dm_integrity_c *ic = container_of(n, struct dm_integrity_c, reboot_notifier);

	DEBUG_print("dm_integrity_reboot\n");

	dm_integrity_enter_synchronous_mode(ic);

	return NOTIFY_DONE;
}
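
/*
 * Postsuspend: stop the autocommit timer and all worker queues, flush
 * outstanding data and, in bitmap mode, mark the bitmap clean so that the
 * next resume does not trigger a full recalculation.
 */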
static void dm_integrity_postsuspend(struct dm_target *ti)
{
	struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
	int r;

	WARN_ON(unregister_reboot_notifier(&ic->reboot_notifier));

	del_timer_sync(&ic->autocommit_timer);

	if (ic->recalc_wq)
		drain_workqueue(ic->recalc_wq);

	if (ic->mode == 'B')
		cancel_delayed_work_sync(&ic->bitmap_flush_work);

	queue_work(ic->commit_wq, &ic->commit_work);
	drain_workqueue(ic->commit_wq);

	if (ic->mode == 'J') {
		if (ic->meta_dev)
			queue_work(ic->writer_wq, &ic->writer_work);
		drain_workqueue(ic->writer_wq);
		dm_integrity_flush_buffers(ic, true);
	}

	if (ic->mode == 'B') {
		dm_integrity_flush_buffers(ic, true);
#if 1
		/* set to 0 to test bitmap replay code */
		init_journal(ic, 0, ic->journal_sections, 0);
		ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
		r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
		if (unlikely(r))
			dm_integrity_io_error(ic, "writing superblock", r);
#endif
	}

	BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));

	ic->journal_uptodate = true;
}
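
/*
 * Resume: handle device growth, replay the journal or the dirty bitmap as
 * appropriate for the mode, restart a pending recalculation and register
 * the reboot notifier that switches the target to synchronous mode.
 */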
static void dm_integrity_resume(struct dm_target *ti)
{
	struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
	__u64 old_provided_data_sectors = le64_to_cpu(ic->sb->provided_data_sectors);
	int r;

	DEBUG_print("resume\n");

	if (ic->provided_data_sectors != old_provided_data_sectors) {
		if (ic->provided_data_sectors > old_provided_data_sectors &&
		    ic->mode == 'B' &&
		    ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit) {
			rw_journal_sectors(ic, REQ_OP_READ, 0, 0,
					   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
			block_bitmap_op(ic, ic->journal, old_provided_data_sectors,
					ic->provided_data_sectors - old_provided_data_sectors, BITMAP_OP_SET);
			rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
					   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
		}

		ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors);
		r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
		if (unlikely(r))
			dm_integrity_io_error(ic, "writing superblock", r);
	}

	if (ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP)) {
		DEBUG_print("resume dirty_bitmap\n");
		rw_journal_sectors(ic, REQ_OP_READ, 0, 0,
				   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
		if (ic->mode == 'B') {
			if (ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit &&
			    !ic->reset_recalculate_flag) {
				block_bitmap_copy(ic, ic->recalc_bitmap, ic->journal);
				block_bitmap_copy(ic, ic->may_write_bitmap, ic->journal);
				if (!block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors,
						     BITMAP_OP_TEST_ALL_CLEAR)) {
					ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
					ic->sb->recalc_sector = cpu_to_le64(0);
				}
			} else {
				DEBUG_print("non-matching blocks_per_bitmap_bit: %u, %u\n",
					    ic->sb->log2_blocks_per_bitmap_bit, ic->log2_blocks_per_bitmap_bit);
				ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
				block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET);
				block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET);
				block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_SET);
				rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
						   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
				ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
				ic->sb->recalc_sector = cpu_to_le64(0);
			}
		} else {
			if (!(ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit &&
			      block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_TEST_ALL_CLEAR)) ||
			    ic->reset_recalculate_flag) {
				ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
				ic->sb->recalc_sector = cpu_to_le64(0);
			}
			init_journal(ic, 0, ic->journal_sections, 0);
			replay_journal(ic);
			ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
		}
		r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
		if (unlikely(r))
			dm_integrity_io_error(ic, "writing superblock", r);
	} else {
		replay_journal(ic);
		if (ic->reset_recalculate_flag) {
			ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
			ic->sb->recalc_sector = cpu_to_le64(0);
		}
		if (ic->mode == 'B') {
			ic->sb->flags |= cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
			ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
			r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
			if (unlikely(r))
				dm_integrity_io_error(ic, "writing superblock", r);

			block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
			block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
			block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
			if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
			    le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors) {
				block_bitmap_op(ic, ic->journal, le64_to_cpu(ic->sb->recalc_sector),
						ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
				block_bitmap_op(ic, ic->recalc_bitmap, le64_to_cpu(ic->sb->recalc_sector),
						ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
				block_bitmap_op(ic, ic->may_write_bitmap, le64_to_cpu(ic->sb->recalc_sector),
						ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
			}
			rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
					   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
		}
	}

	DEBUG_print("testing recalc: %x\n", ic->sb->flags);
	if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
		__u64 recalc_pos = le64_to_cpu(ic->sb->recalc_sector);

		DEBUG_print("recalc pos: %llx / %llx\n", recalc_pos, ic->provided_data_sectors);
		if (recalc_pos < ic->provided_data_sectors) {
			queue_work(ic->recalc_wq, &ic->recalc_work);
		} else if (recalc_pos > ic->provided_data_sectors) {
			ic->sb->recalc_sector = cpu_to_le64(ic->provided_data_sectors);
			recalc_write_super(ic);
		}
	}

	ic->reboot_notifier.notifier_call = dm_integrity_reboot;
	ic->reboot_notifier.next = NULL;
	ic->reboot_notifier.priority = INT_MAX - 1;	/* be notified after md and before hardware drivers */
	WARN_ON(register_reboot_notifier(&ic->reboot_notifier));

#if 0
	/* set to 1 to stress test synchronous mode */
	dm_integrity_enter_synchronous_mode(ic);
#endif
}
static void dm_integrity_status(struct dm_target *ti, status_type_t type,
				unsigned status_flags, char *result, unsigned maxlen)
{
	struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
	unsigned arg_count;
	size_t sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%llu %llu",
		       (unsigned long long)atomic64_read(&ic->number_of_mismatches),
		       ic->provided_data_sectors);
		if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
			DMEMIT(" %llu", le64_to_cpu(ic->sb->recalc_sector));
		else
			DMEMIT(" -");
		break;

	case STATUSTYPE_TABLE: {
		__u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100;

		watermark_percentage += ic->journal_entries / 2;
		do_div(watermark_percentage, ic->journal_entries);
		arg_count = 3;
		arg_count += !!ic->meta_dev;
		arg_count += ic->sectors_per_block != 1;
		arg_count += !!(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING));
		arg_count += ic->reset_recalculate_flag;
		arg_count += ic->discard;
		arg_count += ic->mode == 'J';
		arg_count += ic->mode == 'J';
		arg_count += ic->mode == 'B';
		arg_count += ic->mode == 'B';
		arg_count += !!ic->internal_hash_alg.alg_string;
		arg_count += !!ic->journal_crypt_alg.alg_string;
		arg_count += !!ic->journal_mac_alg.alg_string;
		arg_count += (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0;
		arg_count += (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) != 0;
		arg_count += ic->legacy_recalculate;
		DMEMIT("%s %llu %u %c %u", ic->dev->name, ic->start,
		       ic->tag_size, ic->mode, arg_count);
		if (ic->meta_dev)
			DMEMIT(" meta_device:%s", ic->meta_dev->name);
		if (ic->sectors_per_block != 1)
			DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT);
		if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
			DMEMIT(" recalculate");
		if (ic->reset_recalculate_flag)
			DMEMIT(" reset_recalculate");
		if (ic->discard)
			DMEMIT(" allow_discards");
		DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS);
		DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
		DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors);
		if (ic->mode == 'J') {
			DMEMIT(" journal_watermark:%u", (unsigned)watermark_percentage);
			DMEMIT(" commit_time:%u", ic->autocommit_msec);
		}
		if (ic->mode == 'B') {
			DMEMIT(" sectors_per_bit:%llu", (sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit);
			DMEMIT(" bitmap_flush_interval:%u", jiffies_to_msecs(ic->bitmap_flush_interval));
		}
		if ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0)
			DMEMIT(" fix_padding");
		if ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) != 0)
			DMEMIT(" fix_hmac");
		if (ic->legacy_recalculate)
			DMEMIT(" legacy_recalculate");

#define EMIT_ALG(a, n)							\
		do {							\
			if (ic->a.alg_string) {				\
				DMEMIT(" %s:%s", n, ic->a.alg_string);	\
				if (ic->a.key_string)			\
					DMEMIT(":%s", ic->a.key_string);\
			}						\
		} while (0)
		EMIT_ALG(internal_hash_alg, "internal_hash");
		EMIT_ALG(journal_crypt_alg, "journal_crypt");
		EMIT_ALG(journal_mac_alg, "journal_mac");
		break;
	}
	case STATUSTYPE_IMA:
		DMEMIT_TARGET_NAME_VERSION(ti->type);
		DMEMIT(",dev_name=%s,start=%llu,tag_size=%u,mode=%c",
		       ic->dev->name, ic->start, ic->tag_size, ic->mode);
		if (ic->meta_dev)
			DMEMIT(",meta_device=%s", ic->meta_dev->name);
		if (ic->sectors_per_block != 1)
			DMEMIT(",block_size=%u", ic->sectors_per_block << SECTOR_SHIFT);

		DMEMIT(",recalculate=%c", (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) ?
		       'y' : 'n');
		DMEMIT(",allow_discards=%c", ic->discard ? 'y' : 'n');
		DMEMIT(",fix_padding=%c",
		       ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0) ? 'y' : 'n');
		DMEMIT(",fix_hmac=%c",
		       ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) != 0) ? 'y' : 'n');
		DMEMIT(",legacy_recalculate=%c", ic->legacy_recalculate ? 'y' : 'n');

		DMEMIT(",journal_sectors=%u", ic->initial_sectors - SB_SECTORS);
		DMEMIT(",interleave_sectors=%u", 1U << ic->sb->log2_interleave_sectors);
		DMEMIT(",buffer_sectors=%u", 1U << ic->log2_buffer_sectors);
		DMEMIT(";");
		break;
	}
}
static int dm_integrity_iterate_devices(struct dm_target *ti,
					iterate_devices_callout_fn fn, void *data)
{
	struct dm_integrity_c *ic = ti->private;

	if (!ic->meta_dev)
		return fn(ti, ic->dev, ic->start + ic->initial_sectors + ic->metadata_run, ti->len, data);
	else
		return fn(ti, ic->dev, 0, ti->len, data);
}
static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct dm_integrity_c *ic = ti->private;

	if (ic->sectors_per_block > 1) {
		limits->logical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
		limits->physical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
		blk_limits_io_min(limits, ic->sectors_per_block << SECTOR_SHIFT);
	}
}
static void calculate_journal_section_size(struct dm_integrity_c *ic)
{
	unsigned sector_space = JOURNAL_SECTOR_DATA;

	ic->journal_sections = le32_to_cpu(ic->sb->journal_sections);
	ic->journal_entry_size = roundup(offsetof(struct journal_entry, last_bytes[ic->sectors_per_block]) + ic->tag_size,
					 JOURNAL_ENTRY_ROUNDUP);

	if (ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC))
		sector_space -= JOURNAL_MAC_PER_SECTOR;
	ic->journal_entries_per_sector = sector_space / ic->journal_entry_size;
	ic->journal_section_entries = ic->journal_entries_per_sector * JOURNAL_BLOCK_SECTORS;
	ic->journal_section_sectors = (ic->journal_section_entries << ic->sb->log2_sectors_per_block) + JOURNAL_BLOCK_SECTORS;
	ic->journal_entries = ic->journal_section_entries * ic->journal_sections;
}
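
/*
 * Compute the device layout (initial sectors, metadata run) and verify
 * that the journal, metadata and data fit into the underlying device(s).
 */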
static int calculate_device_limits(struct dm_integrity_c *ic)
{
	__u64 initial_sectors;

	calculate_journal_section_size(ic);
	initial_sectors = SB_SECTORS + (__u64)ic->journal_section_sectors * ic->journal_sections;
	if (initial_sectors + METADATA_PADDING_SECTORS >= ic->meta_device_sectors || initial_sectors > UINT_MAX)
		return -EINVAL;
	ic->initial_sectors = initial_sectors;

	if (!ic->meta_dev) {
		sector_t last_sector, last_area, last_offset;

		/* we have to maintain excessive padding for compatibility with existing volumes */
		__u64 metadata_run_padding =
			ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING) ?
			(__u64)(METADATA_PADDING_SECTORS << SECTOR_SHIFT) :
			(__u64)(1 << SECTOR_SHIFT << METADATA_PADDING_SECTORS);

		ic->metadata_run = round_up((__u64)ic->tag_size << (ic->sb->log2_interleave_sectors - ic->sb->log2_sectors_per_block),
					    metadata_run_padding) >> SECTOR_SHIFT;
		if (!(ic->metadata_run & (ic->metadata_run - 1)))
			ic->log2_metadata_run = __ffs(ic->metadata_run);
		else
			ic->log2_metadata_run = -1;

		get_area_and_offset(ic, ic->provided_data_sectors - 1, &last_area, &last_offset);
		last_sector = get_data_sector(ic, last_area, last_offset);
		if (last_sector < ic->start || last_sector >= ic->meta_device_sectors)
			return -EINVAL;
	} else {
		__u64 meta_size = (ic->provided_data_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size;

		meta_size = (meta_size + ((1U << (ic->log2_buffer_sectors + SECTOR_SHIFT)) - 1))
				>> (ic->log2_buffer_sectors + SECTOR_SHIFT);
		meta_size <<= ic->log2_buffer_sectors;
		if (ic->initial_sectors + meta_size < ic->initial_sectors ||
		    ic->initial_sectors + meta_size > ic->meta_device_sectors)
			return -EINVAL;
		ic->metadata_run = 1;
		ic->log2_metadata_run = 0;
	}

	return 0;
}
static void get_provided_data_sectors(struct dm_integrity_c *ic)
{
	if (!ic->meta_dev) {
		int test_bit;

		ic->provided_data_sectors = 0;
		for (test_bit = fls64(ic->meta_device_sectors) - 1; test_bit >= 3; test_bit--) {
			__u64 prev_data_sectors = ic->provided_data_sectors;

			ic->provided_data_sectors |= (sector_t)1 << test_bit;
			if (calculate_device_limits(ic))
				ic->provided_data_sectors = prev_data_sectors;
		}
	} else {
		ic->provided_data_sectors = ic->data_device_sectors;
		ic->provided_data_sectors &= ~(sector_t)(ic->sectors_per_block - 1);
	}
}
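
/*
 * Fill in a fresh superblock for a newly formatted device: tag size,
 * block size, journal geometry and the interleave layout; on a separate
 * metadata device, pick the largest journal that still fits.
 */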
static int initialize_superblock(struct dm_integrity_c *ic, unsigned journal_sectors, unsigned interleave_sectors)
{
	unsigned journal_sections;
	int test_bit;

	memset(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT);
	memcpy(ic->sb->magic, SB_MAGIC, 8);
	ic->sb->integrity_tag_size = cpu_to_le16(ic->tag_size);
	ic->sb->log2_sectors_per_block = __ffs(ic->sectors_per_block);
	if (ic->journal_mac_alg.alg_string)
		ic->sb->flags |= cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC);

	calculate_journal_section_size(ic);
	journal_sections = journal_sectors / ic->journal_section_sectors;
	if (!journal_sections)
		journal_sections = 1;

	if (ic->fix_hmac && (ic->internal_hash_alg.alg_string || ic->journal_mac_alg.alg_string)) {
		ic->sb->flags |= cpu_to_le32(SB_FLAG_FIXED_HMAC);
		get_random_bytes(ic->sb->salt, SALT_SIZE);
	}

	if (!ic->meta_dev) {
		if (ic->fix_padding)
			ic->sb->flags |= cpu_to_le32(SB_FLAG_FIXED_PADDING);
		ic->sb->journal_sections = cpu_to_le32(journal_sections);
		if (!interleave_sectors)
			interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
		ic->sb->log2_interleave_sectors = __fls(interleave_sectors);
		ic->sb->log2_interleave_sectors = max((__u8)MIN_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
		ic->sb->log2_interleave_sectors = min((__u8)MAX_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);

		get_provided_data_sectors(ic);
		if (!ic->provided_data_sectors)
			return -EINVAL;
	} else {
		ic->sb->log2_interleave_sectors = 0;

		get_provided_data_sectors(ic);
		if (!ic->provided_data_sectors)
			return -EINVAL;

try_smaller_buffer:
		ic->sb->journal_sections = cpu_to_le32(0);
		for (test_bit = fls(journal_sections) - 1; test_bit >= 0; test_bit--) {
			__u32 prev_journal_sections = le32_to_cpu(ic->sb->journal_sections);
			__u32 test_journal_sections = prev_journal_sections | (1U << test_bit);

			if (test_journal_sections > journal_sections)
				continue;
			ic->sb->journal_sections = cpu_to_le32(test_journal_sections);
			if (calculate_device_limits(ic))
				ic->sb->journal_sections = cpu_to_le32(prev_journal_sections);
		}
		if (!le32_to_cpu(ic->sb->journal_sections)) {
			if (ic->log2_buffer_sectors > 3) {
				ic->log2_buffer_sectors--;
				goto try_smaller_buffer;
			}
			return -EINVAL;
		}
	}

	ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors);

	return 0;
}
static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic)
{
	struct gendisk *disk = dm_disk(dm_table_get_md(ti->table));
	struct blk_integrity bi;

	memset(&bi, 0, sizeof(bi));
	bi.profile = &dm_integrity_profile;
	bi.tuple_size = ic->tag_size;
	bi.tag_size = bi.tuple_size;
	bi.interval_exp = ic->sb->log2_sectors_per_block + SECTOR_SHIFT;

	blk_integrity_register(disk, &bi);
	blk_queue_max_integrity_segments(disk->queue, UINT_MAX);
}
static void dm_integrity_free_page_list(struct page_list *pl)
{
	unsigned i;

	if (!pl)
		return;
	for (i = 0; pl[i].page; i++)
		__free_page(pl[i].page);
	kvfree(pl);
}
static struct page_list *dm_integrity_alloc_page_list(unsigned n_pages)
{
	struct page_list *pl;
	unsigned i;

	pl = kvmalloc_array(n_pages + 1, sizeof(struct page_list), GFP_KERNEL | __GFP_ZERO);
	if (!pl)
		return NULL;

	for (i = 0; i < n_pages; i++) {
		pl[i].page = alloc_page(GFP_KERNEL);
		if (!pl[i].page) {
			dm_integrity_free_page_list(pl);
			return NULL;
		}
		if (i)
			pl[i - 1].next = &pl[i];
	}
	pl[i].page = NULL;
	pl[i].next = NULL;

	return pl;
}
static void dm_integrity_free_journal_scatterlist(struct dm_integrity_c *ic, struct scatterlist **sl)
{
	unsigned i;

	for (i = 0; i < ic->journal_sections; i++)
		kvfree(sl[i]);
	kvfree(sl);
}
static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic,
								   struct page_list *pl)
{
	struct scatterlist **sl;
	unsigned i;

	sl = kvmalloc_array(ic->journal_sections,
			    sizeof(struct scatterlist *),
			    GFP_KERNEL | __GFP_ZERO);
	if (!sl)
		return NULL;

	for (i = 0; i < ic->journal_sections; i++) {
		struct scatterlist *s;
		unsigned start_index, start_offset;
		unsigned end_index, end_offset;
		unsigned n_pages;
		unsigned idx;

		page_list_location(ic, i, 0, &start_index, &start_offset);
		page_list_location(ic, i, ic->journal_section_sectors - 1,
				   &end_index, &end_offset);

		n_pages = (end_index - start_index + 1);

		s = kvmalloc_array(n_pages, sizeof(struct scatterlist),
				   GFP_KERNEL);
		if (!s) {
			dm_integrity_free_journal_scatterlist(ic, sl);
			return NULL;
		}

		sg_init_table(s, n_pages);
		for (idx = start_index; idx <= end_index; idx++) {
			char *va = lowmem_page_address(pl[idx].page);
			unsigned start = 0, end = PAGE_SIZE;

			if (idx == start_index)
				start = start_offset;
			if (idx == end_index)
				end = end_offset + (1 << SECTOR_SHIFT);
			sg_set_buf(&s[idx - start_index], va + start, end - start);
		}

		sl[i] = s;
	}

	return sl;
}
static void free_alg(struct alg_spec *a)
{
	kfree_sensitive(a->alg_string);
	kfree_sensitive(a->key);
	memset(a, 0, sizeof *a);
}
static int get_alg_and_key(const char *arg, struct alg_spec *a, char **error, char *error_inval)
{
	char *k;

	a->alg_string = kstrdup(strchr(arg, ':') + 1, GFP_KERNEL);
	if (!a->alg_string)
		goto nomem;

	k = strchr(a->alg_string, ':');
	if (k) {
		*k = 0;
		a->key_string = k + 1;
		if (strlen(a->key_string) & 1)
			goto inval;

		a->key_size = strlen(a->key_string) / 2;
		a->key = kmalloc(a->key_size, GFP_KERNEL);
		if (!a->key)
			goto nomem;
		if (hex2bin(a->key, a->key_string, a->key_size))
			goto inval;
	}

	return 0;
inval:
	*error = error_inval;
	return -EINVAL;
nomem:
	*error = "Out of memory for an argument";
	return -ENOMEM;
}
static int get_mac(struct crypto_shash **hash, struct alg_spec *a, char **error,
		   char *error_alg, char *error_key)
{
	int r;

	if (a->alg_string) {
		*hash = crypto_alloc_shash(a->alg_string, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
		if (IS_ERR(*hash)) {
			*error = error_alg;
			r = PTR_ERR(*hash);
			*hash = NULL;
			return r;
		}

		if (a->key) {
			r = crypto_shash_setkey(*hash, a->key, a->key_size);
			if (r) {
				*error = error_key;
				return r;
			}
		} else if (crypto_shash_get_flags(*hash) & CRYPTO_TFM_NEED_KEY) {
			*error = error_key;
			return -ENOKEY;
		}
	}

	return 0;
}
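
/*
 * Allocate the in-memory journal and, if journal encryption is configured,
 * set up either the xor-style encryption (cipher block size 1) or
 * per-section skcipher requests with precomputed ivs.
 */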
static int create_journal(struct dm_integrity_c *ic, char **error)
{
	int r = 0;
	unsigned i;
	__u64 journal_pages, journal_desc_size, journal_tree_size;
	unsigned char *crypt_data = NULL, *crypt_iv = NULL;
	struct skcipher_request *req = NULL;

	ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL);
	ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL);
	ic->commit_ids[2] = cpu_to_le64(0x3333333333333333ULL);
	ic->commit_ids[3] = cpu_to_le64(0x4444444444444444ULL);

	journal_pages = roundup((__u64)ic->journal_sections * ic->journal_section_sectors,
				PAGE_SIZE >> SECTOR_SHIFT) >> (PAGE_SHIFT - SECTOR_SHIFT);
	journal_desc_size = journal_pages * sizeof(struct page_list);
	if (journal_pages >= totalram_pages() - totalhigh_pages() || journal_desc_size > ULONG_MAX) {
		*error = "Journal doesn't fit into memory";
		r = -ENOMEM;
		goto bad;
	}
	ic->journal_pages = journal_pages;

	ic->journal = dm_integrity_alloc_page_list(ic->journal_pages);
	if (!ic->journal) {
		*error = "Could not allocate memory for journal";
		r = -ENOMEM;
		goto bad;
	}
	if (ic->journal_crypt_alg.alg_string) {
		unsigned ivsize, blocksize;
		struct journal_completion comp;

		comp.ic = ic;
		ic->journal_crypt = crypto_alloc_skcipher(ic->journal_crypt_alg.alg_string, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
		if (IS_ERR(ic->journal_crypt)) {
			*error = "Invalid journal cipher";
			r = PTR_ERR(ic->journal_crypt);
			ic->journal_crypt = NULL;
			goto bad;
		}
		ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
		blocksize = crypto_skcipher_blocksize(ic->journal_crypt);

		if (ic->journal_crypt_alg.key) {
			r = crypto_skcipher_setkey(ic->journal_crypt, ic->journal_crypt_alg.key,
						   ic->journal_crypt_alg.key_size);
			if (r) {
				*error = "Error setting encryption key";
				goto bad;
			}
		}
		DEBUG_print("cipher %s, block size %u iv size %u\n",
			    ic->journal_crypt_alg.alg_string, blocksize, ivsize);

		ic->journal_io = dm_integrity_alloc_page_list(ic->journal_pages);
		if (!ic->journal_io) {
			*error = "Could not allocate memory for journal io";
			r = -ENOMEM;
			goto bad;
		}

		if (blocksize == 1) {
			struct scatterlist *sg;

			req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
			if (!req) {
				*error = "Could not allocate crypt request";
				r = -ENOMEM;
				goto bad;
			}

			crypt_iv = kzalloc(ivsize, GFP_KERNEL);
			if (!crypt_iv) {
				*error = "Could not allocate iv";
				r = -ENOMEM;
				goto bad;
			}

			ic->journal_xor = dm_integrity_alloc_page_list(ic->journal_pages);
			if (!ic->journal_xor) {
				*error = "Could not allocate memory for journal xor";
				r = -ENOMEM;
				goto bad;
			}

			sg = kvmalloc_array(ic->journal_pages + 1,
					    sizeof(struct scatterlist),
					    GFP_KERNEL);
			if (!sg) {
				*error = "Unable to allocate sg list";
				r = -ENOMEM;
				goto bad;
			}
			sg_init_table(sg, ic->journal_pages + 1);
			for (i = 0; i < ic->journal_pages; i++) {
				char *va = lowmem_page_address(ic->journal_xor[i].page);

				clear_page(va);
				sg_set_buf(&sg[i], va, PAGE_SIZE);
			}
			sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids);

			skcipher_request_set_crypt(req, sg, sg,
						   PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, crypt_iv);
			init_completion(&comp.comp);
			comp.in_flight = (atomic_t)ATOMIC_INIT(1);
			if (do_crypt(true, req, &comp))
				wait_for_completion(&comp.comp);
			kvfree(sg);
			r = dm_integrity_failed(ic);
			if (r) {
				*error = "Unable to encrypt journal";
				goto bad;
			}
			DEBUG_bytes(lowmem_page_address(ic->journal_xor[0].page), 64, "xor data");

			crypto_free_skcipher(ic->journal_crypt);
			ic->journal_crypt = NULL;
		} else {
			unsigned crypt_len = roundup(ivsize, blocksize);

			req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
			if (!req) {
				*error = "Could not allocate crypt request";
				r = -ENOMEM;
				goto bad;
			}

			crypt_iv = kmalloc(ivsize, GFP_KERNEL);
			if (!crypt_iv) {
				*error = "Could not allocate iv";
				r = -ENOMEM;
				goto bad;
			}

			crypt_data = kmalloc(crypt_len, GFP_KERNEL);
			if (!crypt_data) {
				*error = "Unable to allocate crypt data";
				r = -ENOMEM;
				goto bad;
			}

			ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal);
			if (!ic->journal_scatterlist) {
				*error = "Unable to allocate sg list";
				r = -ENOMEM;
				goto bad;
			}
			ic->journal_io_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal_io);
			if (!ic->journal_io_scatterlist) {
				*error = "Unable to allocate sg list";
				r = -ENOMEM;
				goto bad;
			}
			ic->sk_requests = kvmalloc_array(ic->journal_sections,
							 sizeof(struct skcipher_request *),
							 GFP_KERNEL | __GFP_ZERO);
			if (!ic->sk_requests) {
				*error = "Unable to allocate sk requests";
				r = -ENOMEM;
				goto bad;
			}
			for (i = 0; i < ic->journal_sections; i++) {
				struct scatterlist sg;
				struct skcipher_request *section_req;
				__le32 section_le = cpu_to_le32(i);

				memset(crypt_iv, 0x00, ivsize);
				memset(crypt_data, 0x00, crypt_len);
				memcpy(crypt_data, &section_le, min((size_t)crypt_len, sizeof(section_le)));

				sg_init_one(&sg, crypt_data, crypt_len);
				skcipher_request_set_crypt(req, &sg, &sg, crypt_len, crypt_iv);
				init_completion(&comp.comp);
				comp.in_flight = (atomic_t)ATOMIC_INIT(1);
				if (do_crypt(true, req, &comp))
					wait_for_completion(&comp.comp);

				r = dm_integrity_failed(ic);
				if (r) {
					*error = "Unable to generate iv";
					goto bad;
				}

				section_req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
				if (!section_req) {
					*error = "Unable to allocate crypt request";
					r = -ENOMEM;
					goto bad;
				}
				section_req->iv = kmalloc_array(ivsize, 2,
								GFP_KERNEL);
				if (!section_req->iv) {
					skcipher_request_free(section_req);
					*error = "Unable to allocate iv";
					r = -ENOMEM;
					goto bad;
				}
				memcpy(section_req->iv + ivsize, crypt_data, ivsize);
				section_req->cryptlen = (size_t)ic->journal_section_sectors << SECTOR_SHIFT;
				ic->sk_requests[i] = section_req;
				DEBUG_bytes(crypt_data, ivsize, "iv(%u)", i);
			}
		}
	}

	for (i = 0; i < N_COMMIT_IDS; i++) {
		unsigned j;
retest_commit_id:
		for (j = 0; j < i; j++) {
			if (ic->commit_ids[j] == ic->commit_ids[i]) {
				ic->commit_ids[i] = cpu_to_le64(le64_to_cpu(ic->commit_ids[i]) + 1);
				goto retest_commit_id;
			}
		}
		DEBUG_print("commit id %u: %016llx\n", i, ic->commit_ids[i]);
	}

	journal_tree_size = (__u64)ic->journal_entries * sizeof(struct journal_node);
	if (journal_tree_size > ULONG_MAX) {
		*error = "Journal doesn't fit into memory";
		r = -ENOMEM;
		goto bad;
	}
	ic->journal_tree = kvmalloc(journal_tree_size, GFP_KERNEL);
	if (!ic->journal_tree) {
		*error = "Could not allocate memory for journal tree";
		r = -ENOMEM;
	}
bad:
	kfree(crypt_data);
	kfree(crypt_iv);
	skcipher_request_free(req);

	return r;
}
/*
 * Construct an integrity mapping
 *
 * Arguments:
 *	device
 *	offset from the start of the device
 *	tag size
 *	D - direct writes, J - journal writes, B - bitmap mode, R - recovery mode
 *	number of optional arguments
 *	optional arguments:
 *		journal_sectors
 *		interleave_sectors
 *		buffer_sectors
 *		journal_watermark
 *		commit_time
 *		meta_device
 *		block_size
 *		sectors_per_bit
 *		bitmap_flush_interval
 *		internal_hash
 *		journal_crypt
 *		journal_mac
 */
static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	struct dm_integrity_c *ic;
	char dummy;
	int r;
	unsigned extra_args;
	struct dm_arg_set as;
	static const struct dm_arg _args[] = {
		{0, 18, "Invalid number of feature args"},
	};
	unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
	bool should_write_sb;
	__u64 threshold;
	unsigned long long start;
	__s8 log2_sectors_per_bitmap_bit = -1;
	__s8 log2_blocks_per_bitmap_bit;
	__u64 bits_in_journal;
	__u64 n_bitmap_bits;

#define DIRECT_ARGUMENTS	4

	if (argc <= DIRECT_ARGUMENTS) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	ic = kzalloc(sizeof(struct dm_integrity_c), GFP_KERNEL);
	if (!ic) {
		ti->error = "Cannot allocate integrity context";
		return -ENOMEM;
	}
	ti->private = ic;
	ti->per_io_data_size = sizeof(struct dm_integrity_io);
	ic->ti = ti;

	ic->in_progress = RB_ROOT;
	INIT_LIST_HEAD(&ic->wait_list);
	init_waitqueue_head(&ic->endio_wait);
	bio_list_init(&ic->flush_bio_list);
	init_waitqueue_head(&ic->copy_to_journal_wait);
	init_completion(&ic->crypto_backoff);
	atomic64_set(&ic->number_of_mismatches, 0);
	ic->bitmap_flush_interval = BITMAP_FLUSH_INTERVAL;

	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ic->dev);
	if (r) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	if (sscanf(argv[1], "%llu%c", &start, &dummy) != 1 || start != (sector_t)start) {
		ti->error = "Invalid starting offset";
		r = -EINVAL;
		goto bad;
	}
	ic->start = start;

	if (strcmp(argv[2], "-")) {
		if (sscanf(argv[2], "%u%c", &ic->tag_size, &dummy) != 1 || !ic->tag_size) {
			ti->error = "Invalid tag size";
			r = -EINVAL;
			goto bad;
		}
	}

	if (!strcmp(argv[3], "J") || !strcmp(argv[3], "B") ||
	    !strcmp(argv[3], "D") || !strcmp(argv[3], "R")) {
		ic->mode = argv[3][0];
	} else {
		ti->error = "Invalid mode (expecting J, B, D, R)";
		r = -EINVAL;
		goto bad;
	}

	journal_sectors = 0;
	interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
	buffer_sectors = DEFAULT_BUFFER_SECTORS;
	journal_watermark = DEFAULT_JOURNAL_WATERMARK;
	sync_msec = DEFAULT_SYNC_MSEC;
	ic->sectors_per_block = 1;

	as.argc = argc - DIRECT_ARGUMENTS;
	as.argv = argv + DIRECT_ARGUMENTS;
	r = dm_read_arg_group(_args, &as, &extra_args, &ti->error);
	if (r)
		goto bad;

	while (extra_args--) {
		const char *opt_string;
		unsigned val;
		unsigned long long llval;
		opt_string = dm_shift_arg(&as);
		if (!opt_string) {
			r = -EINVAL;
			ti->error = "Not enough feature arguments";
			goto bad;
		}
		if (sscanf(opt_string, "journal_sectors:%u%c", &val, &dummy) == 1)
			journal_sectors = val ? val : 1;
		else if (sscanf(opt_string, "interleave_sectors:%u%c", &val, &dummy) == 1)
			interleave_sectors = val;
		else if (sscanf(opt_string, "buffer_sectors:%u%c", &val, &dummy) == 1)
			buffer_sectors = val;
		else if (sscanf(opt_string, "journal_watermark:%u%c", &val, &dummy) == 1 && val <= 100)
			journal_watermark = val;
		else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1)
			sync_msec = val;
		else if (!strncmp(opt_string, "meta_device:", strlen("meta_device:"))) {
			if (ic->meta_dev) {
				dm_put_device(ti, ic->meta_dev);
				ic->meta_dev = NULL;
			}
			r = dm_get_device(ti, strchr(opt_string, ':') + 1,
					  dm_table_get_mode(ti->table), &ic->meta_dev);
			if (r) {
				ti->error = "Device lookup failed";
				goto bad;
			}
		} else if (sscanf(opt_string, "block_size:%u%c", &val, &dummy) == 1) {
			if (val < 1 << SECTOR_SHIFT ||
			    val > MAX_SECTORS_PER_BLOCK << SECTOR_SHIFT ||
			    (val & (val - 1))) {
				r = -EINVAL;
				ti->error = "Invalid block_size argument";
				goto bad;
			}
			ic->sectors_per_block = val >> SECTOR_SHIFT;
		} else if (sscanf(opt_string, "sectors_per_bit:%llu%c", &llval, &dummy) == 1) {
			log2_sectors_per_bitmap_bit = !llval ? 0 : __ilog2_u64(llval);
		} else if (sscanf(opt_string, "bitmap_flush_interval:%u%c", &val, &dummy) == 1) {
			if (val >= (uint64_t)UINT_MAX * 1000 / HZ) {
				r = -EINVAL;
				ti->error = "Invalid bitmap_flush_interval argument";
				goto bad;
			}
			ic->bitmap_flush_interval = msecs_to_jiffies(val);
		} else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
			r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error,
					    "Invalid internal_hash argument");
			if (r)
				goto bad;
		} else if (!strncmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
			r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error,
					    "Invalid journal_crypt argument");
			if (r)
				goto bad;
		} else if (!strncmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
			r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error,
					    "Invalid journal_mac argument");
			if (r)
				goto bad;
		} else if (!strcmp(opt_string, "recalculate")) {
			ic->recalculate_flag = true;
		} else if (!strcmp(opt_string, "reset_recalculate")) {
			ic->recalculate_flag = true;
			ic->reset_recalculate_flag = true;
		} else if (!strcmp(opt_string, "allow_discards")) {
			ic->discard = true;
		} else if (!strcmp(opt_string, "fix_padding")) {
			ic->fix_padding = true;
		} else if (!strcmp(opt_string, "fix_hmac")) {
			ic->fix_hmac = true;
		} else if (!strcmp(opt_string, "legacy_recalculate")) {
			ic->legacy_recalculate = true;
		} else {
			r = -EINVAL;
			ti->error = "Invalid argument";
			goto bad;
		}
	}

	ic->data_device_sectors = i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT;
	if (!ic->meta_dev)
		ic->meta_device_sectors = ic->data_device_sectors;
	else
		ic->meta_device_sectors = i_size_read(ic->meta_dev->bdev->bd_inode) >> SECTOR_SHIFT;

	if (!journal_sectors) {
		journal_sectors = min((sector_t)DEFAULT_MAX_JOURNAL_SECTORS,
				      ic->data_device_sectors >> DEFAULT_JOURNAL_SIZE_FACTOR);
	}
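	/*
	 * With the defaults above, the journal gets 1/128 of the data device
	 * (DEFAULT_JOURNAL_SIZE_FACTOR is a shift by 7), capped at
	 * DEFAULT_MAX_JOURNAL_SECTORS = 131072 sectors (64 MiB).
	 */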
	if (!buffer_sectors)
		buffer_sectors = 1;
	ic->log2_buffer_sectors = min((int)__fls(buffer_sectors), 31 - SECTOR_SHIFT);
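	/*
	 * __fls() rounds buffer_sectors down to a power of two, and the cap
	 * keeps 1 << (SECTOR_SHIFT + log2_buffer_sectors) from overflowing
	 * 32 bits.
	 */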
	r = get_mac(&ic->internal_hash, &ic->internal_hash_alg, &ti->error,
		    "Invalid internal hash", "Error setting internal hash key");
	if (r)
		goto bad;

	r = get_mac(&ic->journal_mac, &ic->journal_mac_alg, &ti->error,
		    "Invalid journal mac", "Error setting journal mac key");
	if (r)
		goto bad;

	if (!ic->tag_size) {
		if (!ic->internal_hash) {
			ti->error = "Unknown tag size";
			r = -EINVAL;
			goto bad;
		}
		ic->tag_size = crypto_shash_digestsize(ic->internal_hash);
	}
	if (ic->tag_size > MAX_TAG_SIZE) {
		ti->error = "Too big tag size";
		r = -EINVAL;
		goto bad;
	}
	if (!(ic->tag_size & (ic->tag_size - 1)))
		ic->log2_tag_size = __ffs(ic->tag_size);
	else
		ic->log2_tag_size = -1;

	if (ic->mode == 'B' && !ic->internal_hash) {
		r = -EINVAL;
		ti->error = "Bitmap mode can be only used with internal hash";
		goto bad;
	}

	if (ic->discard && !ic->internal_hash) {
		r = -EINVAL;
		ti->error = "Discard can be only used with internal hash";
		goto bad;
	}

	ic->autocommit_jiffies = msecs_to_jiffies(sync_msec);
	ic->autocommit_msec = sync_msec;
	timer_setup(&ic->autocommit_timer, autocommit_fn, 0);

	ic->io = dm_io_client_create();
	if (IS_ERR(ic->io)) {
		r = PTR_ERR(ic->io);
		ic->io = NULL;
		ti->error = "Cannot allocate dm io";
		goto bad;
	}

	r = mempool_init_slab_pool(&ic->journal_io_mempool, JOURNAL_IO_MEMPOOL, journal_io_cache);
	if (r) {
		ti->error = "Cannot allocate mempool";
		goto bad;
	}

	ic->metadata_wq = alloc_workqueue("dm-integrity-metadata",
					  WQ_MEM_RECLAIM, METADATA_WORKQUEUE_MAX_ACTIVE);
	if (!ic->metadata_wq) {
		ti->error = "Cannot allocate workqueue";
		r = -ENOMEM;
		goto bad;
	}

	/*
	 * If this workqueue were percpu, it would cause bio reordering
	 * and reduced performance.
	 */
	ic->wait_wq = alloc_workqueue("dm-integrity-wait", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
	if (!ic->wait_wq) {
		ti->error = "Cannot allocate workqueue";
		r = -ENOMEM;
		goto bad;
	}

	ic->offload_wq = alloc_workqueue("dm-integrity-offload", WQ_MEM_RECLAIM,
					 METADATA_WORKQUEUE_MAX_ACTIVE);
	if (!ic->offload_wq) {
		ti->error = "Cannot allocate workqueue";
		r = -ENOMEM;
		goto bad;
	}

	ic->commit_wq = alloc_workqueue("dm-integrity-commit", WQ_MEM_RECLAIM, 1);
	if (!ic->commit_wq) {
		ti->error = "Cannot allocate workqueue";
		r = -ENOMEM;
		goto bad;
	}
	INIT_WORK(&ic->commit_work, integrity_commit);

	if (ic->mode == 'J' || ic->mode == 'B') {
		ic->writer_wq = alloc_workqueue("dm-integrity-writer", WQ_MEM_RECLAIM, 1);
		if (!ic->writer_wq) {
			ti->error = "Cannot allocate workqueue";
			r = -ENOMEM;
			goto bad;
		}
		INIT_WORK(&ic->writer_work, integrity_writer);
	}

	ic->sb = alloc_pages_exact(SB_SECTORS << SECTOR_SHIFT, GFP_KERNEL);
	if (!ic->sb) {
		r = -ENOMEM;
		ti->error = "Cannot allocate superblock area";
		goto bad;
	}

	r = sync_rw_sb(ic, REQ_OP_READ, 0);
	if (r) {
		ti->error = "Error reading superblock";
		goto bad;
	}
	should_write_sb = false;
	if (memcmp(ic->sb->magic, SB_MAGIC, 8)) {
		if (ic->mode != 'R') {
			if (memchr_inv(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT)) {
				r = -EINVAL;
				ti->error = "The device is not initialized";
				goto bad;
			}
		}

		r = initialize_superblock(ic, journal_sectors, interleave_sectors);
		if (r) {
			ti->error = "Could not initialize superblock";
			goto bad;
		}
		if (ic->mode != 'R')
			should_write_sb = true;
	}

	if (!ic->sb->version || ic->sb->version > SB_VERSION_5) {
		r = -EINVAL;
		ti->error = "Unknown version";
		goto bad;
	}
	if (le16_to_cpu(ic->sb->integrity_tag_size) != ic->tag_size) {
		r = -EINVAL;
		ti->error = "Tag size doesn't match the information in superblock";
		goto bad;
	}
	if (ic->sb->log2_sectors_per_block != __ffs(ic->sectors_per_block)) {
		r = -EINVAL;
		ti->error = "Block size doesn't match the information in superblock";
		goto bad;
	}
	if (!le32_to_cpu(ic->sb->journal_sections)) {
		r = -EINVAL;
		ti->error = "Corrupted superblock, journal_sections is 0";
		goto bad;
	}
	/* make sure that ti->max_io_len doesn't overflow */
	if (!ic->meta_dev) {
		if (ic->sb->log2_interleave_sectors < MIN_LOG2_INTERLEAVE_SECTORS ||
		    ic->sb->log2_interleave_sectors > MAX_LOG2_INTERLEAVE_SECTORS) {
			r = -EINVAL;
			ti->error = "Invalid interleave_sectors in the superblock";
			goto bad;
		}
	} else {
		if (ic->sb->log2_interleave_sectors) {
			r = -EINVAL;
			ti->error = "Invalid interleave_sectors in the superblock";
			goto bad;
		}
	}
	if (!!(ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC)) != !!ic->journal_mac_alg.alg_string) {
		r = -EINVAL;
		ti->error = "Journal mac mismatch";
		goto bad;
	}

	get_provided_data_sectors(ic);
	if (!ic->provided_data_sectors) {
		r = -EINVAL;
		ti->error = "The device is too small";
		goto bad;
	}

try_smaller_buffer:
	r = calculate_device_limits(ic);
	if (r) {
		if (ic->log2_buffer_sectors > 3) {
			ic->log2_buffer_sectors--;
			goto try_smaller_buffer;
		}
		r = -EINVAL;
		ti->error = "The device is too small";
		goto bad;
	}
	if (log2_sectors_per_bitmap_bit < 0)
		log2_sectors_per_bitmap_bit = __fls(DEFAULT_SECTORS_PER_BITMAP_BIT);
	if (log2_sectors_per_bitmap_bit < ic->sb->log2_sectors_per_block)
		log2_sectors_per_bitmap_bit = ic->sb->log2_sectors_per_block;

	bits_in_journal = ((__u64)ic->journal_section_sectors * ic->journal_sections) << (SECTOR_SHIFT + 3);
	if (bits_in_journal > UINT_MAX)
		bits_in_journal = UINT_MAX;
	while (bits_in_journal < (ic->provided_data_sectors + ((sector_t)1 << log2_sectors_per_bitmap_bit) - 1) >> log2_sectors_per_bitmap_bit)
		log2_sectors_per_bitmap_bit++;
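	/*
	 * One bitmap bit covers 2^log2_sectors_per_bitmap_bit data sectors.
	 * The granularity is coarsened until the whole bitmap fits into the
	 * space reserved for the journal, which holds the bitmap in 'B' mode.
	 */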
	log2_blocks_per_bitmap_bit = log2_sectors_per_bitmap_bit - ic->sb->log2_sectors_per_block;
	ic->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit;
	if (should_write_sb) {
		ic->sb->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit;
	}

	n_bitmap_bits = ((ic->provided_data_sectors >> ic->sb->log2_sectors_per_block)
			 + (((sector_t)1 << log2_blocks_per_bitmap_bit) - 1)) >> log2_blocks_per_bitmap_bit;
	ic->n_bitmap_blocks = DIV_ROUND_UP(n_bitmap_bits, BITMAP_BLOCK_SIZE * 8);

	if (!ic->meta_dev)
		ic->log2_buffer_sectors = min(ic->log2_buffer_sectors, (__u8)__ffs(ic->metadata_run));

	if (ti->len > ic->provided_data_sectors) {
		r = -EINVAL;
		ti->error = "Not enough provided sectors for requested mapping size";
		goto bad;
	}

	threshold = (__u64)ic->journal_entries * (100 - journal_watermark);
	threshold += 50;
	do_div(threshold, 100);
	ic->free_sectors_threshold = threshold;
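	/*
	 * journal_watermark is a percentage; roughly speaking, once the free
	 * journal space drops below this threshold, a commit is scheduled
	 * without waiting for the autocommit timer.
	 */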
	DEBUG_print("initialized:\n");
	DEBUG_print("	integrity_tag_size %u\n", le16_to_cpu(ic->sb->integrity_tag_size));
	DEBUG_print("	journal_entry_size %u\n", ic->journal_entry_size);
	DEBUG_print("	journal_entries_per_sector %u\n", ic->journal_entries_per_sector);
	DEBUG_print("	journal_section_entries %u\n", ic->journal_section_entries);
	DEBUG_print("	journal_section_sectors %u\n", ic->journal_section_sectors);
	DEBUG_print("	journal_sections %u\n", (unsigned)le32_to_cpu(ic->sb->journal_sections));
	DEBUG_print("	journal_entries %u\n", ic->journal_entries);
	DEBUG_print("	log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors);
	DEBUG_print("	data_device_sectors 0x%llx\n", i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT);
	DEBUG_print("	initial_sectors 0x%x\n", ic->initial_sectors);
	DEBUG_print("	metadata_run 0x%x\n", ic->metadata_run);
	DEBUG_print("	log2_metadata_run %d\n", ic->log2_metadata_run);
	DEBUG_print("	provided_data_sectors 0x%llx (%llu)\n", ic->provided_data_sectors, ic->provided_data_sectors);
	DEBUG_print("	log2_buffer_sectors %u\n", ic->log2_buffer_sectors);
	DEBUG_print("	bits_in_journal %llu\n", bits_in_journal);
	if (ic->recalculate_flag && !(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))) {
		ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
		ic->sb->recalc_sector = cpu_to_le64(0);
	}

	if (ic->internal_hash) {
		ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1);
		if (!ic->recalc_wq) {
			ti->error = "Cannot allocate workqueue";
			r = -ENOMEM;
			goto bad;
		}
		INIT_WORK(&ic->recalc_work, integrity_recalc);
		ic->recalc_buffer = vmalloc(RECALC_SECTORS << SECTOR_SHIFT);
		if (!ic->recalc_buffer) {
			ti->error = "Cannot allocate buffer for recalculating";
			r = -ENOMEM;
			goto bad;
		}
		ic->recalc_tags = kvmalloc_array(RECALC_SECTORS >> ic->sb->log2_sectors_per_block,
						 ic->tag_size, GFP_KERNEL);
		if (!ic->recalc_tags) {
			ti->error = "Cannot allocate tags for recalculating";
			r = -ENOMEM;
			goto bad;
		}
	} else {
		if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
			ti->error = "Recalculate can only be specified with internal_hash";
			r = -EINVAL;
			goto bad;
		}
	}

	if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
	    le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors &&
	    dm_integrity_disable_recalculate(ic)) {
		ti->error = "Recalculating with HMAC is disabled for security reasons - if you really need it, use the argument \"legacy_recalculate\"";
		r = -EOPNOTSUPP;
		goto bad;
	}
	ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev,
			1U << (SECTOR_SHIFT + ic->log2_buffer_sectors), 1, 0, NULL, NULL);
	if (IS_ERR(ic->bufio)) {
		r = PTR_ERR(ic->bufio);
		ti->error = "Cannot initialize dm-bufio";
		ic->bufio = NULL;
		goto bad;
	}
	dm_bufio_set_sector_offset(ic->bufio, ic->start + ic->initial_sectors);

	if (ic->mode != 'R') {
		r = create_journal(ic, &ti->error);
		if (r)
			goto bad;
	}

	if (ic->mode == 'B') {
		unsigned i;
		unsigned n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);

		ic->recalc_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
		if (!ic->recalc_bitmap) {
			r = -ENOMEM;
			goto bad;
		}
		ic->may_write_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
		if (!ic->may_write_bitmap) {
			r = -ENOMEM;
			goto bad;
		}
		ic->bbs = kvmalloc_array(ic->n_bitmap_blocks, sizeof(struct bitmap_block_status), GFP_KERNEL);
		if (!ic->bbs) {
			r = -ENOMEM;
			goto bad;
		}
		INIT_DELAYED_WORK(&ic->bitmap_flush_work, bitmap_flush_work);
		for (i = 0; i < ic->n_bitmap_blocks; i++) {
			struct bitmap_block_status *bbs = &ic->bbs[i];
			unsigned sector, pl_index, pl_offset;

			INIT_WORK(&bbs->work, bitmap_block_work);
			bbs->ic = ic;
			bbs->idx = i;
			bio_list_init(&bbs->bio_queue);
			spin_lock_init(&bbs->bio_queue_lock);

			sector = i * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT);
			pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
			pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);

			bbs->bitmap = lowmem_page_address(ic->journal[pl_index].page) + pl_offset;
		}
	}
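	/*
	 * In bitmap ('B') mode there is no journal replay: the pages in
	 * ic->journal hold the dirty bitmap instead, which is why each
	 * bitmap_block_status points into that page list.
	 */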
	if (should_write_sb) {
		init_journal(ic, 0, ic->journal_sections, 0);
		r = dm_integrity_failed(ic);
		if (unlikely(r)) {
			ti->error = "Error initializing journal";
			goto bad;
		}
		r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
		if (r) {
			ti->error = "Error initializing superblock";
			goto bad;
		}
		ic->just_formatted = true;
	}

	if (!ic->meta_dev) {
		r = dm_set_target_max_io_len(ti, 1U << ic->sb->log2_interleave_sectors);
		if (r)
			goto bad;
	}
	if (ic->mode == 'B') {
		unsigned max_io_len = ((sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit) * (BITMAP_BLOCK_SIZE * 8);
		if (!max_io_len)
			max_io_len = 1U << 31;
		DEBUG_print("max_io_len: old %u, new %u\n", ti->max_io_len, max_io_len);
		if (!ti->max_io_len || ti->max_io_len > max_io_len) {
			r = dm_set_target_max_io_len(ti, max_io_len);
			if (r)
				goto bad;
		}
	}

	if (!ic->internal_hash)
		dm_integrity_set(ti, ic);

	ti->num_flush_bios = 1;
	ti->flush_supported = true;
	if (ic->discard)
		ti->num_discard_bios = 1;

	return 0;

bad:
	dm_integrity_dtr(ti);
	return r;
}
static void dm_integrity_dtr(struct dm_target *ti)
{
	struct dm_integrity_c *ic = ti->private;

	BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
	BUG_ON(!list_empty(&ic->wait_list));
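	/*
	 * Device-mapper quiesces the target before the destructor runs, so
	 * the in-progress tree and the wait list must already be empty;
	 * anything else is a bug.
	 */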
	if (ic->metadata_wq)
		destroy_workqueue(ic->metadata_wq);
	if (ic->wait_wq)
		destroy_workqueue(ic->wait_wq);
	if (ic->offload_wq)
		destroy_workqueue(ic->offload_wq);
	if (ic->commit_wq)
		destroy_workqueue(ic->commit_wq);
	if (ic->writer_wq)
		destroy_workqueue(ic->writer_wq);
	if (ic->recalc_wq)
		destroy_workqueue(ic->recalc_wq);
	vfree(ic->recalc_buffer);
	kvfree(ic->recalc_tags);
	kvfree(ic->bbs);
	if (ic->bufio)
		dm_bufio_client_destroy(ic->bufio);
	mempool_exit(&ic->journal_io_mempool);
	if (ic->io)
		dm_io_client_destroy(ic->io);
	if (ic->dev)
		dm_put_device(ti, ic->dev);
	if (ic->meta_dev)
		dm_put_device(ti, ic->meta_dev);
	dm_integrity_free_page_list(ic->journal);
	dm_integrity_free_page_list(ic->journal_io);
	dm_integrity_free_page_list(ic->journal_xor);
	dm_integrity_free_page_list(ic->recalc_bitmap);
	dm_integrity_free_page_list(ic->may_write_bitmap);
	if (ic->journal_scatterlist)
		dm_integrity_free_journal_scatterlist(ic, ic->journal_scatterlist);
	if (ic->journal_io_scatterlist)
		dm_integrity_free_journal_scatterlist(ic, ic->journal_io_scatterlist);
	if (ic->sk_requests) {
		unsigned i;

		for (i = 0; i < ic->journal_sections; i++) {
			struct skcipher_request *req = ic->sk_requests[i];
			if (req) {
				kfree_sensitive(req->iv);
				skcipher_request_free(req);
			}
		}
		kvfree(ic->sk_requests);
	}
	kvfree(ic->journal_tree);
	if (ic->sb)
		free_pages_exact(ic->sb, SB_SECTORS << SECTOR_SHIFT);

	if (ic->internal_hash)
		crypto_free_shash(ic->internal_hash);
	free_alg(&ic->internal_hash_alg);

	if (ic->journal_crypt)
		crypto_free_skcipher(ic->journal_crypt);
	free_alg(&ic->journal_crypt_alg);

	if (ic->journal_mac)
		crypto_free_shash(ic->journal_mac);
	free_alg(&ic->journal_mac_alg);

	kfree(ic);
}
static struct target_type integrity_target = {
	.name			= "integrity",
	.version		= {1, 10, 0},
	.module			= THIS_MODULE,
	.features		= DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
	.ctr			= dm_integrity_ctr,
	.dtr			= dm_integrity_dtr,
	.map			= dm_integrity_map,
	.postsuspend		= dm_integrity_postsuspend,
	.resume			= dm_integrity_resume,
	.status			= dm_integrity_status,
	.iterate_devices	= dm_integrity_iterate_devices,
	.io_hints		= dm_integrity_io_hints,
};
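/*
 * DM_TARGET_SINGLETON: an "integrity" target must be the only target in
 * its table. DM_TARGET_INTEGRITY marks the table as an integrity mapping,
 * which dm core takes into account when validating table loads.
 */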
static int __init dm_integrity_init(void)
{
	int r;

	journal_io_cache = kmem_cache_create("integrity_journal_io",
					     sizeof(struct journal_io), 0, 0, NULL);
	if (!journal_io_cache) {
		DMERR("can't allocate journal io cache");
		return -ENOMEM;
	}

	r = dm_register_target(&integrity_target);

	if (r < 0)
		DMERR("register failed %d", r);

	return r;
}

static void __exit dm_integrity_exit(void)
{
	dm_unregister_target(&integrity_target);
	kmem_cache_destroy(journal_io_cache);
}
module_init(dm_integrity_init);
module_exit(dm_integrity_exit);

MODULE_AUTHOR("Milan Broz");
MODULE_AUTHOR("Mikulas Patocka");
MODULE_DESCRIPTION(DM_NAME " target for integrity tags extension");
MODULE_LICENSE("GPL");