/*
 * Copyright (C) 2016-2017 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2016-2017 Milan Broz
 * Copyright (C) 2016-2017 Mikulas Patocka
 *
 * This file is released under the GPL.
 */
#include <linux/module.h>
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/vmalloc.h>
#include <linux/sort.h>
#include <linux/rbtree.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/async_tx.h>
#include "dm-bufio.h"
#define DM_MSG_PREFIX "integrity"

#define DEFAULT_INTERLEAVE_SECTORS	32768
#define DEFAULT_JOURNAL_SIZE_FACTOR	7
#define DEFAULT_BUFFER_SECTORS		128
#define DEFAULT_JOURNAL_WATERMARK	50
#define DEFAULT_SYNC_MSEC		10000
#define DEFAULT_MAX_JOURNAL_SECTORS	131072
#define MIN_LOG2_INTERLEAVE_SECTORS	3
#define MAX_LOG2_INTERLEAVE_SECTORS	31
#define METADATA_WORKQUEUE_MAX_ACTIVE	16

/*
 * Warning - DEBUG_PRINT prints security-sensitive data to the log,
 * so it should not be enabled in the official kernel.
 */
//#define DEBUG_PRINT
//#define INTERNAL_VERIFY

/*
 * On disk structures
 */

#define SB_MAGIC			"integrt"
#define SB_SECTORS			8
#define MAX_SECTORS_PER_BLOCK		8
struct superblock {
	__u8 magic[8];
	__u8 version;
	__u8 log2_interleave_sectors;
	__u16 integrity_tag_size;
	__u32 journal_sections;
	__u64 provided_data_sectors;	/* userspace uses this value */
	__u32 flags;
	__u8 log2_sectors_per_block;
};

#define SB_FLAG_HAVE_JOURNAL_MAC	0x1

#define	JOURNAL_ENTRY_ROUNDUP		8

typedef __u64 commit_id_t;

#define JOURNAL_MAC_PER_SECTOR		8
struct journal_entry {
	union {
		struct {
			__u32 sector_lo;
			__u32 sector_hi;
		} s;
		__u64 sector;
	} u;
	commit_id_t last_bytes[0];
	/* __u8 tag[0]; */
};

#define journal_entry_tag(ic, je)		((__u8 *)&(je)->last_bytes[(ic)->sectors_per_block])

#if BITS_PER_LONG == 64
#define journal_entry_set_sector(je, x)		do { smp_wmb(); ACCESS_ONCE((je)->u.sector) = cpu_to_le64(x); } while (0)
#define journal_entry_get_sector(je)		le64_to_cpu((je)->u.sector)
#elif defined(CONFIG_LBDAF)
#define journal_entry_set_sector(je, x)		do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); ACCESS_ONCE((je)->u.s.sector_hi) = cpu_to_le32((x) >> 32); } while (0)
#define journal_entry_get_sector(je)		le64_to_cpu((je)->u.sector)
#else
#define journal_entry_set_sector(je, x)		do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); ACCESS_ONCE((je)->u.s.sector_hi) = cpu_to_le32(0); } while (0)
#define journal_entry_get_sector(je)		le32_to_cpu((je)->u.s.sector_lo)
#endif
#define journal_entry_is_unused(je)		((je)->u.s.sector_hi == cpu_to_le32(-1))
#define journal_entry_set_unused(je)		do { ((je)->u.s.sector_hi = cpu_to_le32(-1)); } while (0)
#define journal_entry_is_inprogress(je)		((je)->u.s.sector_hi == cpu_to_le32(-2))
#define journal_entry_set_inprogress(je)	do { ((je)->u.s.sector_hi = cpu_to_le32(-2)); } while (0)
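/*
 * The values (__u32)-1 and (__u32)-2 in u.s.sector_hi cannot occur as the
 * high half of a real little-endian sector number, so they double as the
 * "unused" and "in progress" markers above.  The smp_wmb() in the setters
 * orders the copy of the data into the journal before the entry is
 * published with its final sector number.
 */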
#define JOURNAL_BLOCK_SECTORS		8
#define JOURNAL_SECTOR_DATA		((1 << SECTOR_SHIFT) - sizeof(commit_id_t))
#define JOURNAL_MAC_SIZE		(JOURNAL_MAC_PER_SECTOR * JOURNAL_BLOCK_SECTORS)

struct journal_sector {
	__u8 entries[JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR];
	__u8 mac[JOURNAL_MAC_PER_SECTOR];
	commit_id_t commit_id;
};

#define MAX_TAG_SIZE			(JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR - offsetof(struct journal_entry, last_bytes[MAX_SECTORS_PER_BLOCK]))
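/*
 * Layout of one journal sector, assuming 512-byte sectors (SECTOR_SHIFT 9):
 * 496 bytes of entries/data, an 8-byte slice of the per-section MAC, and an
 * 8-byte commit_id.  A section's 64-byte MAC (JOURNAL_MAC_SIZE) is thus
 * spread 8 bytes per sector over the JOURNAL_BLOCK_SECTORS sectors that
 * hold the section's journal entries.
 */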
#define METADATA_PADDING_SECTORS	8

#define N_COMMIT_IDS			4

static unsigned char prev_commit_seq(unsigned char seq)
{
	return (seq + N_COMMIT_IDS - 1) % N_COMMIT_IDS;
}

static unsigned char next_commit_seq(unsigned char seq)
{
	return (seq + 1) % N_COMMIT_IDS;
}
/*
 * In-memory structures
 */

struct journal_node {
	struct rb_node node;
	sector_t sector;
};

struct alg_spec {
	char *alg_string;
	char *key_string;
	__u8 *key;
	unsigned key_size;
};

struct dm_integrity_c {
	struct dm_dev *dev;
	unsigned tag_size;
	__s8 log2_tag_size;
	sector_t start;
	mempool_t *journal_io_mempool;
	struct dm_io_client *io;
	struct dm_bufio_client *bufio;
	struct workqueue_struct *metadata_wq;
	struct superblock *sb;
	unsigned journal_pages;
	struct page_list *journal;
	struct page_list *journal_io;
	struct page_list *journal_xor;

	struct crypto_skcipher *journal_crypt;
	struct scatterlist **journal_scatterlist;
	struct scatterlist **journal_io_scatterlist;
	struct skcipher_request **sk_requests;

	struct crypto_shash *journal_mac;

	struct journal_node *journal_tree;
	struct rb_root journal_tree_root;

	sector_t provided_data_sectors;

	unsigned short journal_entry_size;
	unsigned char journal_entries_per_sector;
	unsigned char journal_section_entries;
	unsigned short journal_section_sectors;
	unsigned journal_sections;
	unsigned journal_entries;
	sector_t device_sectors;
	unsigned initial_sectors;
	unsigned metadata_run;
	__s8 log2_metadata_run;
	__u8 log2_buffer_sectors;
	__u8 sectors_per_block;

	unsigned char mode;
	bool suspending;

	int failed;

	struct crypto_shash *internal_hash;

	/* these variables are locked with endio_wait.lock */
	struct rb_root in_progress;
	wait_queue_head_t endio_wait;
	struct workqueue_struct *wait_wq;

	unsigned char commit_seq;
	commit_id_t commit_ids[N_COMMIT_IDS];

	unsigned committed_section;
	unsigned n_committed_sections;

	unsigned uncommitted_section;
	unsigned n_uncommitted_sections;

	unsigned free_section;
	unsigned char free_section_entry;
	unsigned free_sectors;

	unsigned free_sectors_threshold;

	struct workqueue_struct *commit_wq;
	struct work_struct commit_work;

	struct workqueue_struct *writer_wq;
	struct work_struct writer_work;

	struct bio_list flush_bio_list;

	unsigned long autocommit_jiffies;
	struct timer_list autocommit_timer;
	unsigned autocommit_msec;

	wait_queue_head_t copy_to_journal_wait;

	struct completion crypto_backoff;

	bool journal_uptodate;
	bool just_formatted;

	struct alg_spec internal_hash_alg;
	struct alg_spec journal_crypt_alg;
	struct alg_spec journal_mac_alg;
};
struct dm_integrity_range {
	sector_t logical_sector;
	unsigned n_sectors;
	struct rb_node node;
};

struct dm_integrity_io {
	struct work_struct work;

	struct dm_integrity_c *ic;
	bool write;
	bool fua;

	struct dm_integrity_range range;

	sector_t metadata_block;
	unsigned metadata_offset;

	atomic_t in_flight;
	int bi_error;

	struct completion *completion;

	struct block_device *orig_bi_bdev;
	bio_end_io_t *orig_bi_end_io;
	struct bio_integrity_payload *orig_bi_integrity;
	struct bvec_iter orig_bi_iter;
};

struct journal_completion {
	struct dm_integrity_c *ic;
	atomic_t in_flight;
	struct completion comp;
};

struct journal_io {
	struct dm_integrity_range range;
	struct journal_completion *comp;
};

static struct kmem_cache *journal_io_cache;

#define JOURNAL_IO_MEMPOOL	32
#ifdef DEBUG_PRINT
#define DEBUG_print(x, ...)	printk(KERN_DEBUG x, ##__VA_ARGS__)
static void __DEBUG_bytes(__u8 *bytes, size_t len, const char *msg, ...)
{
	va_list args;
	va_start(args, msg);
	vprintk(msg, args);
	va_end(args);
	if (len)
		pr_cont(":");
	while (len) {
		pr_cont(" %02x", *bytes);
		bytes++;
		len--;
	}
	pr_cont("\n");
}
#define DEBUG_bytes(bytes, len, msg, ...)	__DEBUG_bytes(bytes, len, KERN_DEBUG msg, ##__VA_ARGS__)
#else
#define DEBUG_print(x, ...)			do { } while (0)
#define DEBUG_bytes(bytes, len, msg, ...)	do { } while (0)
#endif

/*
 * DM Integrity profile, protection is performed layer above (dm-crypt)
 */
static struct blk_integrity_profile dm_integrity_profile = {
	.name			= "DM-DIF-EXT-TAG",
	.generate_fn		= NULL,
	.verify_fn		= NULL,
};
static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map);
static void integrity_bio_wait(struct work_struct *w);
static void dm_integrity_dtr(struct dm_target *ti);

static void dm_integrity_io_error(struct dm_integrity_c *ic, const char *msg, int err)
{
	if (!cmpxchg(&ic->failed, 0, err))
		DMERR("Error on %s: %d", msg, err);
}

static int dm_integrity_failed(struct dm_integrity_c *ic)
{
	return ACCESS_ONCE(ic->failed);
}
static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i,
					  unsigned j, unsigned char seq)
{
	/*
	 * Xor the number with section and sector, so that if a piece of
	 * journal is written at wrong place, it is detected.
	 */
	return ic->commit_ids[seq] ^ cpu_to_le64(((__u64)i << 32) ^ j);
}
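/*
 * Example: for section i == 2, sector j == 5, the value xored into
 * commit_ids[seq] is cpu_to_le64((2ULL << 32) ^ 5), so a commit id that was
 * written to a different (section, sector) position will not match when the
 * journal is replayed.
 */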
static void get_area_and_offset(struct dm_integrity_c *ic, sector_t data_sector,
				sector_t *area, sector_t *offset)
{
	__u8 log2_interleave_sectors = ic->sb->log2_interleave_sectors;

	*area = data_sector >> log2_interleave_sectors;
	*offset = (unsigned)data_sector & ((1U << log2_interleave_sectors) - 1);
}
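/*
 * Example: with the default interleave of 32768 sectors per area
 * (log2_interleave_sectors == 15), data sector 100000 maps to area 3,
 * offset 1696 (3 * 32768 == 98304; 100000 - 98304 == 1696).
 */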
#define sector_to_block(ic, n)						\
do {									\
	BUG_ON((n) & (unsigned)((ic)->sectors_per_block - 1));		\
	(n) >>= (ic)->sb->log2_sectors_per_block;			\
} while (0)
static __u64 get_metadata_sector_and_offset(struct dm_integrity_c *ic, sector_t area,
					    sector_t offset, unsigned *metadata_offset)
{
	__u64 ms;
	unsigned mo;

	ms = area << ic->sb->log2_interleave_sectors;
	if (likely(ic->log2_metadata_run >= 0))
		ms += area << ic->log2_metadata_run;
	else
		ms += area * ic->metadata_run;
	ms >>= ic->log2_buffer_sectors;

	sector_to_block(ic, offset);

	if (likely(ic->log2_tag_size >= 0)) {
		ms += offset >> (SECTOR_SHIFT + ic->log2_buffer_sectors - ic->log2_tag_size);
		mo = (offset << ic->log2_tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
	} else {
		ms += (__u64)offset * ic->tag_size >> (SECTOR_SHIFT + ic->log2_buffer_sectors);
		mo = (offset * ic->tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
	}
	*metadata_offset = mo;
	return ms;
}
static sector_t get_data_sector(struct dm_integrity_c *ic, sector_t area, sector_t offset)
{
	sector_t result;

	result = area << ic->sb->log2_interleave_sectors;
	if (likely(ic->log2_metadata_run >= 0))
		result += (area + 1) << ic->log2_metadata_run;
	else
		result += (area + 1) * ic->metadata_run;

	result += (sector_t)ic->initial_sectors + offset;
	return result;
}
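/*
 * The resulting on-disk layout, as implied by the two functions above, is
 * [superblock | journal | metadata run 0 | data area 0 | metadata run 1 |
 * data area 1 | ...]: ic->initial_sectors covers the superblock and the
 * journal, and the data of area n is preceded by n + 1 metadata runs that
 * hold the integrity tags.
 */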
static void wraparound_section(struct dm_integrity_c *ic, unsigned *sec_ptr)
{
	if (unlikely(*sec_ptr >= ic->journal_sections))
		*sec_ptr -= ic->journal_sections;
}
static int sync_rw_sb(struct dm_integrity_c *ic, int op, int op_flags)
{
	struct dm_io_request io_req;
	struct dm_io_region io_loc;

	io_req.bi_op = op;
	io_req.bi_op_flags = op_flags;
	io_req.mem.type = DM_IO_KMEM;
	io_req.mem.ptr.addr = ic->sb;
	io_req.notify.fn = NULL;
	io_req.client = ic->io;
	io_loc.bdev = ic->dev->bdev;
	io_loc.sector = ic->start;
	io_loc.count = SB_SECTORS;

	return dm_io(&io_req, 1, &io_loc, NULL);
}
static void access_journal_check(struct dm_integrity_c *ic, unsigned section, unsigned offset,
				 bool e, const char *function)
{
#if defined(CONFIG_DM_DEBUG) || defined(INTERNAL_VERIFY)
	unsigned limit = e ? ic->journal_section_entries : ic->journal_section_sectors;

	if (unlikely(section >= ic->journal_sections) ||
	    unlikely(offset >= limit)) {
		printk(KERN_CRIT "%s: invalid access at (%u,%u), limit (%u,%u)\n",
			function, section, offset, ic->journal_sections, limit);
		BUG();
	}
#endif
}

static void page_list_location(struct dm_integrity_c *ic, unsigned section, unsigned offset,
			       unsigned *pl_index, unsigned *pl_offset)
{
	unsigned sector;

	access_journal_check(ic, section, offset, false, "page_list_location");

	sector = section * ic->journal_section_sectors + offset;

	*pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	*pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
}
static struct journal_sector *access_page_list(struct dm_integrity_c *ic, struct page_list *pl,
					       unsigned section, unsigned offset, unsigned *n_sectors)
{
	unsigned pl_index, pl_offset;
	char *va;

	page_list_location(ic, section, offset, &pl_index, &pl_offset);

	if (n_sectors)
		*n_sectors = (PAGE_SIZE - pl_offset) >> SECTOR_SHIFT;

	va = lowmem_page_address(pl[pl_index].page);

	return (struct journal_sector *)(va + pl_offset);
}

static struct journal_sector *access_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset)
{
	return access_page_list(ic, ic->journal, section, offset, NULL);
}
static struct journal_entry *access_journal_entry(struct dm_integrity_c *ic, unsigned section, unsigned n)
{
	unsigned rel_sector, offset;
	struct journal_sector *js;

	access_journal_check(ic, section, n, true, "access_journal_entry");

	rel_sector = n % JOURNAL_BLOCK_SECTORS;
	offset = n / JOURNAL_BLOCK_SECTORS;

	js = access_journal(ic, section, rel_sector);
	return (struct journal_entry *)((char *)js + offset * ic->journal_entry_size);
}

static struct journal_sector *access_journal_data(struct dm_integrity_c *ic, unsigned section, unsigned n)
{
	n <<= ic->sb->log2_sectors_per_block;

	n += JOURNAL_BLOCK_SECTORS;

	access_journal_check(ic, section, n, false, "access_journal_data");

	return access_journal(ic, section, n);
}
static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result[JOURNAL_MAC_SIZE])
{
	SHASH_DESC_ON_STACK(desc, ic->journal_mac);
	int r;
	unsigned j, size;

	desc->tfm = ic->journal_mac;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	r = crypto_shash_init(desc);
	if (unlikely(r)) {
		dm_integrity_io_error(ic, "crypto_shash_init", r);
		goto err;
	}

	for (j = 0; j < ic->journal_section_entries; j++) {
		struct journal_entry *je = access_journal_entry(ic, section, j);
		r = crypto_shash_update(desc, (__u8 *)&je->u.sector, sizeof je->u.sector);
		if (unlikely(r)) {
			dm_integrity_io_error(ic, "crypto_shash_update", r);
			goto err;
		}
	}

	size = crypto_shash_digestsize(ic->journal_mac);

	if (likely(size <= JOURNAL_MAC_SIZE)) {
		r = crypto_shash_final(desc, result);
		if (unlikely(r)) {
			dm_integrity_io_error(ic, "crypto_shash_final", r);
			goto err;
		}
		memset(result + size, 0, JOURNAL_MAC_SIZE - size);
	} else {
		__u8 digest[size];
		r = crypto_shash_final(desc, digest);
		if (unlikely(r)) {
			dm_integrity_io_error(ic, "crypto_shash_final", r);
			goto err;
		}
		memcpy(result, digest, JOURNAL_MAC_SIZE);
	}

	return;
err:
	memset(result, 0, JOURNAL_MAC_SIZE);
}
static void rw_section_mac(struct dm_integrity_c *ic, unsigned section, bool wr)
{
	unsigned j;
	__u8 result[JOURNAL_MAC_SIZE];

	if (!ic->journal_mac)
		return;

	section_mac(ic, section, result);

	for (j = 0; j < JOURNAL_BLOCK_SECTORS; j++) {
		struct journal_sector *js = access_journal(ic, section, j);

		if (likely(wr))
			memcpy(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR);
		else {
			if (memcmp(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR))
				dm_integrity_io_error(ic, "journal mac", -EILSEQ);
		}
	}
}

static void complete_journal_op(void *context)
{
	struct journal_completion *comp = context;
	BUG_ON(!atomic_read(&comp->in_flight));
	if (likely(atomic_dec_and_test(&comp->in_flight)))
		complete(&comp->comp);
}
static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
			unsigned n_sections, struct journal_completion *comp)
{
	struct async_submit_ctl submit;
	size_t n_bytes = (size_t)(n_sections * ic->journal_section_sectors) << SECTOR_SHIFT;
	unsigned pl_index, pl_offset, section_index;
	struct page_list *source_pl, *target_pl;

	if (likely(encrypt)) {
		source_pl = ic->journal;
		target_pl = ic->journal_io;
	} else {
		source_pl = ic->journal_io;
		target_pl = ic->journal;
	}

	page_list_location(ic, section, 0, &pl_index, &pl_offset);

	atomic_add(roundup(pl_offset + n_bytes, PAGE_SIZE) >> PAGE_SHIFT, &comp->in_flight);

	init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, complete_journal_op, comp, NULL);

	section_index = pl_index;

	do {
		size_t this_step;
		struct page *src_pages[2];
		struct page *dst_page;

		while (unlikely(pl_index == section_index)) {
			unsigned dummy;
			if (likely(encrypt))
				rw_section_mac(ic, section, true);
			section++;
			n_sections--;
			page_list_location(ic, section, 0, &section_index, &dummy);
		}

		this_step = min(n_bytes, (size_t)PAGE_SIZE - pl_offset);
		dst_page = target_pl[pl_index].page;
		src_pages[0] = source_pl[pl_index].page;
		src_pages[1] = ic->journal_xor[pl_index].page;

		async_xor(dst_page, src_pages, pl_offset, 2, this_step, &submit);

		pl_index++;
		pl_offset = 0;
		n_bytes -= this_step;
	} while (n_bytes);

	BUG_ON(n_sections);

	async_tx_issue_pending_all();
}
static void complete_journal_encrypt(struct crypto_async_request *req, int err)
{
	struct journal_completion *comp = req->data;

	if (unlikely(err)) {
		if (likely(err == -EINPROGRESS)) {
			complete(&comp->ic->crypto_backoff);
			return;
		}
		dm_integrity_io_error(comp->ic, "asynchronous encrypt", err);
	}
	complete_journal_op(comp);
}
static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_completion *comp)
{
	int r;

	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
				      complete_journal_encrypt, comp);
	if (likely(encrypt))
		r = crypto_skcipher_encrypt(req);
	else
		r = crypto_skcipher_decrypt(req);
	if (likely(!r))
		return false;
	if (likely(r == -EINPROGRESS))
		return true;
	if (likely(r == -EBUSY)) {
		wait_for_completion(&comp->ic->crypto_backoff);
		reinit_completion(&comp->ic->crypto_backoff);
		return true;
	}
	dm_integrity_io_error(comp->ic, "encrypt", r);
	return false;
}
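/*
 * do_crypt() returns true when the request will complete asynchronously and
 * complete_journal_encrypt() drops the in_flight reference.  -EBUSY means
 * the request was backlogged (CRYPTO_TFM_REQ_MAY_BACKLOG): the caller waits
 * on crypto_backoff, which complete_journal_encrypt() signals with
 * -EINPROGRESS once the backlogged request has been accepted.
 */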
static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
			  unsigned n_sections, struct journal_completion *comp)
{
	struct scatterlist **source_sg;
	struct scatterlist **target_sg;

	atomic_add(2, &comp->in_flight);

	if (likely(encrypt)) {
		source_sg = ic->journal_scatterlist;
		target_sg = ic->journal_io_scatterlist;
	} else {
		source_sg = ic->journal_io_scatterlist;
		target_sg = ic->journal_scatterlist;
	}

	do {
		struct skcipher_request *req;
		unsigned ivsize;
		char *iv;

		if (likely(encrypt))
			rw_section_mac(ic, section, true);

		req = ic->sk_requests[section];
		ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
		iv = req->iv;

		memcpy(iv, iv + ivsize, ivsize);

		req->src = source_sg[section];
		req->dst = target_sg[section];

		if (unlikely(do_crypt(encrypt, req, comp)))
			atomic_inc(&comp->in_flight);

		section++;
		n_sections--;
	} while (n_sections);

	atomic_dec(&comp->in_flight);
	complete_journal_op(comp);
}

static void encrypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
			    unsigned n_sections, struct journal_completion *comp)
{
	if (ic->journal_xor)
		return xor_journal(ic, encrypt, section, n_sections, comp);
	else
		return crypt_journal(ic, encrypt, section, n_sections, comp);
}
static void complete_journal_io(unsigned long error, void *context)
{
	struct journal_completion *comp = context;
	if (unlikely(error != 0))
		dm_integrity_io_error(comp->ic, "writing journal", -EIO);
	complete_journal_op(comp);
}
static void rw_journal(struct dm_integrity_c *ic, int op, int op_flags, unsigned section,
		       unsigned n_sections, struct journal_completion *comp)
{
	struct dm_io_request io_req;
	struct dm_io_region io_loc;
	unsigned sector, n_sectors, pl_index, pl_offset;
	int r;

	if (unlikely(dm_integrity_failed(ic))) {
		if (comp)
			complete_journal_io(-1UL, comp);
		return;
	}

	sector = section * ic->journal_section_sectors;
	n_sectors = n_sections * ic->journal_section_sectors;

	pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);

	io_req.bi_op = op;
	io_req.bi_op_flags = op_flags;
	io_req.mem.type = DM_IO_PAGE_LIST;
	if (ic->journal_io)
		io_req.mem.ptr.pl = &ic->journal_io[pl_index];
	else
		io_req.mem.ptr.pl = &ic->journal[pl_index];
	io_req.mem.offset = pl_offset;
	if (likely(comp != NULL)) {
		io_req.notify.fn = complete_journal_io;
		io_req.notify.context = comp;
	} else {
		io_req.notify.fn = NULL;
	}
	io_req.client = ic->io;
	io_loc.bdev = ic->dev->bdev;
	io_loc.sector = ic->start + SB_SECTORS + sector;
	io_loc.count = n_sectors;

	r = dm_io(&io_req, 1, &io_loc, NULL);
	if (unlikely(r)) {
		dm_integrity_io_error(ic, op == REQ_OP_READ ? "reading journal" : "writing journal", r);
		if (comp) {
			WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
			complete_journal_io(-1UL, comp);
		}
	}
}
static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsigned commit_sections)
{
	struct journal_completion io_comp;
	struct journal_completion crypt_comp_1;
	struct journal_completion crypt_comp_2;
	unsigned i;

	io_comp.ic = ic;
	io_comp.comp = COMPLETION_INITIALIZER_ONSTACK(io_comp.comp);

	if (commit_start + commit_sections <= ic->journal_sections) {
		io_comp.in_flight = (atomic_t)ATOMIC_INIT(1);
		if (ic->journal_io) {
			crypt_comp_1.ic = ic;
			crypt_comp_1.comp = COMPLETION_INITIALIZER_ONSTACK(crypt_comp_1.comp);
			crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
			encrypt_journal(ic, true, commit_start, commit_sections, &crypt_comp_1);
			wait_for_completion_io(&crypt_comp_1.comp);
		} else {
			for (i = 0; i < commit_sections; i++)
				rw_section_mac(ic, commit_start + i, true);
		}
		rw_journal(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, commit_start,
			   commit_sections, &io_comp);
	} else {
		unsigned to_end;
		io_comp.in_flight = (atomic_t)ATOMIC_INIT(2);
		to_end = ic->journal_sections - commit_start;
		if (ic->journal_io) {
			crypt_comp_1.ic = ic;
			crypt_comp_1.comp = COMPLETION_INITIALIZER_ONSTACK(crypt_comp_1.comp);
			crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
			encrypt_journal(ic, true, commit_start, to_end, &crypt_comp_1);
			if (try_wait_for_completion(&crypt_comp_1.comp)) {
				rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
				crypt_comp_1.comp = COMPLETION_INITIALIZER_ONSTACK(crypt_comp_1.comp);
				crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
				encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_1);
				wait_for_completion_io(&crypt_comp_1.comp);
			} else {
				crypt_comp_2.ic = ic;
				crypt_comp_2.comp = COMPLETION_INITIALIZER_ONSTACK(crypt_comp_2.comp);
				crypt_comp_2.in_flight = (atomic_t)ATOMIC_INIT(0);
				encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_2);
				wait_for_completion_io(&crypt_comp_1.comp);
				rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
				wait_for_completion_io(&crypt_comp_2.comp);
			}
		} else {
			for (i = 0; i < to_end; i++)
				rw_section_mac(ic, commit_start + i, true);
			rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
			for (i = 0; i < commit_sections - to_end; i++)
				rw_section_mac(ic, i, true);
		}
		rw_journal(ic, REQ_OP_WRITE, REQ_FUA, 0, commit_sections - to_end, &io_comp);
	}

	wait_for_completion_io(&io_comp.comp);
}
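/*
 * If the committed range wraps past the end of the journal, the write is
 * issued in two parts (commit_start to the journal end, then from section
 * 0), and io_comp.in_flight starts at 2 so that complete_journal_op()
 * completes io_comp.comp only after both parts have finished.
 */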
static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset,
			      unsigned n_sectors, sector_t target, io_notify_fn fn, void *data)
{
	struct dm_io_request io_req;
	struct dm_io_region io_loc;
	int r;
	unsigned sector, pl_index, pl_offset;

	BUG_ON((target | n_sectors | offset) & (unsigned)(ic->sectors_per_block - 1));

	if (unlikely(dm_integrity_failed(ic))) {
		fn(-1UL, data);
		return;
	}

	sector = section * ic->journal_section_sectors + JOURNAL_BLOCK_SECTORS + offset;

	pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);

	io_req.bi_op = REQ_OP_WRITE;
	io_req.bi_op_flags = 0;
	io_req.mem.type = DM_IO_PAGE_LIST;
	io_req.mem.ptr.pl = &ic->journal[pl_index];
	io_req.mem.offset = pl_offset;
	io_req.notify.fn = fn;
	io_req.notify.context = data;
	io_req.client = ic->io;
	io_loc.bdev = ic->dev->bdev;
	io_loc.sector = ic->start + target;
	io_loc.count = n_sectors;

	r = dm_io(&io_req, 1, &io_loc, NULL);
	if (unlikely(r)) {
		WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
		fn(-1UL, data);
	}
}
static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
{
	struct rb_node **n = &ic->in_progress.rb_node;
	struct rb_node *parent;

	BUG_ON((new_range->logical_sector | new_range->n_sectors) & (unsigned)(ic->sectors_per_block - 1));

	parent = NULL;

	while (*n) {
		struct dm_integrity_range *range = container_of(*n, struct dm_integrity_range, node);

		parent = *n;
		if (new_range->logical_sector + new_range->n_sectors <= range->logical_sector) {
			n = &range->node.rb_left;
		} else if (new_range->logical_sector >= range->logical_sector + range->n_sectors) {
			n = &range->node.rb_right;
		} else {
			return false;
		}
	}

	rb_link_node(&new_range->node, parent, n);
	rb_insert_color(&new_range->node, &ic->in_progress);

	return true;
}

static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity_range *range)
{
	rb_erase(&range->node, &ic->in_progress);
	wake_up_locked(&ic->endio_wait);
}

static void remove_range(struct dm_integrity_c *ic, struct dm_integrity_range *range)
{
	unsigned long flags;

	spin_lock_irqsave(&ic->endio_wait.lock, flags);
	remove_range_unlocked(ic, range);
	spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
}
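/*
 * in_progress is an rbtree of the sector ranges currently being processed,
 * keyed by logical_sector.  add_new_range() refuses to insert an
 * overlapping range; the caller then either offloads the bio to a
 * workqueue or sleeps on endio_wait until remove_range() wakes it.
 */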
static void init_journal_node(struct journal_node *node)
{
	RB_CLEAR_NODE(&node->node);
	node->sector = (sector_t)-1;
}

static void add_journal_node(struct dm_integrity_c *ic, struct journal_node *node, sector_t sector)
{
	struct rb_node **link;
	struct rb_node *parent;

	node->sector = sector;
	BUG_ON(!RB_EMPTY_NODE(&node->node));

	link = &ic->journal_tree_root.rb_node;
	parent = NULL;

	while (*link) {
		struct journal_node *j;
		parent = *link;
		j = container_of(parent, struct journal_node, node);
		if (sector < j->sector)
			link = &j->node.rb_left;
		else
			link = &j->node.rb_right;
	}

	rb_link_node(&node->node, parent, link);
	rb_insert_color(&node->node, &ic->journal_tree_root);
}

static void remove_journal_node(struct dm_integrity_c *ic, struct journal_node *node)
{
	BUG_ON(RB_EMPTY_NODE(&node->node));
	rb_erase(&node->node, &ic->journal_tree_root);
	init_journal_node(node);
}
#define NOT_FOUND	(-1U)

static unsigned find_journal_node(struct dm_integrity_c *ic, sector_t sector, sector_t *next_sector)
{
	struct rb_node *n = ic->journal_tree_root.rb_node;
	unsigned found = NOT_FOUND;
	*next_sector = (sector_t)-1;
	while (n) {
		struct journal_node *j = container_of(n, struct journal_node, node);
		if (sector == j->sector) {
			found = j - ic->journal_tree;
		}
		if (sector < j->sector) {
			*next_sector = j->sector;
			n = j->node.rb_left;
		} else {
			n = j->node.rb_right;
		}
	}

	return found;
}

static bool test_journal_node(struct dm_integrity_c *ic, unsigned pos, sector_t sector)
{
	struct journal_node *node, *next_node;
	struct rb_node *next;

	if (unlikely(pos >= ic->journal_entries))
		return false;
	node = &ic->journal_tree[pos];
	if (unlikely(RB_EMPTY_NODE(&node->node)))
		return false;
	if (unlikely(node->sector != sector))
		return false;

	next = rb_next(&node->node);
	if (unlikely(!next))
		return true;

	next_node = container_of(next, struct journal_node, node);
	return next_node->sector != sector;
}
static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_node *node)
{
	struct rb_node *next;
	struct journal_node *next_node;
	unsigned next_section;

	BUG_ON(RB_EMPTY_NODE(&node->node));

	next = rb_next(&node->node);
	if (unlikely(!next))
		return false;

	next_node = container_of(next, struct journal_node, node);

	if (next_node->sector != node->sector)
		return false;

	next_section = (unsigned)(next_node - ic->journal_tree) / ic->journal_section_entries;
	if (next_section >= ic->committed_section &&
	    next_section < ic->committed_section + ic->n_committed_sections)
		return true;
	if (next_section + ic->journal_sections < ic->committed_section + ic->n_committed_sections)
		return true;

	return false;
}
#define TAG_READ	0
#define TAG_WRITE	1
#define TAG_CMP		2

static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, sector_t *metadata_block,
			       unsigned *metadata_offset, unsigned total_size, int op)
{
	do {
		unsigned char *data, *dp;
		struct dm_buffer *b;
		unsigned to_copy;
		int r;

		r = dm_integrity_failed(ic);
		if (unlikely(r))
			return r;

		data = dm_bufio_read(ic->bufio, *metadata_block, &b);
		if (unlikely(IS_ERR(data)))
			return PTR_ERR(data);

		to_copy = min((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - *metadata_offset, total_size);
		dp = data + *metadata_offset;
		if (op == TAG_READ) {
			memcpy(tag, dp, to_copy);
		} else if (op == TAG_WRITE) {
			memcpy(dp, tag, to_copy);
			dm_bufio_mark_buffer_dirty(b);
		} else {
			/* e.g.: op == TAG_CMP */
			if (unlikely(memcmp(dp, tag, to_copy))) {
				unsigned i;

				for (i = 0; i < to_copy; i++) {
					if (dp[i] != tag[i])
						break;
					total_size--;
				}
				dm_bufio_release(b);
				return total_size;
			}
		}
		dm_bufio_release(b);

		tag += to_copy;
		*metadata_offset += to_copy;
		if (unlikely(*metadata_offset == 1U << SECTOR_SHIFT << ic->log2_buffer_sectors)) {
			(*metadata_block)++;
			*metadata_offset = 0;
		}
		total_size -= to_copy;
	} while (unlikely(total_size));

	return 0;
}
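/*
 * For TAG_CMP, a nonzero positive return value is the number of tag bytes
 * remaining from the first mismatching byte to the end of the compared
 * region; integrity_metadata() converts it back to the failing sector.
 */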
static void dm_integrity_flush_buffers(struct dm_integrity_c *ic)
{
	int r;
	r = dm_bufio_write_dirty_buffers(ic->bufio);
	if (unlikely(r))
		dm_integrity_io_error(ic, "writing tags", r);
}

static void sleep_on_endio_wait(struct dm_integrity_c *ic)
{
	DECLARE_WAITQUEUE(wait, current);
	__add_wait_queue(&ic->endio_wait, &wait);
	__set_current_state(TASK_UNINTERRUPTIBLE);
	spin_unlock_irq(&ic->endio_wait.lock);
	io_schedule();
	spin_lock_irq(&ic->endio_wait.lock);
	__remove_wait_queue(&ic->endio_wait, &wait);
}
static void autocommit_fn(unsigned long data)
{
	struct dm_integrity_c *ic = (struct dm_integrity_c *)data;

	if (likely(!dm_integrity_failed(ic)))
		queue_work(ic->commit_wq, &ic->commit_work);
}

static void schedule_autocommit(struct dm_integrity_c *ic)
{
	if (!timer_pending(&ic->autocommit_timer))
		mod_timer(&ic->autocommit_timer, jiffies + ic->autocommit_jiffies);
}

static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
{
	struct bio *bio;

	spin_lock_irq(&ic->endio_wait.lock);
	bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
	bio_list_add(&ic->flush_bio_list, bio);
	spin_unlock_irq(&ic->endio_wait.lock);
	queue_work(ic->commit_wq, &ic->commit_work);
}
static void do_endio(struct dm_integrity_c *ic, struct bio *bio)
{
	int r = dm_integrity_failed(ic);
	if (unlikely(r) && !bio->bi_error)
		bio->bi_error = r;
	bio_endio(bio);
}

static void do_endio_flush(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
{
	struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));

	if (unlikely(dio->fua) && likely(!bio->bi_error) && likely(!dm_integrity_failed(ic)))
		submit_flush_bio(ic, dio);
	else
		do_endio(ic, bio);
}
static void dec_in_flight(struct dm_integrity_io *dio)
{
	if (atomic_dec_and_test(&dio->in_flight)) {
		struct dm_integrity_c *ic = dio->ic;
		struct bio *bio;

		remove_range(ic, &dio->range);

		if (unlikely(dio->write))
			schedule_autocommit(ic);

		bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));

		if (unlikely(dio->bi_error) && !bio->bi_error)
			bio->bi_error = dio->bi_error;
		if (likely(!bio->bi_error) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) {
			dio->range.logical_sector += dio->range.n_sectors;
			bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT);
			INIT_WORK(&dio->work, integrity_bio_wait);
			queue_work(ic->wait_wq, &dio->work);
			return;
		}
		do_endio_flush(ic, dio);
	}
}
static void integrity_end_io(struct bio *bio)
{
	struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));

	bio->bi_iter = dio->orig_bi_iter;
	bio->bi_bdev = dio->orig_bi_bdev;
	if (dio->orig_bi_integrity) {
		bio->bi_integrity = dio->orig_bi_integrity;
		bio->bi_opf |= REQ_INTEGRITY;
	}
	bio->bi_end_io = dio->orig_bi_end_io;

	if (dio->completion)
		complete(dio->completion);

	dec_in_flight(dio);
}
static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector,
				      const char *data, char *result)
{
	__u64 sector_le = cpu_to_le64(sector);
	SHASH_DESC_ON_STACK(req, ic->internal_hash);
	int r;
	unsigned digest_size;

	req->tfm = ic->internal_hash;
	req->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	r = crypto_shash_init(req);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_init", r);
		goto failed;
	}

	r = crypto_shash_update(req, (const __u8 *)&sector_le, sizeof sector_le);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_update", r);
		goto failed;
	}

	r = crypto_shash_update(req, data, ic->sectors_per_block << SECTOR_SHIFT);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_update", r);
		goto failed;
	}

	r = crypto_shash_final(req, result);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_final", r);
		goto failed;
	}

	digest_size = crypto_shash_digestsize(ic->internal_hash);
	if (unlikely(digest_size < ic->tag_size))
		memset(result + digest_size, 0, ic->tag_size - digest_size);

	return;

failed:
	/* this shouldn't happen anyway, the hash functions have no reason to fail */
	get_random_bytes(result, ic->tag_size);
}
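/*
 * The little-endian sector number is hashed together with the data, so a
 * valid tag copied to a different sector will not verify; on hash failure
 * the result is randomized so that it cannot accidentally match.
 */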
static void integrity_metadata(struct work_struct *w)
{
	struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
	struct dm_integrity_c *ic = dio->ic;

	int r;

	if (ic->internal_hash) {
		struct bvec_iter iter;
		struct bio_vec bv;
		unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
		struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
		char *checksums;
		unsigned extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
		char checksums_onstack[ic->tag_size + extra_space];
		unsigned sectors_to_process = dio->range.n_sectors;
		sector_t sector = dio->range.logical_sector;

		if (unlikely(ic->mode == 'R'))
			goto skip_io;

		checksums = kmalloc((PAGE_SIZE >> SECTOR_SHIFT >> ic->sb->log2_sectors_per_block) * ic->tag_size + extra_space,
				    GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
		if (!checksums)
			checksums = checksums_onstack;

		__bio_for_each_segment(bv, bio, iter, dio->orig_bi_iter) {
			unsigned pos;
			char *mem, *checksums_ptr;

again:
			mem = (char *)kmap_atomic(bv.bv_page) + bv.bv_offset;
			pos = 0;
			checksums_ptr = checksums;
			do {
				integrity_sector_checksum(ic, sector, mem + pos, checksums_ptr);
				checksums_ptr += ic->tag_size;
				sectors_to_process -= ic->sectors_per_block;
				pos += ic->sectors_per_block << SECTOR_SHIFT;
				sector += ic->sectors_per_block;
			} while (pos < bv.bv_len && sectors_to_process && checksums != checksums_onstack);
			kunmap_atomic(mem);

			r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
						checksums_ptr - checksums, !dio->write ? TAG_CMP : TAG_WRITE);
			if (unlikely(r)) {
				if (r > 0) {
					DMERR("Checksum failed at sector 0x%llx",
					      (unsigned long long)(sector - ((r + ic->tag_size - 1) / ic->tag_size)));
					r = -EILSEQ;
				}
				if (likely(checksums != checksums_onstack))
					kfree(checksums);
				goto error;
			}

			if (!sectors_to_process)
				break;

			if (unlikely(pos < bv.bv_len)) {
				bv.bv_offset += pos;
				bv.bv_len -= pos;
				goto again;
			}
		}

		if (likely(checksums != checksums_onstack))
			kfree(checksums);
	} else {
		struct bio_integrity_payload *bip = dio->orig_bi_integrity;

		if (bip) {
			struct bio_vec biv;
			struct bvec_iter iter;
			unsigned data_to_process = dio->range.n_sectors;
			sector_to_block(ic, data_to_process);
			data_to_process *= ic->tag_size;

			bip_for_each_vec(biv, bip, iter) {
				unsigned char *tag;
				unsigned this_len;

				BUG_ON(PageHighMem(biv.bv_page));
				tag = lowmem_page_address(biv.bv_page) + biv.bv_offset;
				this_len = min(biv.bv_len, data_to_process);
				r = dm_integrity_rw_tag(ic, tag, &dio->metadata_block, &dio->metadata_offset,
							this_len, !dio->write ? TAG_READ : TAG_WRITE);
				if (unlikely(r))
					goto error;
				data_to_process -= this_len;
				if (!data_to_process)
					break;
			}
		}
	}
skip_io:
	dec_in_flight(dio);
	return;
error:
	dio->bi_error = r;
	dec_in_flight(dio);
}
*ti
, struct bio
*bio
)
1328 struct dm_integrity_c
*ic
= ti
->private;
1329 struct dm_integrity_io
*dio
= dm_per_bio_data(bio
, sizeof(struct dm_integrity_io
));
1330 struct bio_integrity_payload
*bip
;
1332 sector_t area
, offset
;
1337 if (unlikely(bio
->bi_opf
& REQ_PREFLUSH
)) {
1338 submit_flush_bio(ic
, dio
);
1339 return DM_MAPIO_SUBMITTED
;
1342 dio
->range
.logical_sector
= dm_target_offset(ti
, bio
->bi_iter
.bi_sector
);
1343 dio
->write
= bio_op(bio
) == REQ_OP_WRITE
;
1344 dio
->fua
= dio
->write
&& bio
->bi_opf
& REQ_FUA
;
1345 if (unlikely(dio
->fua
)) {
1347 * Don't pass down the FUA flag because we have to flush
1348 * disk cache anyway.
1350 bio
->bi_opf
&= ~REQ_FUA
;
1352 if (unlikely(dio
->range
.logical_sector
+ bio_sectors(bio
) > ic
->provided_data_sectors
)) {
1353 DMERR("Too big sector number: 0x%llx + 0x%x > 0x%llx",
1354 (unsigned long long)dio
->range
.logical_sector
, bio_sectors(bio
),
1355 (unsigned long long)ic
->provided_data_sectors
);
1358 if (unlikely((dio
->range
.logical_sector
| bio_sectors(bio
)) & (unsigned)(ic
->sectors_per_block
- 1))) {
1359 DMERR("Bio not aligned on %u sectors: 0x%llx, 0x%x",
1360 ic
->sectors_per_block
,
1361 (unsigned long long)dio
->range
.logical_sector
, bio_sectors(bio
));
1365 if (ic
->sectors_per_block
> 1) {
1366 struct bvec_iter iter
;
1368 bio_for_each_segment(bv
, bio
, iter
) {
1369 if (unlikely((bv
.bv_offset
| bv
.bv_len
) & ((ic
->sectors_per_block
<< SECTOR_SHIFT
) - 1))) {
1370 DMERR("Bio vector (%u,%u) is not aligned on %u-sector boundary",
1371 bv
.bv_offset
, bv
.bv_len
, ic
->sectors_per_block
);
1377 bip
= bio_integrity(bio
);
1378 if (!ic
->internal_hash
) {
1380 unsigned wanted_tag_size
= bio_sectors(bio
) >> ic
->sb
->log2_sectors_per_block
;
1381 if (ic
->log2_tag_size
>= 0)
1382 wanted_tag_size
<<= ic
->log2_tag_size
;
1384 wanted_tag_size
*= ic
->tag_size
;
1385 if (unlikely(wanted_tag_size
!= bip
->bip_iter
.bi_size
)) {
1386 DMERR("Invalid integrity data size %u, expected %u", bip
->bip_iter
.bi_size
, wanted_tag_size
);
1391 if (unlikely(bip
!= NULL
)) {
1392 DMERR("Unexpected integrity data when using internal hash");
1397 if (unlikely(ic
->mode
== 'R') && unlikely(dio
->write
))
1400 get_area_and_offset(ic
, dio
->range
.logical_sector
, &area
, &offset
);
1401 dio
->metadata_block
= get_metadata_sector_and_offset(ic
, area
, offset
, &dio
->metadata_offset
);
1402 bio
->bi_iter
.bi_sector
= get_data_sector(ic
, area
, offset
);
1404 dm_integrity_map_continue(dio
, true);
1405 return DM_MAPIO_SUBMITTED
;
static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
				 unsigned journal_section, unsigned journal_entry)
{
	struct dm_integrity_c *ic = dio->ic;
	sector_t logical_sector;
	unsigned n_sectors;

	logical_sector = dio->range.logical_sector;
	n_sectors = dio->range.n_sectors;
	do {
		struct bio_vec bv = bio_iovec(bio);
		char *mem;

		if (unlikely(bv.bv_len >> SECTOR_SHIFT > n_sectors))
			bv.bv_len = n_sectors << SECTOR_SHIFT;
		n_sectors -= bv.bv_len >> SECTOR_SHIFT;
		bio_advance_iter(bio, &bio->bi_iter, bv.bv_len);
retry_kmap:
		mem = kmap_atomic(bv.bv_page);
		if (likely(dio->write))
			flush_dcache_page(bv.bv_page);

		do {
			struct journal_entry *je = access_journal_entry(ic, journal_section, journal_entry);

			if (unlikely(!dio->write)) {
				struct journal_sector *js;
				char *mem_ptr;
				unsigned s;

				if (unlikely(journal_entry_is_inprogress(je))) {
					flush_dcache_page(bv.bv_page);
					kunmap_atomic(mem);

					__io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
					goto retry_kmap;
				}
				smp_rmb();
				BUG_ON(journal_entry_get_sector(je) != logical_sector);
				js = access_journal_data(ic, journal_section, journal_entry);
				mem_ptr = mem + bv.bv_offset;
				s = 0;
				do {
					memcpy(mem_ptr, js, JOURNAL_SECTOR_DATA);
					*(commit_id_t *)(mem_ptr + JOURNAL_SECTOR_DATA) = je->last_bytes[s];
					js++;
					mem_ptr += 1 << SECTOR_SHIFT;
				} while (++s < ic->sectors_per_block);
#ifdef INTERNAL_VERIFY
				if (ic->internal_hash) {
					char checksums_onstack[max(crypto_shash_digestsize(ic->internal_hash), ic->tag_size)];

					integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
					if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
						DMERR("Checksum failed when reading from journal, at sector 0x%llx",
						      (unsigned long long)logical_sector);
					}
				}
#endif
			}

			if (!ic->internal_hash) {
				struct bio_integrity_payload *bip = bio_integrity(bio);
				unsigned tag_todo = ic->tag_size;
				char *tag_ptr = journal_entry_tag(ic, je);

				if (bip) do {
					struct bio_vec biv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
					unsigned tag_now = min(biv.bv_len, tag_todo);
					char *tag_addr;
					BUG_ON(PageHighMem(biv.bv_page));
					tag_addr = lowmem_page_address(biv.bv_page) + biv.bv_offset;
					if (likely(dio->write))
						memcpy(tag_ptr, tag_addr, tag_now);
					else
						memcpy(tag_addr, tag_ptr, tag_now);
					bvec_iter_advance(bip->bip_vec, &bip->bip_iter, tag_now);
					tag_ptr += tag_now;
					tag_todo -= tag_now;
				} while (unlikely(tag_todo)); else {
					if (likely(dio->write))
						memset(tag_ptr, 0, tag_todo);
				}
			}

			if (likely(dio->write)) {
				struct journal_sector *js;
				unsigned s;

				js = access_journal_data(ic, journal_section, journal_entry);
				memcpy(js, mem + bv.bv_offset, ic->sectors_per_block << SECTOR_SHIFT);

				s = 0;
				do {
					je->last_bytes[s] = js[s].commit_id;
				} while (++s < ic->sectors_per_block);

				if (ic->internal_hash) {
					unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
					if (unlikely(digest_size > ic->tag_size)) {
						char checksums_onstack[digest_size];
						integrity_sector_checksum(ic, logical_sector, (char *)js, checksums_onstack);
						memcpy(journal_entry_tag(ic, je), checksums_onstack, ic->tag_size);
					} else
						integrity_sector_checksum(ic, logical_sector, (char *)js, journal_entry_tag(ic, je));
				}

				journal_entry_set_sector(je, logical_sector);
			}
			logical_sector += ic->sectors_per_block;

			journal_entry++;
			if (unlikely(journal_entry == ic->journal_section_entries)) {
				journal_entry = 0;
				journal_section++;
				wraparound_section(ic, &journal_section);
			}

			bv.bv_offset += ic->sectors_per_block << SECTOR_SHIFT;
		} while (bv.bv_len -= ic->sectors_per_block << SECTOR_SHIFT);

		if (unlikely(!dio->write))
			flush_dcache_page(bv.bv_page);
		kunmap_atomic(mem);
	} while (n_sectors);

	if (likely(dio->write)) {
		smp_mb();
		if (unlikely(waitqueue_active(&ic->copy_to_journal_wait)))
			wake_up(&ic->copy_to_journal_wait);
		if (ACCESS_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) {
			queue_work(ic->commit_wq, &ic->commit_work);
		} else {
			schedule_autocommit(ic);
		}
	} else {
		remove_range(ic, &dio->range);
	}

	if (unlikely(bio->bi_iter.bi_size)) {
		sector_t area, offset;

		dio->range.logical_sector = logical_sector;
		get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
		dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
		return true;
	}

	return false;
}
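/*
 * __journal_read_write() returns true when the bio could only be partially
 * processed because it ran past the end of the allocated journal range;
 * dm_integrity_map_continue() then re-takes the lock and allocates journal
 * space for the remainder of the bio.
 */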
static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map)
{
	struct dm_integrity_c *ic = dio->ic;
	struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
	unsigned journal_section, journal_entry;
	unsigned journal_read_pos;
	struct completion read_comp;
	bool need_sync_io = ic->internal_hash && !dio->write;

	if (need_sync_io && from_map) {
		INIT_WORK(&dio->work, integrity_bio_wait);
		queue_work(ic->metadata_wq, &dio->work);
		return;
	}

lock_retry:
	spin_lock_irq(&ic->endio_wait.lock);
retry:
	if (unlikely(dm_integrity_failed(ic))) {
		spin_unlock_irq(&ic->endio_wait.lock);
		do_endio(ic, bio);
		return;
	}
	dio->range.n_sectors = bio_sectors(bio);
	journal_read_pos = NOT_FOUND;
	if (likely(ic->mode == 'J')) {
		if (dio->write) {
			unsigned next_entry, i, pos;
			unsigned ws, we;

			dio->range.n_sectors = min(dio->range.n_sectors, ic->free_sectors);
			if (unlikely(!dio->range.n_sectors))
				goto sleep;
			ic->free_sectors -= dio->range.n_sectors;
			journal_section = ic->free_section;
			journal_entry = ic->free_section_entry;

			next_entry = ic->free_section_entry + dio->range.n_sectors;
			ic->free_section_entry = next_entry % ic->journal_section_entries;
			ic->free_section += next_entry / ic->journal_section_entries;
			ic->n_uncommitted_sections += next_entry / ic->journal_section_entries;
			wraparound_section(ic, &ic->free_section);

			pos = journal_section * ic->journal_section_entries + journal_entry;
			ws = journal_section;
			we = journal_entry;
			i = 0;
			do {
				struct journal_entry *je;

				add_journal_node(ic, &ic->journal_tree[pos], dio->range.logical_sector + i);
				pos++;
				if (unlikely(pos >= ic->journal_entries))
					pos = 0;

				je = access_journal_entry(ic, ws, we);
				BUG_ON(!journal_entry_is_unused(je));
				journal_entry_set_inprogress(je);
				we++;
				if (unlikely(we == ic->journal_section_entries)) {
					we = 0;
					ws++;
					wraparound_section(ic, &ws);
				}
			} while ((i += ic->sectors_per_block) < dio->range.n_sectors);

			spin_unlock_irq(&ic->endio_wait.lock);
			goto journal_read_write;
		} else {
			sector_t next_sector;
			journal_read_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
			if (likely(journal_read_pos == NOT_FOUND)) {
				if (unlikely(dio->range.n_sectors > next_sector - dio->range.logical_sector))
					dio->range.n_sectors = next_sector - dio->range.logical_sector;
			} else {
				unsigned i;
				unsigned jp = journal_read_pos + 1;
				for (i = ic->sectors_per_block; i < dio->range.n_sectors; i += ic->sectors_per_block, jp++) {
					if (!test_journal_node(ic, jp, dio->range.logical_sector + i))
						break;
				}
				dio->range.n_sectors = i;
			}
		}
	}
	if (unlikely(!add_new_range(ic, &dio->range))) {
		/*
		 * We must not sleep in the request routine because it could
		 * stall bios on current->bio_list.
		 * So, we offload the bio to a workqueue if we have to sleep.
		 */
sleep:
		if (from_map) {
			spin_unlock_irq(&ic->endio_wait.lock);
			INIT_WORK(&dio->work, integrity_bio_wait);
			queue_work(ic->wait_wq, &dio->work);
			return;
		} else {
			sleep_on_endio_wait(ic);
			goto retry;
		}
	}
	spin_unlock_irq(&ic->endio_wait.lock);

	if (unlikely(journal_read_pos != NOT_FOUND)) {
		journal_section = journal_read_pos / ic->journal_section_entries;
		journal_entry = journal_read_pos % ic->journal_section_entries;
		goto journal_read_write;
	}

	dio->in_flight = (atomic_t)ATOMIC_INIT(2);

	if (need_sync_io) {
		read_comp = COMPLETION_INITIALIZER_ONSTACK(read_comp);
		dio->completion = &read_comp;
	} else
		dio->completion = NULL;

	dio->orig_bi_iter = bio->bi_iter;

	dio->orig_bi_bdev = bio->bi_bdev;
	bio->bi_bdev = ic->dev->bdev;

	dio->orig_bi_integrity = bio_integrity(bio);
	bio->bi_integrity = NULL;
	bio->bi_opf &= ~REQ_INTEGRITY;

	dio->orig_bi_end_io = bio->bi_end_io;
	bio->bi_end_io = integrity_end_io;

	bio->bi_iter.bi_size = dio->range.n_sectors << SECTOR_SHIFT;
	bio->bi_iter.bi_sector += ic->start;
	generic_make_request(bio);

	if (need_sync_io) {
		wait_for_completion_io(&read_comp);
		integrity_metadata(&dio->work);
	} else {
		INIT_WORK(&dio->work, integrity_metadata);
		queue_work(ic->metadata_wq, &dio->work);
	}

	return;

journal_read_write:
	if (unlikely(__journal_read_write(dio, bio, journal_section, journal_entry)))
		goto lock_retry;

	do_endio_flush(ic, dio);
}
static void integrity_bio_wait(struct work_struct *w)
{
	struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);

	dm_integrity_map_continue(dio, false);
}

static void pad_uncommitted(struct dm_integrity_c *ic)
{
	if (ic->free_section_entry) {
		ic->free_sectors -= ic->journal_section_entries - ic->free_section_entry;
		ic->free_section_entry = 0;
		ic->free_section++;
		wraparound_section(ic, &ic->free_section);
		ic->n_uncommitted_sections++;
	}
}
static void integrity_commit(struct work_struct *w)
{
	struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, commit_work);
	unsigned commit_start, commit_sections;
	unsigned i, j, n;
	struct bio *flushes;

	del_timer(&ic->autocommit_timer);

	spin_lock_irq(&ic->endio_wait.lock);
	flushes = bio_list_get(&ic->flush_bio_list);
	if (unlikely(ic->mode != 'J')) {
		spin_unlock_irq(&ic->endio_wait.lock);
		dm_integrity_flush_buffers(ic);
		goto release_flush_bios;
	}

	pad_uncommitted(ic);
	commit_start = ic->uncommitted_section;
	commit_sections = ic->n_uncommitted_sections;
	spin_unlock_irq(&ic->endio_wait.lock);

	if (!commit_sections)
		goto release_flush_bios;

	i = commit_start;
	for (n = 0; n < commit_sections; n++) {
		for (j = 0; j < ic->journal_section_entries; j++) {
			struct journal_entry *je;
			je = access_journal_entry(ic, i, j);
			io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
		}
		for (j = 0; j < ic->journal_section_sectors; j++) {
			struct journal_sector *js;
			js = access_journal(ic, i, j);
			js->commit_id = dm_integrity_commit_id(ic, i, j, ic->commit_seq);
		}
		i++;
		if (unlikely(i >= ic->journal_sections))
			ic->commit_seq = next_commit_seq(ic->commit_seq);
		wraparound_section(ic, &i);
	}

	write_journal(ic, commit_start, commit_sections);

	spin_lock_irq(&ic->endio_wait.lock);
	ic->uncommitted_section += commit_sections;
	wraparound_section(ic, &ic->uncommitted_section);
	ic->n_uncommitted_sections -= commit_sections;
	ic->n_committed_sections += commit_sections;
	spin_unlock_irq(&ic->endio_wait.lock);

	if (ACCESS_ONCE(ic->free_sectors) <= ic->free_sectors_threshold)
		queue_work(ic->writer_wq, &ic->writer_work);

release_flush_bios:
	while (flushes) {
		struct bio *next = flushes->bi_next;
		flushes->bi_next = NULL;
		do_endio(ic, flushes);
		flushes = next;
	}
}
static void complete_copy_from_journal(unsigned long error, void *context)
{
	struct journal_io *io = context;
	struct journal_completion *comp = io->comp;
	struct dm_integrity_c *ic = comp->ic;
	remove_range(ic, &io->range);
	mempool_free(io, ic->journal_io_mempool);
	if (unlikely(error != 0))
		dm_integrity_io_error(ic, "copying from journal", -EIO);
	complete_journal_op(comp);
}

static void restore_last_bytes(struct dm_integrity_c *ic, struct journal_sector *js,
			       struct journal_entry *je)
{
	unsigned s = 0;
	do {
		js->commit_id = je->last_bytes[s];
		js++;
	} while (++s < ic->sectors_per_block);
}
static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
			     unsigned write_sections, bool from_replay)
{
	unsigned i, j, n;
	struct journal_completion comp;

	comp.ic = ic;
	comp.in_flight = (atomic_t)ATOMIC_INIT(1);
	comp.comp = COMPLETION_INITIALIZER_ONSTACK(comp.comp);

	i = write_start;
	for (n = 0; n < write_sections; n++, i++, wraparound_section(ic, &i)) {
#ifndef INTERNAL_VERIFY
		if (unlikely(from_replay))
#endif
			rw_section_mac(ic, i, false);
		for (j = 0; j < ic->journal_section_entries; j++) {
			struct journal_entry *je = access_journal_entry(ic, i, j);
			sector_t sec, area, offset;
			unsigned k, l, next_loop;
			sector_t metadata_block;
			unsigned metadata_offset;
			struct journal_io *io;

			if (journal_entry_is_unused(je))
				continue;
			BUG_ON(unlikely(journal_entry_is_inprogress(je)) && !from_replay);
			sec = journal_entry_get_sector(je);
			if (unlikely(from_replay)) {
				if (unlikely(sec & (unsigned)(ic->sectors_per_block - 1))) {
					dm_integrity_io_error(ic, "invalid sector in journal", -EIO);
					sec &= ~(sector_t)(ic->sectors_per_block - 1);
				}
			}
			get_area_and_offset(ic, sec, &area, &offset);
			restore_last_bytes(ic, access_journal_data(ic, i, j), je);
			for (k = j + 1; k < ic->journal_section_entries; k++) {
				struct journal_entry *je2 = access_journal_entry(ic, i, k);
				sector_t sec2, area2, offset2;
				if (journal_entry_is_unused(je2))
					break;
				BUG_ON(unlikely(journal_entry_is_inprogress(je2)) && !from_replay);
				sec2 = journal_entry_get_sector(je2);
				get_area_and_offset(ic, sec2, &area2, &offset2);
				if (area2 != area || offset2 != offset + ((k - j) << ic->sb->log2_sectors_per_block))
					break;
				restore_last_bytes(ic, access_journal_data(ic, i, k), je2);
			}
			next_loop = k - 1;

			io = mempool_alloc(ic->journal_io_mempool, GFP_NOIO);
			io->comp = &comp;
			io->range.logical_sector = sec;
			io->range.n_sectors = (k - j) << ic->sb->log2_sectors_per_block;

			spin_lock_irq(&ic->endio_wait.lock);
			while (unlikely(!add_new_range(ic, &io->range)))
				sleep_on_endio_wait(ic);

			if (likely(!from_replay)) {
				struct journal_node *section_node = &ic->journal_tree[i * ic->journal_section_entries];

				/* don't write if there is newer committed sector */
				while (j < k && find_newer_committed_node(ic, &section_node[j])) {
					struct journal_entry *je2 = access_journal_entry(ic, i, j);

					journal_entry_set_unused(je2);
					remove_journal_node(ic, &section_node[j]);
					j++;
					sec += ic->sectors_per_block;
					offset += ic->sectors_per_block;
				}
				while (j < k && find_newer_committed_node(ic, &section_node[k - 1])) {
					struct journal_entry *je2 = access_journal_entry(ic, i, k - 1);

					journal_entry_set_unused(je2);
					remove_journal_node(ic, &section_node[k - 1]);
					k--;
				}
				if (j == k) {
					remove_range_unlocked(ic, &io->range);
					spin_unlock_irq(&ic->endio_wait.lock);
					mempool_free(io, ic->journal_io_mempool);
					goto skip_io;
				}
				for (l = j; l < k; l++) {
					remove_journal_node(ic, &section_node[l]);
				}
			}
			spin_unlock_irq(&ic->endio_wait.lock);

			metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
			for (l = j; l < k; l++) {
				int r;
				struct journal_entry *je2 = access_journal_entry(ic, i, l);

				if (
#ifndef INTERNAL_VERIFY
				    unlikely(from_replay) &&
#endif
				    ic->internal_hash) {
					char test_tag[max(crypto_shash_digestsize(ic->internal_hash), ic->tag_size)];

					integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block),
								  (char *)access_journal_data(ic, i, l), test_tag);
					if (unlikely(memcmp(test_tag, journal_entry_tag(ic, je2), ic->tag_size)))
						dm_integrity_io_error(ic, "tag mismatch when replaying journal", -EILSEQ);
				}

				journal_entry_set_unused(je2);
				r = dm_integrity_rw_tag(ic, journal_entry_tag(ic, je2), &metadata_block, &metadata_offset,
							ic->tag_size, TAG_WRITE);
				if (unlikely(r)) {
					dm_integrity_io_error(ic, "reading tags", r);
				}
			}

			atomic_inc(&comp.in_flight);
			copy_from_journal(ic, i, j << ic->sb->log2_sectors_per_block,
					  (k - j) << ic->sb->log2_sectors_per_block,
					  get_data_sector(ic, area, offset),
					  complete_copy_from_journal, io);
skip_io:
			j = next_loop;
		}
	}

	dm_bufio_write_dirty_buffers_async(ic->bufio);

	complete_journal_op(&comp);
	wait_for_completion_io(&comp.comp);

	dm_integrity_flush_buffers(ic);
}
static void integrity_writer(struct work_struct *w)
{
	struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, writer_work);
	unsigned write_start, write_sections;

	unsigned prev_free_sectors;

	/* the following test is not needed, but it tests the replay code */
	if (ACCESS_ONCE(ic->suspending))
		return;

	spin_lock_irq(&ic->endio_wait.lock);
	write_start = ic->committed_section;
	write_sections = ic->n_committed_sections;
	spin_unlock_irq(&ic->endio_wait.lock);

	if (!write_sections)
		return;

	do_journal_write(ic, write_start, write_sections, false);

	spin_lock_irq(&ic->endio_wait.lock);

	ic->committed_section += write_sections;
	wraparound_section(ic, &ic->committed_section);
	ic->n_committed_sections -= write_sections;

	prev_free_sectors = ic->free_sectors;
	ic->free_sectors += write_sections * ic->journal_section_entries;
	if (unlikely(!prev_free_sectors))
		wake_up_locked(&ic->endio_wait);

	spin_unlock_irq(&ic->endio_wait.lock);
}
static void init_journal(struct dm_integrity_c *ic, unsigned start_section,
			 unsigned n_sections, unsigned char commit_seq)
{
	unsigned i, j, n;

	if (!n_sections)
		return;

	for (n = 0; n < n_sections; n++) {
		i = start_section + n;
		wraparound_section(ic, &i);
		for (j = 0; j < ic->journal_section_sectors; j++) {
			struct journal_sector *js = access_journal(ic, i, j);
			memset(&js->entries, 0, JOURNAL_SECTOR_DATA);
			js->commit_id = dm_integrity_commit_id(ic, i, j, commit_seq);
		}
		for (j = 0; j < ic->journal_section_entries; j++) {
			struct journal_entry *je = access_journal_entry(ic, i, j);
			journal_entry_set_unused(je);
		}
	}

	write_journal(ic, start_section, n_sections);
}
static int find_commit_seq(struct dm_integrity_c *ic, unsigned i, unsigned j, commit_id_t id)
{
	unsigned char k;
	for (k = 0; k < N_COMMIT_IDS; k++) {
		if (dm_integrity_commit_id(ic, i, j, k) == id)
			return k;
	}
	dm_integrity_io_error(ic, "journal commit id", -EIO);
	return -EIO;
}
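/*
 * Four commit seq values cycle through the journal; replay looks for the
 * newest seq that was completely written.  A negative return here means
 * the on-disk commit id matches none of the four expected values for this
 * (section, sector), i.e. the journal is corrupted.
 */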
static void replay_journal(struct dm_integrity_c *ic)
{
	unsigned i, j;
	bool used_commit_ids[N_COMMIT_IDS];
	unsigned max_commit_id_sections[N_COMMIT_IDS];
	unsigned write_start, write_sections;
	unsigned continue_section;
	bool journal_empty;
	unsigned char unused, last_used, want_commit_seq;

	if (ic->mode == 'R')
		return;

	if (ic->journal_uptodate)
		return;

	last_used = 0;
	write_start = 0;

	if (!ic->just_formatted) {
		DEBUG_print("reading journal\n");
		rw_journal(ic, REQ_OP_READ, 0, 0, ic->journal_sections, NULL);
		if (ic->journal_io)
			DEBUG_bytes(lowmem_page_address(ic->journal_io[0].page), 64, "read journal");
		if (ic->journal_io) {
			struct journal_completion crypt_comp;
			crypt_comp.ic = ic;
			crypt_comp.comp = COMPLETION_INITIALIZER_ONSTACK(crypt_comp.comp);
			crypt_comp.in_flight = (atomic_t)ATOMIC_INIT(0);
			encrypt_journal(ic, false, 0, ic->journal_sections, &crypt_comp);
			wait_for_completion(&crypt_comp.comp);
		}
		DEBUG_bytes(lowmem_page_address(ic->journal[0].page), 64, "decrypted journal");
	}

	if (dm_integrity_failed(ic))
		goto clear_journal;

	journal_empty = true;
	memset(used_commit_ids, 0, sizeof used_commit_ids);
	memset(max_commit_id_sections, 0, sizeof max_commit_id_sections);
	for (i = 0; i < ic->journal_sections; i++) {
		for (j = 0; j < ic->journal_section_sectors; j++) {
			int k;
			struct journal_sector *js = access_journal(ic, i, j);
			k = find_commit_seq(ic, i, j, js->commit_id);
			if (k < 0)
				goto clear_journal;
			used_commit_ids[k] = true;
			max_commit_id_sections[k] = i;
		}
		if (journal_empty) {
			for (j = 0; j < ic->journal_section_entries; j++) {
				struct journal_entry *je = access_journal_entry(ic, i, j);
				if (!journal_entry_is_unused(je)) {
					journal_empty = false;
					break;
				}
			}
		}
	}

	if (!used_commit_ids[N_COMMIT_IDS - 1]) {
		unused = N_COMMIT_IDS - 1;
		while (unused && !used_commit_ids[unused - 1])
			unused--;
	} else {
		for (unused = 0; unused < N_COMMIT_IDS; unused++)
			if (!used_commit_ids[unused])
				break;
		if (unused == N_COMMIT_IDS) {
			dm_integrity_io_error(ic, "journal commit ids", -EIO);
			goto clear_journal;
		}
	}
	DEBUG_print("first unused commit seq %d [%d,%d,%d,%d]\n",
		    unused, used_commit_ids[0], used_commit_ids[1],
		    used_commit_ids[2], used_commit_ids[3]);

	last_used = prev_commit_seq(unused);
	want_commit_seq = prev_commit_seq(last_used);

	if (!used_commit_ids[want_commit_seq] && used_commit_ids[prev_commit_seq(want_commit_seq)])
		journal_empty = true;

	write_start = max_commit_id_sections[last_used] + 1;
	if (unlikely(write_start >= ic->journal_sections))
		want_commit_seq = next_commit_seq(want_commit_seq);
	wraparound_section(ic, &write_start);

	i = write_start;
	for (write_sections = 0; write_sections < ic->journal_sections; write_sections++) {
		for (j = 0; j < ic->journal_section_sectors; j++) {
			struct journal_sector *js = access_journal(ic, i, j);

			if (js->commit_id != dm_integrity_commit_id(ic, i, j, want_commit_seq)) {
				/*
				 * This could be caused by a crash during writing.
				 * We won't replay the inconsistent part of the
				 * journal.
				 */
				DEBUG_print("commit id mismatch at position (%u, %u): %d != %d\n",
					    i, j, find_commit_seq(ic, i, j, js->commit_id), want_commit_seq);
				goto brk;
			}
		}
		i++;
		if (unlikely(i >= ic->journal_sections))
			want_commit_seq = next_commit_seq(want_commit_seq);
		wraparound_section(ic, &i);
	}
brk:

	if (!journal_empty) {
		DEBUG_print("replaying %u sections, starting at %u, commit seq %d\n",
			    write_sections, write_start, want_commit_seq);
		do_journal_write(ic, write_start, write_sections, true);
	}

	if (write_sections == ic->journal_sections && (ic->mode == 'J' || journal_empty)) {
		continue_section = write_start;
		ic->commit_seq = want_commit_seq;
		DEBUG_print("continuing from section %u, commit seq %d\n", write_start, ic->commit_seq);
	} else {
		unsigned s;
		unsigned char erase_seq;
clear_journal:
		DEBUG_print("clearing journal\n");

		erase_seq = prev_commit_seq(prev_commit_seq(last_used));
		s = write_start;
		init_journal(ic, s, 1, erase_seq);
		s++;
		wraparound_section(ic, &s);
		if (ic->journal_sections >= 2) {
			init_journal(ic, s, ic->journal_sections - 2, erase_seq);
			s += ic->journal_sections - 2;
			wraparound_section(ic, &s);
			init_journal(ic, s, 1, erase_seq);
		}

		continue_section = 0;
		ic->commit_seq = next_commit_seq(erase_seq);
	}

	ic->committed_section = continue_section;
	ic->n_committed_sections = 0;

	ic->uncommitted_section = continue_section;
	ic->n_uncommitted_sections = 0;

	ic->free_section = continue_section;
	ic->free_section_entry = 0;
	ic->free_sectors = ic->journal_entries;

	ic->journal_tree_root = RB_ROOT;
	for (i = 0; i < ic->journal_entries; i++)
		init_journal_node(&ic->journal_tree[i]);
}
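
/*
 * Postsuspend: stop the autocommit timer, push a final commit through the
 * commit and writer workqueues and flush the buffers, so that no I/O is
 * in flight when the target is suspended.
 */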
static void dm_integrity_postsuspend(struct dm_target *ti)
{
	struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;

	del_timer_sync(&ic->autocommit_timer);

	ic->suspending = true;

	queue_work(ic->commit_wq, &ic->commit_work);
	drain_workqueue(ic->commit_wq);

	if (ic->mode == 'J') {
		drain_workqueue(ic->writer_wq);
		dm_integrity_flush_buffers(ic);
	}

	ic->suspending = false;

	BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));

	ic->journal_uptodate = true;
}
static void dm_integrity_resume(struct dm_target *ti)
{
	struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;

	replay_journal(ic);
}
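
/*
 * Status reporting. STATUSTYPE_TABLE reconstructs the constructor line;
 * the journal_watermark percentage is recomputed from the free-sectors
 * threshold, rounded to the nearest percent.
 */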
static void dm_integrity_status(struct dm_target *ti, status_type_t type,
				unsigned status_flags, char *result, unsigned maxlen)
{
	struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
	unsigned arg_count;
	size_t sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE: {
		__u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100;
		watermark_percentage += ic->journal_entries / 2;
		do_div(watermark_percentage, ic->journal_entries);
		arg_count = 5;
		arg_count += ic->sectors_per_block != 1;
		arg_count += !!ic->internal_hash_alg.alg_string;
		arg_count += !!ic->journal_crypt_alg.alg_string;
		arg_count += !!ic->journal_mac_alg.alg_string;
		DMEMIT("%s %llu %u %c %u", ic->dev->name, (unsigned long long)ic->start,
		       ic->tag_size, ic->mode, arg_count);
		DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS);
		DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
		DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors);
		DMEMIT(" journal_watermark:%u", (unsigned)watermark_percentage);
		DMEMIT(" commit_time:%u", ic->autocommit_msec);
		if (ic->sectors_per_block != 1)
			DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT);

#define EMIT_ALG(a, n)							\
		do {							\
			if (ic->a.alg_string) {				\
				DMEMIT(" %s:%s", n, ic->a.alg_string);	\
				if (ic->a.key_string)			\
					DMEMIT(":%s", ic->a.key_string);\
			}						\
		} while (0)
		EMIT_ALG(internal_hash_alg, "internal_hash");
		EMIT_ALG(journal_crypt_alg, "journal_crypt");
		EMIT_ALG(journal_mac_alg, "journal_mac");
		break;
	}
	}
}
static int dm_integrity_iterate_devices(struct dm_target *ti,
					iterate_devices_callout_fn fn, void *data)
{
	struct dm_integrity_c *ic = ti->private;

	return fn(ti, ic->dev, ic->start + ic->initial_sectors + ic->metadata_run, ti->len, data);
}
static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct dm_integrity_c *ic = ti->private;

	if (ic->sectors_per_block > 1) {
		limits->logical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
		limits->physical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
		blk_limits_io_min(limits, ic->sectors_per_block << SECTOR_SHIFT);
	}
}
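
/*
 * Derive the journal geometry from the superblock: the number of journal
 * entries that fit into one sector (less the space reserved for the MAC,
 * if any), the entries per section, and the total size of a section --
 * JOURNAL_BLOCK_SECTORS metadata sectors plus the data blocks they describe.
 */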
static void calculate_journal_section_size(struct dm_integrity_c *ic)
{
	unsigned sector_space = JOURNAL_SECTOR_DATA;

	ic->journal_sections = le32_to_cpu(ic->sb->journal_sections);
	ic->journal_entry_size = roundup(offsetof(struct journal_entry, last_bytes[ic->sectors_per_block]) + ic->tag_size,
					 JOURNAL_ENTRY_ROUNDUP);

	if (ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC))
		sector_space -= JOURNAL_MAC_PER_SECTOR;
	ic->journal_entries_per_sector = sector_space / ic->journal_entry_size;
	ic->journal_section_entries = ic->journal_entries_per_sector * JOURNAL_BLOCK_SECTORS;
	ic->journal_section_sectors = (ic->journal_section_entries << ic->sb->log2_sectors_per_block) + JOURNAL_BLOCK_SECTORS;
	ic->journal_entries = ic->journal_section_entries * ic->journal_sections;
}
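
/*
 * Compute the device layout (sectors used by the superblock and journal,
 * and the size of one metadata run) and verify that the metadata and the
 * last data sector still fit on the underlying device.
 */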
static int calculate_device_limits(struct dm_integrity_c *ic)
{
	__u64 initial_sectors;
	sector_t last_sector, last_area, last_offset;

	calculate_journal_section_size(ic);
	initial_sectors = SB_SECTORS + (__u64)ic->journal_section_sectors * ic->journal_sections;
	if (initial_sectors + METADATA_PADDING_SECTORS >= ic->device_sectors || initial_sectors > UINT_MAX)
		return -EINVAL;
	ic->initial_sectors = initial_sectors;

	ic->metadata_run = roundup((__u64)ic->tag_size << (ic->sb->log2_interleave_sectors - ic->sb->log2_sectors_per_block),
				   (__u64)(1 << SECTOR_SHIFT << METADATA_PADDING_SECTORS)) >> SECTOR_SHIFT;
	if (!(ic->metadata_run & (ic->metadata_run - 1)))
		ic->log2_metadata_run = __ffs(ic->metadata_run);
	else
		ic->log2_metadata_run = -1;

	get_area_and_offset(ic, ic->provided_data_sectors - 1, &last_area, &last_offset);
	last_sector = get_data_sector(ic, last_area, last_offset);

	if (ic->start + last_sector < last_sector || ic->start + last_sector >= ic->device_sectors)
		return -EINVAL;

	return 0;
}
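
/*
 * Format a fresh superblock. provided_data_sectors is maximized by a
 * greedy bit-by-bit search: each bit is tried from the highest downwards
 * and kept only if calculate_device_limits() still succeeds.
 */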
static int initialize_superblock(struct dm_integrity_c *ic, unsigned journal_sectors, unsigned interleave_sectors)
{
	unsigned journal_sections;
	int test_bit;

	memset(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT);
	memcpy(ic->sb->magic, SB_MAGIC, 8);
	ic->sb->version = SB_VERSION;
	ic->sb->integrity_tag_size = cpu_to_le16(ic->tag_size);
	ic->sb->log2_sectors_per_block = __ffs(ic->sectors_per_block);
	if (ic->journal_mac_alg.alg_string)
		ic->sb->flags |= cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC);

	calculate_journal_section_size(ic);
	journal_sections = journal_sectors / ic->journal_section_sectors;
	if (!journal_sections)
		journal_sections = 1;
	ic->sb->journal_sections = cpu_to_le32(journal_sections);

	if (!interleave_sectors)
		interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
	ic->sb->log2_interleave_sectors = __fls(interleave_sectors);
	ic->sb->log2_interleave_sectors = max((__u8)MIN_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
	ic->sb->log2_interleave_sectors = min((__u8)MAX_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);

	ic->provided_data_sectors = 0;
	for (test_bit = fls64(ic->device_sectors) - 1; test_bit >= 3; test_bit--) {
		__u64 prev_data_sectors = ic->provided_data_sectors;

		ic->provided_data_sectors |= (sector_t)1 << test_bit;
		if (calculate_device_limits(ic))
			ic->provided_data_sectors = prev_data_sectors;
	}

	if (!ic->provided_data_sectors)
		return -EINVAL;

	ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors);

	return 0;
}
static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic)
{
	struct gendisk *disk = dm_disk(dm_table_get_md(ti->table));
	struct blk_integrity bi;

	memset(&bi, 0, sizeof(bi));
	bi.profile = &dm_integrity_profile;
	bi.tuple_size = ic->tag_size;
	bi.tag_size = bi.tuple_size;
	bi.interval_exp = ic->sb->log2_sectors_per_block + SECTOR_SHIFT;

	blk_integrity_register(disk, &bi);
	blk_queue_max_integrity_segments(disk->queue, UINT_MAX);
}
static void dm_integrity_free_page_list(struct dm_integrity_c *ic, struct page_list *pl)
{
	unsigned i;

	if (!pl)
		return;
	for (i = 0; i < ic->journal_pages; i++)
		if (pl[i].page)
			__free_page(pl[i].page);
	kvfree(pl);
}
static struct page_list *dm_integrity_alloc_page_list(struct dm_integrity_c *ic)
{
	size_t page_list_desc_size = ic->journal_pages * sizeof(struct page_list);
	struct page_list *pl;
	unsigned i;

	pl = kvmalloc(page_list_desc_size, GFP_KERNEL | __GFP_ZERO);
	if (!pl)
		return NULL;

	for (i = 0; i < ic->journal_pages; i++) {
		pl[i].page = alloc_page(GFP_KERNEL);
		if (!pl[i].page) {
			dm_integrity_free_page_list(ic, pl);
			return NULL;
		}
		if (i)
			pl[i - 1].next = &pl[i];
	}

	return pl;
}
static void dm_integrity_free_journal_scatterlist(struct dm_integrity_c *ic, struct scatterlist **sl)
{
	unsigned i;
	for (i = 0; i < ic->journal_sections; i++)
		kvfree(sl[i]);
	kvfree(sl);
}
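
/*
 * Build one scatterlist per journal section, mapping the section's sectors
 * (which may straddle page boundaries) onto the underlying journal pages.
 */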
static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic, struct page_list *pl)
{
	struct scatterlist **sl;
	unsigned i;

	sl = kvmalloc(ic->journal_sections * sizeof(struct scatterlist *), GFP_KERNEL | __GFP_ZERO);
	if (!sl)
		return NULL;

	for (i = 0; i < ic->journal_sections; i++) {
		struct scatterlist *s;
		unsigned start_index, start_offset;
		unsigned end_index, end_offset;
		unsigned n_pages;
		unsigned idx;

		page_list_location(ic, i, 0, &start_index, &start_offset);
		page_list_location(ic, i, ic->journal_section_sectors - 1, &end_index, &end_offset);

		n_pages = (end_index - start_index + 1);

		s = kvmalloc(n_pages * sizeof(struct scatterlist), GFP_KERNEL);
		if (!s) {
			dm_integrity_free_journal_scatterlist(ic, sl);
			return NULL;
		}

		sg_init_table(s, n_pages);
		for (idx = start_index; idx <= end_index; idx++) {
			char *va = lowmem_page_address(pl[idx].page);
			unsigned start = 0, end = PAGE_SIZE;
			if (idx == start_index)
				start = start_offset;
			if (idx == end_index)
				end = end_offset + (1 << SECTOR_SHIFT);
			sg_set_buf(&s[idx - start_index], va + start, end - start);
		}

		sl[i] = s;
	}

	return sl;
}
static void free_alg(struct alg_spec *a)
{
	kzfree(a->alg_string);
	kzfree(a->key);
	memset(a, 0, sizeof *a);
}
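
/*
 * Parse an "option:algorithm[:key]" feature argument. A key, if present,
 * is a hex string with an even number of digits and is decoded into
 * a->key with hex2bin().
 */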
static int get_alg_and_key(const char *arg, struct alg_spec *a, char **error, char *error_inval)
{
	char *k;

	free_alg(a);
	a->alg_string = kstrdup(strchr(arg, ':') + 1, GFP_KERNEL);
	if (!a->alg_string)
		goto nomem;

	k = strchr(a->alg_string, ':');
	if (k) {
		*k = 0;
		a->key_string = k + 1;
		if (strlen(a->key_string) & 1)
			goto inval;

		a->key_size = strlen(a->key_string) / 2;
		a->key = kmalloc(a->key_size, GFP_KERNEL);
		if (!a->key)
			goto nomem;
		if (hex2bin(a->key, a->key_string, a->key_size))
			goto inval;
	}

	return 0;
inval:
	*error = error_inval;
	return -EINVAL;
nomem:
	*error = "Out of memory for an argument";
	return -ENOMEM;
}
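
/*
 * Allocate a synchronous shash transform for an algorithm spec and set its
 * key if one was given; used for both the internal hash and the journal MAC.
 */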
static int get_mac(struct crypto_shash **hash, struct alg_spec *a, char **error,
		   char *error_alg, char *error_key)
{
	int r;

	if (a->alg_string) {
		*hash = crypto_alloc_shash(a->alg_string, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(*hash)) {
			*error = error_alg;
			r = PTR_ERR(*hash);
			*hash = NULL;
			return r;
		}

		if (a->key) {
			r = crypto_shash_setkey(*hash, a->key, a->key_size);
			if (r) {
				*error = error_key;
				return r;
			}
		}
	}

	return 0;
}
static int create_journal(struct dm_integrity_c *ic, char **error)
{
	int r = 0;
	unsigned i;
	__u64 journal_pages, journal_desc_size, journal_tree_size;
	unsigned char *crypt_data = NULL;

	ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL);
	ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL);
	ic->commit_ids[2] = cpu_to_le64(0x3333333333333333ULL);
	ic->commit_ids[3] = cpu_to_le64(0x4444444444444444ULL);

	journal_pages = roundup((__u64)ic->journal_sections * ic->journal_section_sectors,
				PAGE_SIZE >> SECTOR_SHIFT) >> (PAGE_SHIFT - SECTOR_SHIFT);
	journal_desc_size = journal_pages * sizeof(struct page_list);
	if (journal_pages >= totalram_pages - totalhigh_pages || journal_desc_size > ULONG_MAX) {
		*error = "Journal doesn't fit into memory";
		r = -ENOMEM;
		goto bad;
	}
	ic->journal_pages = journal_pages;

	ic->journal = dm_integrity_alloc_page_list(ic);
	if (!ic->journal) {
		*error = "Could not allocate memory for journal";
		r = -ENOMEM;
		goto bad;
	}
	if (ic->journal_crypt_alg.alg_string) {
		unsigned ivsize, blocksize;
		struct journal_completion comp;

		comp.ic = ic;
		ic->journal_crypt = crypto_alloc_skcipher(ic->journal_crypt_alg.alg_string, 0, 0);
		if (IS_ERR(ic->journal_crypt)) {
			*error = "Invalid journal cipher";
			r = PTR_ERR(ic->journal_crypt);
			ic->journal_crypt = NULL;
			goto bad;
		}
		ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
		blocksize = crypto_skcipher_blocksize(ic->journal_crypt);

		if (ic->journal_crypt_alg.key) {
			r = crypto_skcipher_setkey(ic->journal_crypt, ic->journal_crypt_alg.key,
						   ic->journal_crypt_alg.key_size);
			if (r) {
				*error = "Error setting encryption key";
				goto bad;
			}
		}
		DEBUG_print("cipher %s, block size %u iv size %u\n",
			    ic->journal_crypt_alg.alg_string, blocksize, ivsize);

		ic->journal_io = dm_integrity_alloc_page_list(ic);
		if (!ic->journal_io) {
			*error = "Could not allocate memory for journal io";
			r = -ENOMEM;
			goto bad;
		}

		if (blocksize == 1) {
			struct scatterlist *sg;
			SKCIPHER_REQUEST_ON_STACK(req, ic->journal_crypt);
			unsigned char iv[ivsize];
			skcipher_request_set_tfm(req, ic->journal_crypt);

			ic->journal_xor = dm_integrity_alloc_page_list(ic);
			if (!ic->journal_xor) {
				*error = "Could not allocate memory for journal xor";
				r = -ENOMEM;
				goto bad;
			}

			sg = kvmalloc((ic->journal_pages + 1) * sizeof(struct scatterlist), GFP_KERNEL);
			if (!sg) {
				*error = "Unable to allocate sg list";
				r = -ENOMEM;
				goto bad;
			}
			sg_init_table(sg, ic->journal_pages + 1);
			for (i = 0; i < ic->journal_pages; i++) {
				char *va = lowmem_page_address(ic->journal_xor[i].page);
				clear_page(va);
				sg_set_buf(&sg[i], va, PAGE_SIZE);
			}
			sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids);
			memset(iv, 0x00, ivsize);

			skcipher_request_set_crypt(req, sg, sg, PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, iv);
			comp.comp = COMPLETION_INITIALIZER_ONSTACK(comp.comp);
			comp.in_flight = (atomic_t)ATOMIC_INIT(1);
			if (do_crypt(true, req, &comp))
				wait_for_completion(&comp.comp);
			kvfree(sg);
			r = dm_integrity_failed(ic);
			if (r) {
				*error = "Unable to encrypt journal";
				goto bad;
			}
			DEBUG_bytes(lowmem_page_address(ic->journal_xor[0].page), 64, "xor data");

			crypto_free_skcipher(ic->journal_crypt);
			ic->journal_crypt = NULL;
		} else {
			SKCIPHER_REQUEST_ON_STACK(req, ic->journal_crypt);
			unsigned char iv[ivsize];
			unsigned crypt_len = roundup(ivsize, blocksize);

			crypt_data = kmalloc(crypt_len, GFP_KERNEL);
			if (!crypt_data) {
				*error = "Unable to allocate crypt data";
				r = -ENOMEM;
				goto bad;
			}

			skcipher_request_set_tfm(req, ic->journal_crypt);

			ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal);
			if (!ic->journal_scatterlist) {
				*error = "Unable to allocate sg list";
				r = -ENOMEM;
				goto bad;
			}
			ic->journal_io_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal_io);
			if (!ic->journal_io_scatterlist) {
				*error = "Unable to allocate sg list";
				r = -ENOMEM;
				goto bad;
			}
			ic->sk_requests = kvmalloc(ic->journal_sections * sizeof(struct skcipher_request *),
						   GFP_KERNEL | __GFP_ZERO);
			if (!ic->sk_requests) {
				*error = "Unable to allocate sk requests";
				r = -ENOMEM;
				goto bad;
			}
			for (i = 0; i < ic->journal_sections; i++) {
				struct scatterlist sg;
				struct skcipher_request *section_req;
				__u32 section_le = cpu_to_le32(i);

				memset(iv, 0x00, ivsize);
				memset(crypt_data, 0x00, crypt_len);
				memcpy(crypt_data, &section_le, min((size_t)crypt_len, sizeof(section_le)));

				sg_init_one(&sg, crypt_data, crypt_len);
				skcipher_request_set_crypt(req, &sg, &sg, crypt_len, iv);
				comp.comp = COMPLETION_INITIALIZER_ONSTACK(comp.comp);
				comp.in_flight = (atomic_t)ATOMIC_INIT(1);
				if (do_crypt(true, req, &comp))
					wait_for_completion(&comp.comp);

				r = dm_integrity_failed(ic);
				if (r) {
					*error = "Unable to generate iv";
					goto bad;
				}

				section_req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
				if (!section_req) {
					*error = "Unable to allocate crypt request";
					r = -ENOMEM;
					goto bad;
				}
				section_req->iv = kmalloc(ivsize * 2, GFP_KERNEL);
				if (!section_req->iv) {
					skcipher_request_free(section_req);
					*error = "Unable to allocate iv";
					r = -ENOMEM;
					goto bad;
				}
				memcpy(section_req->iv + ivsize, crypt_data, ivsize);
				section_req->cryptlen = (size_t)ic->journal_section_sectors << SECTOR_SHIFT;
				ic->sk_requests[i] = section_req;
				DEBUG_bytes(crypt_data, ivsize, "iv(%u)", i);
			}
		}
	}

	for (i = 0; i < N_COMMIT_IDS; i++) {
		unsigned j;
retest_commit_id:
		for (j = 0; j < i; j++) {
			if (ic->commit_ids[j] == ic->commit_ids[i]) {
				ic->commit_ids[i] = cpu_to_le64(le64_to_cpu(ic->commit_ids[i]) + 1);
				goto retest_commit_id;
			}
		}
		DEBUG_print("commit id %u: %016llx\n", i, ic->commit_ids[i]);
	}

	journal_tree_size = (__u64)ic->journal_entries * sizeof(struct journal_node);
	if (journal_tree_size > ULONG_MAX) {
		*error = "Journal doesn't fit into memory";
		r = -ENOMEM;
		goto bad;
	}
	ic->journal_tree = kvmalloc(journal_tree_size, GFP_KERNEL);
	if (!ic->journal_tree) {
		*error = "Could not allocate memory for journal tree";
		r = -ENOMEM;
	}
bad:
	kfree(crypt_data);
	return r;
}
/*
 * Construct an integrity mapping
 *
 * Arguments:
 *	device
 *	offset from the start of the device
 *	tag size
 *	D - direct writes, J - journal writes, R - recovery mode
 *	number of optional arguments
 *	optional arguments:
 *		journal_sectors
 *		interleave_sectors
 *		buffer_sectors
 *		journal_watermark
 *		commit_time
 *		internal_hash
 *		journal_crypt
 *		journal_mac
 *		block_size
 */
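/*
 * An illustrative table line (the device path and sector count below are
 * placeholders) for journaled mode with a crc32c internal hash, whose
 * 4-byte digest matches the "-" (automatic) tag size:
 *
 *	dmsetup create dm-int --table \
 *		"0 1953792 integrity /dev/sdb 0 - J 1 internal_hash:crc32c"
 */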
static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	struct dm_integrity_c *ic;
	char dummy;
	int r;
	unsigned extra_args;
	struct dm_arg_set as;
	static struct dm_arg _args[] = {
		{0, 9, "Invalid number of feature args"},
	};
	unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
	bool should_write_sb;
	__u64 threshold;
	unsigned long long start;

#define DIRECT_ARGUMENTS	4

	if (argc <= DIRECT_ARGUMENTS) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	ic = kzalloc(sizeof(struct dm_integrity_c), GFP_KERNEL);
	if (!ic) {
		ti->error = "Cannot allocate integrity context";
		return -ENOMEM;
	}
	ti->private = ic;
	ti->per_io_data_size = sizeof(struct dm_integrity_io);

	ic->in_progress = RB_ROOT;
	init_waitqueue_head(&ic->endio_wait);
	bio_list_init(&ic->flush_bio_list);
	init_waitqueue_head(&ic->copy_to_journal_wait);
	init_completion(&ic->crypto_backoff);

	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ic->dev);
	if (r) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	if (sscanf(argv[1], "%llu%c", &start, &dummy) != 1 || start != (sector_t)start) {
		ti->error = "Invalid starting offset";
		r = -EINVAL;
		goto bad;
	}
	ic->start = start;

	if (strcmp(argv[2], "-")) {
		if (sscanf(argv[2], "%u%c", &ic->tag_size, &dummy) != 1 || !ic->tag_size) {
			ti->error = "Invalid tag size";
			r = -EINVAL;
			goto bad;
		}
	}

	if (!strcmp(argv[3], "J") || !strcmp(argv[3], "D") || !strcmp(argv[3], "R"))
		ic->mode = argv[3][0];
	else {
		ti->error = "Invalid mode (expecting J, D, R)";
		r = -EINVAL;
		goto bad;
	}

	ic->device_sectors = i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT;
	journal_sectors = min((sector_t)DEFAULT_MAX_JOURNAL_SECTORS,
			      ic->device_sectors >> DEFAULT_JOURNAL_SIZE_FACTOR);
	interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
	buffer_sectors = DEFAULT_BUFFER_SECTORS;
	journal_watermark = DEFAULT_JOURNAL_WATERMARK;
	sync_msec = DEFAULT_SYNC_MSEC;
	ic->sectors_per_block = 1;

	as.argc = argc - DIRECT_ARGUMENTS;
	as.argv = argv + DIRECT_ARGUMENTS;
	r = dm_read_arg_group(_args, &as, &extra_args, &ti->error);
	if (r)
		goto bad;

	while (extra_args--) {
		const char *opt_string;
		unsigned val;
		opt_string = dm_shift_arg(&as);
		if (!opt_string) {
			r = -EINVAL;
			ti->error = "Not enough feature arguments";
			goto bad;
		}
		if (sscanf(opt_string, "journal_sectors:%u%c", &val, &dummy) == 1)
			journal_sectors = val;
		else if (sscanf(opt_string, "interleave_sectors:%u%c", &val, &dummy) == 1)
			interleave_sectors = val;
		else if (sscanf(opt_string, "buffer_sectors:%u%c", &val, &dummy) == 1)
			buffer_sectors = val;
		else if (sscanf(opt_string, "journal_watermark:%u%c", &val, &dummy) == 1 && val <= 100)
			journal_watermark = val;
		else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1)
			sync_msec = val;
		else if (sscanf(opt_string, "block_size:%u%c", &val, &dummy) == 1) {
			if (val < 1 << SECTOR_SHIFT ||
			    val > MAX_SECTORS_PER_BLOCK << SECTOR_SHIFT ||
			    (val & (val - 1))) {
				r = -EINVAL;
				ti->error = "Invalid block_size argument";
				goto bad;
			}
			ic->sectors_per_block = val >> SECTOR_SHIFT;
		} else if (!memcmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
			r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error,
					    "Invalid internal_hash argument");
			if (r)
				goto bad;
		} else if (!memcmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
			r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error,
					    "Invalid journal_crypt argument");
			if (r)
				goto bad;
		} else if (!memcmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
			r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error,
					    "Invalid journal_mac argument");
			if (r)
				goto bad;
		} else {
			r = -EINVAL;
			ti->error = "Invalid argument";
			goto bad;
		}
	}

	r = get_mac(&ic->internal_hash, &ic->internal_hash_alg, &ti->error,
		    "Invalid internal hash", "Error setting internal hash key");
	if (r)
		goto bad;

	r = get_mac(&ic->journal_mac, &ic->journal_mac_alg, &ti->error,
		    "Invalid journal mac", "Error setting journal mac key");
	if (r)
		goto bad;

	if (!ic->tag_size) {
		if (!ic->internal_hash) {
			ti->error = "Unknown tag size";
			r = -EINVAL;
			goto bad;
		}
		ic->tag_size = crypto_shash_digestsize(ic->internal_hash);
	}
	if (ic->tag_size > MAX_TAG_SIZE) {
		ti->error = "Too big tag size";
		r = -EINVAL;
		goto bad;
	}
	if (!(ic->tag_size & (ic->tag_size - 1)))
		ic->log2_tag_size = __ffs(ic->tag_size);
	else
		ic->log2_tag_size = -1;

	ic->autocommit_jiffies = msecs_to_jiffies(sync_msec);
	ic->autocommit_msec = sync_msec;
	setup_timer(&ic->autocommit_timer, autocommit_fn, (unsigned long)ic);

	ic->io = dm_io_client_create();
	if (IS_ERR(ic->io)) {
		r = PTR_ERR(ic->io);
		ic->io = NULL;
		ti->error = "Cannot allocate dm io";
		goto bad;
	}

	ic->journal_io_mempool = mempool_create_slab_pool(JOURNAL_IO_MEMPOOL, journal_io_cache);
	if (!ic->journal_io_mempool) {
		r = -ENOMEM;
		ti->error = "Cannot allocate mempool";
		goto bad;
	}

	ic->metadata_wq = alloc_workqueue("dm-integrity-metadata",
					  WQ_MEM_RECLAIM, METADATA_WORKQUEUE_MAX_ACTIVE);
	if (!ic->metadata_wq) {
		ti->error = "Cannot allocate workqueue";
		r = -ENOMEM;
		goto bad;
	}

	/*
	 * If this workqueue were percpu, it would cause bio reordering
	 * and reduced performance.
	 */
	ic->wait_wq = alloc_workqueue("dm-integrity-wait", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
	if (!ic->wait_wq) {
		ti->error = "Cannot allocate workqueue";
		r = -ENOMEM;
		goto bad;
	}

	ic->commit_wq = alloc_workqueue("dm-integrity-commit", WQ_MEM_RECLAIM, 1);
	if (!ic->commit_wq) {
		ti->error = "Cannot allocate workqueue";
		r = -ENOMEM;
		goto bad;
	}
	INIT_WORK(&ic->commit_work, integrity_commit);

	if (ic->mode == 'J') {
		ic->writer_wq = alloc_workqueue("dm-integrity-writer", WQ_MEM_RECLAIM, 1);
		if (!ic->writer_wq) {
			ti->error = "Cannot allocate workqueue";
			r = -ENOMEM;
			goto bad;
		}
		INIT_WORK(&ic->writer_work, integrity_writer);
	}

	ic->sb = alloc_pages_exact(SB_SECTORS << SECTOR_SHIFT, GFP_KERNEL);
	if (!ic->sb) {
		r = -ENOMEM;
		ti->error = "Cannot allocate superblock area";
		goto bad;
	}

	r = sync_rw_sb(ic, REQ_OP_READ, 0);
	if (r) {
		ti->error = "Error reading superblock";
		goto bad;
	}
	should_write_sb = false;
	if (memcmp(ic->sb->magic, SB_MAGIC, 8)) {
		if (ic->mode != 'R') {
			if (memchr_inv(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT)) {
				r = -EINVAL;
				ti->error = "The device is not initialized";
				goto bad;
			}
		}

		r = initialize_superblock(ic, journal_sectors, interleave_sectors);
		if (r) {
			ti->error = "Could not initialize superblock";
			goto bad;
		}
		if (ic->mode != 'R')
			should_write_sb = true;
	}

	if (ic->sb->version != SB_VERSION) {
		r = -EINVAL;
		ti->error = "Unknown version";
		goto bad;
	}
	if (le16_to_cpu(ic->sb->integrity_tag_size) != ic->tag_size) {
		r = -EINVAL;
		ti->error = "Tag size doesn't match the information in superblock";
		goto bad;
	}
	if (ic->sb->log2_sectors_per_block != __ffs(ic->sectors_per_block)) {
		r = -EINVAL;
		ti->error = "Block size doesn't match the information in superblock";
		goto bad;
	}
	/* make sure that ti->max_io_len doesn't overflow */
	if (ic->sb->log2_interleave_sectors < MIN_LOG2_INTERLEAVE_SECTORS ||
	    ic->sb->log2_interleave_sectors > MAX_LOG2_INTERLEAVE_SECTORS) {
		r = -EINVAL;
		ti->error = "Invalid interleave_sectors in the superblock";
		goto bad;
	}
	ic->provided_data_sectors = le64_to_cpu(ic->sb->provided_data_sectors);
	if (ic->provided_data_sectors != le64_to_cpu(ic->sb->provided_data_sectors)) {
		/* test for overflow */
		r = -EINVAL;
		ti->error = "The superblock has 64-bit device size, but the kernel was compiled with 32-bit sectors";
		goto bad;
	}
	if (!!(ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC)) != !!ic->journal_mac_alg.alg_string) {
		r = -EINVAL;
		ti->error = "Journal mac mismatch";
		goto bad;
	}

	r = calculate_device_limits(ic);
	if (r) {
		ti->error = "The device is too small";
		goto bad;
	}

	if (!buffer_sectors)
		buffer_sectors = 1;
	ic->log2_buffer_sectors = min3((int)__fls(buffer_sectors), (int)__ffs(ic->metadata_run), 31 - SECTOR_SHIFT);

	threshold = (__u64)ic->journal_entries * (100 - journal_watermark);
	threshold += 50;
	do_div(threshold, 100);
	ic->free_sectors_threshold = threshold;

	DEBUG_print("initialized:\n");
	DEBUG_print("	integrity_tag_size %u\n", le16_to_cpu(ic->sb->integrity_tag_size));
	DEBUG_print("	journal_entry_size %u\n", ic->journal_entry_size);
	DEBUG_print("	journal_entries_per_sector %u\n", ic->journal_entries_per_sector);
	DEBUG_print("	journal_section_entries %u\n", ic->journal_section_entries);
	DEBUG_print("	journal_section_sectors %u\n", ic->journal_section_sectors);
	DEBUG_print("	journal_sections %u\n", (unsigned)le32_to_cpu(ic->sb->journal_sections));
	DEBUG_print("	journal_entries %u\n", ic->journal_entries);
	DEBUG_print("	log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors);
	DEBUG_print("	device_sectors 0x%llx\n", (unsigned long long)ic->device_sectors);
	DEBUG_print("	initial_sectors 0x%x\n", ic->initial_sectors);
	DEBUG_print("	metadata_run 0x%x\n", ic->metadata_run);
	DEBUG_print("	log2_metadata_run %d\n", ic->log2_metadata_run);
	DEBUG_print("	provided_data_sectors 0x%llx (%llu)\n", (unsigned long long)ic->provided_data_sectors,
		    (unsigned long long)ic->provided_data_sectors);
	DEBUG_print("	log2_buffer_sectors %u\n", ic->log2_buffer_sectors);

	ic->bufio = dm_bufio_client_create(ic->dev->bdev, 1U << (SECTOR_SHIFT + ic->log2_buffer_sectors),
					   1, 0, NULL, NULL);
	if (IS_ERR(ic->bufio)) {
		r = PTR_ERR(ic->bufio);
		ti->error = "Cannot initialize dm-bufio";
		ic->bufio = NULL;
		goto bad;
	}
	dm_bufio_set_sector_offset(ic->bufio, ic->start + ic->initial_sectors);

	if (ic->mode != 'R') {
		r = create_journal(ic, &ti->error);
		if (r)
			goto bad;
	}

	if (should_write_sb) {
		init_journal(ic, 0, ic->journal_sections, 0);
		r = dm_integrity_failed(ic);
		if (unlikely(r)) {
			ti->error = "Error initializing journal";
			goto bad;
		}
		r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
		if (r) {
			ti->error = "Error initializing superblock";
			goto bad;
		}
		ic->just_formatted = true;
	}

	r = dm_set_target_max_io_len(ti, 1U << ic->sb->log2_interleave_sectors);
	if (r)
		goto bad;

	if (!ic->internal_hash)
		dm_integrity_set(ti, ic);

	ti->num_flush_bios = 1;
	ti->flush_supported = true;

	return 0;
bad:
	dm_integrity_dtr(ti);
	return r;
}
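
/*
 * Destructor; also used as the constructor's error path, so every branch
 * must tolerate fields that were never allocated.
 */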
static void dm_integrity_dtr(struct dm_target *ti)
{
	struct dm_integrity_c *ic = ti->private;

	BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));

	if (ic->metadata_wq)
		destroy_workqueue(ic->metadata_wq);
	if (ic->wait_wq)
		destroy_workqueue(ic->wait_wq);
	if (ic->commit_wq)
		destroy_workqueue(ic->commit_wq);
	if (ic->writer_wq)
		destroy_workqueue(ic->writer_wq);
	if (ic->bufio)
		dm_bufio_client_destroy(ic->bufio);
	mempool_destroy(ic->journal_io_mempool);
	if (ic->io)
		dm_io_client_destroy(ic->io);
	if (ic->dev)
		dm_put_device(ti, ic->dev);
	dm_integrity_free_page_list(ic, ic->journal);
	dm_integrity_free_page_list(ic, ic->journal_io);
	dm_integrity_free_page_list(ic, ic->journal_xor);
	if (ic->journal_scatterlist)
		dm_integrity_free_journal_scatterlist(ic, ic->journal_scatterlist);
	if (ic->journal_io_scatterlist)
		dm_integrity_free_journal_scatterlist(ic, ic->journal_io_scatterlist);
	if (ic->sk_requests) {
		unsigned i;

		for (i = 0; i < ic->journal_sections; i++) {
			struct skcipher_request *req = ic->sk_requests[i];
			if (req) {
				kzfree(req->iv);
				skcipher_request_free(req);
			}
		}
		kvfree(ic->sk_requests);
	}
	kvfree(ic->journal_tree);
	if (ic->sb)
		free_pages_exact(ic->sb, SB_SECTORS << SECTOR_SHIFT);

	if (ic->internal_hash)
		crypto_free_shash(ic->internal_hash);
	free_alg(&ic->internal_hash_alg);

	if (ic->journal_crypt)
		crypto_free_skcipher(ic->journal_crypt);
	free_alg(&ic->journal_crypt_alg);

	if (ic->journal_mac)
		crypto_free_shash(ic->journal_mac);
	free_alg(&ic->journal_mac_alg);

	kfree(ic);
}
static struct target_type integrity_target = {
	.name			= "integrity",
	.version		= {1, 0, 0},
	.module			= THIS_MODULE,
	.features		= DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
	.ctr			= dm_integrity_ctr,
	.dtr			= dm_integrity_dtr,
	.map			= dm_integrity_map,
	.postsuspend		= dm_integrity_postsuspend,
	.resume			= dm_integrity_resume,
	.status			= dm_integrity_status,
	.iterate_devices	= dm_integrity_iterate_devices,
	.io_hints		= dm_integrity_io_hints,
};
int __init dm_integrity_init(void)
{
	int r;

	journal_io_cache = kmem_cache_create("integrity_journal_io",
					     sizeof(struct journal_io), 0, 0, NULL);
	if (!journal_io_cache) {
		DMERR("can't allocate journal io cache");
		return -ENOMEM;
	}

	r = dm_register_target(&integrity_target);

	if (r < 0)
		DMERR("register failed %d", r);

	return r;
}
void dm_integrity_exit(void)
{
	dm_unregister_target(&integrity_target);
	kmem_cache_destroy(journal_io_cache);
}
module_init(dm_integrity_init);
module_exit(dm_integrity_exit);

MODULE_AUTHOR("Milan Broz");
MODULE_AUTHOR("Mikulas Patocka");
MODULE_DESCRIPTION(DM_NAME " target for integrity tags extension");
MODULE_LICENSE("GPL");