/*
 * Copyright (C) 2016-2017 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2016-2017 Milan Broz
 * Copyright (C) 2016-2017 Mikulas Patocka
 *
 * This file is released under the GPL.
 */

#include "dm-bio-record.h"

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/vmalloc.h>
#include <linux/sort.h>
#include <linux/rbtree.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/async_tx.h>
#include <linux/dm-bufio.h>

#define DM_MSG_PREFIX "integrity"

#define DEFAULT_INTERLEAVE_SECTORS	32768
#define DEFAULT_JOURNAL_SIZE_FACTOR	7
#define DEFAULT_SECTORS_PER_BITMAP_BIT	32768
#define DEFAULT_BUFFER_SECTORS		128
#define DEFAULT_JOURNAL_WATERMARK	50
#define DEFAULT_SYNC_MSEC		10000
#define DEFAULT_MAX_JOURNAL_SECTORS	131072
#define MIN_LOG2_INTERLEAVE_SECTORS	3
#define MAX_LOG2_INTERLEAVE_SECTORS	31
#define METADATA_WORKQUEUE_MAX_ACTIVE	16
#define RECALC_SECTORS			8192
#define RECALC_WRITE_SUPER		16
#define BITMAP_BLOCK_SIZE		4096	/* don't change it */
#define BITMAP_FLUSH_INTERVAL		(10 * HZ)

/*
 * Warning - DEBUG_PRINT prints security-sensitive data to the log,
 * so it should not be enabled in the official kernel
 */
//#define DEBUG_PRINT
//#define INTERNAL_VERIFY

/*
 * On disk structures
 */

#define SB_MAGIC			"integrt"
#define SB_VERSION_1			1
#define SB_VERSION_2			2
#define SB_VERSION_3			3
#define SB_VERSION_4			4
#define SB_SECTORS			8
#define MAX_SECTORS_PER_BLOCK		8

struct superblock {
	__u8 magic[8];
	__u8 version;
	__u8 log2_interleave_sectors;
	__u16 integrity_tag_size;
	__u32 journal_sections;
	__u64 provided_data_sectors;	/* userspace uses this value */
	__u32 flags;
	__u8 log2_sectors_per_block;
	__u8 log2_blocks_per_bitmap_bit;
	__u8 pad[2];
	__u64 recalc_sector;
};

#define SB_FLAG_HAVE_JOURNAL_MAC	0x1
#define SB_FLAG_RECALCULATING		0x2
#define SB_FLAG_DIRTY_BITMAP		0x4
#define SB_FLAG_FIXED_PADDING		0x8

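/*
 * Note: the superblock is read and written as SB_SECTORS starting at
 * ic->start on the metadata device (or on the single combined device when
 * no separate metadata device is configured) - see sync_rw_sb() below.
 * sb_set_version() stamps it with the lowest version number that can
 * express the features actually in use.
 */
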
#define JOURNAL_ENTRY_ROUNDUP		8

typedef __u64 commit_id_t;
#define JOURNAL_MAC_PER_SECTOR		8

struct journal_entry {
	union {
		struct {
			__u32 sector_lo;
			__u32 sector_hi;
		} s;
		__u64 sector;
	} u;
	commit_id_t last_bytes[0];
	/* __u8 tag[0]; */
};

#define journal_entry_tag(ic, je)		((__u8 *)&(je)->last_bytes[(ic)->sectors_per_block])

#if BITS_PER_LONG == 64
#define journal_entry_set_sector(je, x)		do { smp_wmb(); WRITE_ONCE((je)->u.sector, cpu_to_le64(x)); } while (0)
#else
#define journal_entry_set_sector(je, x)		do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); WRITE_ONCE((je)->u.s.sector_hi, cpu_to_le32((x) >> 32)); } while (0)
#endif
#define journal_entry_get_sector(je)		le64_to_cpu((je)->u.sector)
#define journal_entry_is_unused(je)		((je)->u.s.sector_hi == cpu_to_le32(-1))
#define journal_entry_set_unused(je)		do { ((je)->u.s.sector_hi = cpu_to_le32(-1)); } while (0)
#define journal_entry_is_inprogress(je)		((je)->u.s.sector_hi == cpu_to_le32(-2))
#define journal_entry_set_inprogress(je)	do { ((je)->u.s.sector_hi = cpu_to_le32(-2)); } while (0)

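/*
 * Two impossible values of sector_hi double as sentinels: -1 marks an
 * unused entry and -2 an entry whose data is still being copied in.
 * journal_entry_set_sector() publishes the real sector number last, with
 * smp_wmb() ordering it after the entry's data, so a reader that sees
 * neither sentinel can trust the entry (paired with the smp_rmb() on the
 * read side in __journal_read_write()).
 */
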
#define JOURNAL_BLOCK_SECTORS		8
#define JOURNAL_SECTOR_DATA		((1 << SECTOR_SHIFT) - sizeof(commit_id_t))
#define JOURNAL_MAC_SIZE		(JOURNAL_MAC_PER_SECTOR * JOURNAL_BLOCK_SECTORS)

struct journal_sector {
	__u8 entries[JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR];
	__u8 mac[JOURNAL_MAC_PER_SECTOR];
	commit_id_t commit_id;
};

#define MAX_TAG_SIZE			(JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR - offsetof(struct journal_entry, last_bytes[MAX_SECTORS_PER_BLOCK]))

#define METADATA_PADDING_SECTORS	8

#define N_COMMIT_IDS			4

static unsigned char prev_commit_seq(unsigned char seq)
{
	return (seq + N_COMMIT_IDS - 1) % N_COMMIT_IDS;
}

static unsigned char next_commit_seq(unsigned char seq)
{
	return (seq + 1) % N_COMMIT_IDS;
}

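/*
 * Four commit IDs rotate as the journal wraps; the two helpers above step
 * through that cycle. A stale journal sector left over from an earlier pass
 * carries a commit_id from a different position in the cycle, so it can be
 * recognized as stale during journal replay (see also
 * dm_integrity_commit_id() below, which mixes in section and sector).
 */
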
/*
 * In-memory structures
 */

struct journal_node {
	struct rb_node node;
	sector_t sector;
};

struct alg_spec {
	char *alg_string;
	char *key_string;
	__u8 *key;
	unsigned key_size;
};

struct dm_integrity_c {
	struct dm_dev *dev;
	struct dm_dev *meta_dev;
	unsigned tag_size;
	__s8 log2_tag_size;
	sector_t start;
	mempool_t journal_io_mempool;
	struct dm_io_client *io;
	struct dm_bufio_client *bufio;
	struct workqueue_struct *metadata_wq;
	struct superblock *sb;
	unsigned journal_pages;
	unsigned n_bitmap_blocks;

	struct page_list *journal;
	struct page_list *journal_io;
	struct page_list *journal_xor;
	struct page_list *recalc_bitmap;
	struct page_list *may_write_bitmap;
	struct bitmap_block_status *bbs;
	unsigned bitmap_flush_interval;
	int synchronous_mode;
	struct bio_list synchronous_bios;
	struct delayed_work bitmap_flush_work;

	struct crypto_skcipher *journal_crypt;
	struct scatterlist **journal_scatterlist;
	struct scatterlist **journal_io_scatterlist;
	struct skcipher_request **sk_requests;

	struct crypto_shash *journal_mac;

	struct journal_node *journal_tree;
	struct rb_root journal_tree_root;

	sector_t provided_data_sectors;

	unsigned short journal_entry_size;
	unsigned char journal_entries_per_sector;
	unsigned char journal_section_entries;
	unsigned short journal_section_sectors;
	unsigned journal_sections;
	unsigned journal_entries;
	sector_t data_device_sectors;
	sector_t meta_device_sectors;
	unsigned initial_sectors;
	unsigned metadata_run;
	__s8 log2_metadata_run;
	__u8 log2_buffer_sectors;
	__u8 sectors_per_block;
	__u8 log2_blocks_per_bitmap_bit;

	unsigned char mode;

	int failed;

	struct crypto_shash *internal_hash;

	struct dm_target *ti;

	/* these variables are locked with endio_wait.lock */
	struct rb_root in_progress;
	struct list_head wait_list;
	wait_queue_head_t endio_wait;
	struct workqueue_struct *wait_wq;
	struct workqueue_struct *offload_wq;

	unsigned char commit_seq;
	commit_id_t commit_ids[N_COMMIT_IDS];

	unsigned committed_section;
	unsigned n_committed_sections;

	unsigned uncommitted_section;
	unsigned n_uncommitted_sections;

	unsigned free_section;
	unsigned char free_section_entry;
	unsigned free_sectors;

	unsigned free_sectors_threshold;

	struct workqueue_struct *commit_wq;
	struct work_struct commit_work;

	struct workqueue_struct *writer_wq;
	struct work_struct writer_work;

	struct workqueue_struct *recalc_wq;
	struct work_struct recalc_work;
	u8 *recalc_buffer;
	u8 *recalc_tags;

	struct bio_list flush_bio_list;

	unsigned long autocommit_jiffies;
	struct timer_list autocommit_timer;
	unsigned autocommit_msec;

	wait_queue_head_t copy_to_journal_wait;

	struct completion crypto_backoff;

	bool journal_uptodate;
	bool just_formatted;
	bool recalculate_flag;
	bool fix_padding;

	struct alg_spec internal_hash_alg;
	struct alg_spec journal_crypt_alg;
	struct alg_spec journal_mac_alg;

	atomic64_t number_of_mismatches;

	struct notifier_block reboot_notifier;
};

struct dm_integrity_range {
	sector_t logical_sector;
	sector_t n_sectors;
	bool waiting;
	union {
		struct rb_node node;
		struct {
			struct task_struct *task;
			struct list_head wait_entry;
		};
	};
};

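/*
 * A range is in exactly one of two states, which is why the rb_node and the
 * task/wait_entry pair can share a union: while the range is active it
 * lives in ic->in_progress (rb_node); while it waits for an overlapping
 * range to finish it lives in ic->wait_list, with the task to wake
 * recorded. See add_new_range() and wait_and_add_new_range() below.
 */
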
struct dm_integrity_io {
	struct work_struct work;

	struct dm_integrity_c *ic;
	bool write;
	bool fua;

	struct dm_integrity_range range;

	sector_t metadata_block;
	unsigned metadata_offset;

	atomic_t in_flight;
	blk_status_t bi_status;

	struct completion *completion;

	struct dm_bio_details bio_details;
};

struct journal_completion {
	struct dm_integrity_c *ic;
	atomic_t in_flight;
	struct completion comp;
};

struct journal_io {
	struct dm_integrity_range range;
	struct journal_completion *comp;
};

struct bitmap_block_status {
	struct work_struct work;
	struct dm_integrity_c *ic;
	unsigned idx;
	unsigned long *bitmap;
	struct bio_list bio_queue;
	spinlock_t bio_queue_lock;
};

static struct kmem_cache *journal_io_cache;

#define JOURNAL_IO_MEMPOOL	32

#ifdef DEBUG_PRINT
#define DEBUG_print(x, ...)	printk(KERN_DEBUG x, ##__VA_ARGS__)
static void __DEBUG_bytes(__u8 *bytes, size_t len, const char *msg, ...)
{
	va_list args;
	va_start(args, msg);
	vprintk(msg, args);
	va_end(args);
	if (len)
		pr_cont(":");
	while (len) {
		pr_cont(" %02x", *bytes);
		bytes++;
		len--;
	}
	pr_cont("\n");
}
#define DEBUG_bytes(bytes, len, msg, ...)	__DEBUG_bytes(bytes, len, KERN_DEBUG msg, ##__VA_ARGS__)
#else
#define DEBUG_print(x, ...)			do { } while (0)
#define DEBUG_bytes(bytes, len, msg, ...)	do { } while (0)
#endif

static void dm_integrity_prepare(struct request *rq)
{
}

static void dm_integrity_complete(struct request *rq, unsigned int nr_bytes)
{
}

/*
 * DM Integrity profile; protection is performed by the layer above (dm-crypt)
 */
static const struct blk_integrity_profile dm_integrity_profile = {
	.name			= "DM-DIF-EXT-TAG",
	.generate_fn		= NULL,
	.verify_fn		= NULL,
	.prepare_fn		= dm_integrity_prepare,
	.complete_fn		= dm_integrity_complete,
};

static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map);
static void integrity_bio_wait(struct work_struct *w);
static void dm_integrity_dtr(struct dm_target *ti);

static void dm_integrity_io_error(struct dm_integrity_c *ic, const char *msg, int err)
{
	if (err == -EILSEQ)
		atomic64_inc(&ic->number_of_mismatches);
	if (!cmpxchg(&ic->failed, 0, err))
		DMERR("Error on %s: %d", msg, err);
}

static int dm_integrity_failed(struct dm_integrity_c *ic)
{
	return READ_ONCE(ic->failed);
}

static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i,
					  unsigned j, unsigned char seq)
{
	/*
	 * XOR the number with the section and sector, so that if a piece of
	 * the journal is written at the wrong place, it is detected.
	 */
	return ic->commit_ids[seq] ^ cpu_to_le64(((__u64)i << 32) ^ j);
}

static void get_area_and_offset(struct dm_integrity_c *ic, sector_t data_sector,
				sector_t *area, sector_t *offset)
{
	if (!ic->meta_dev) {
		__u8 log2_interleave_sectors = ic->sb->log2_interleave_sectors;
		*area = data_sector >> log2_interleave_sectors;
		*offset = (unsigned)data_sector & ((1U << log2_interleave_sectors) - 1);
	} else {
		*area = 0;
		*offset = data_sector;
	}
}

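/*
 * Without a separate metadata device the layout is interleaved: the device
 * is split into "areas" of 2^log2_interleave_sectors data sectors, each with
 * its own metadata run, so the area index and the offset within the area are
 * simply the high and low bits of the data sector number. With a separate
 * metadata device there is a single area covering the whole data device.
 */
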
#define sector_to_block(ic, n)						\
do {									\
	BUG_ON((n) & (unsigned)((ic)->sectors_per_block - 1));		\
	(n) >>= (ic)->sb->log2_sectors_per_block;			\
} while (0)

static __u64 get_metadata_sector_and_offset(struct dm_integrity_c *ic, sector_t area,
					    sector_t offset, unsigned *metadata_offset)
{
	__u64 ms;
	unsigned mo;

	ms = area << ic->sb->log2_interleave_sectors;
	if (likely(ic->log2_metadata_run >= 0))
		ms += area << ic->log2_metadata_run;
	else
		ms += area * ic->metadata_run;
	ms >>= ic->log2_buffer_sectors;

	sector_to_block(ic, offset);

	if (likely(ic->log2_tag_size >= 0)) {
		ms += offset >> (SECTOR_SHIFT + ic->log2_buffer_sectors - ic->log2_tag_size);
		mo = (offset << ic->log2_tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
	} else {
		ms += (__u64)offset * ic->tag_size >> (SECTOR_SHIFT + ic->log2_buffer_sectors);
		mo = (offset * ic->tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
	}
	*metadata_offset = mo;
	return ms;
}

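/*
 * The value returned above is in units of metadata buffers (each
 * 2^log2_buffer_sectors sectors, hence the ms >>= shift), while
 * *metadata_offset is the byte offset of the first tag inside that buffer.
 * Power-of-two tag sizes take the shift-based fast path; other sizes fall
 * back to multiplication.
 */
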
static sector_t get_data_sector(struct dm_integrity_c *ic, sector_t area, sector_t offset)
{
	sector_t result;

	if (ic->meta_dev)
		return offset;

	result = area << ic->sb->log2_interleave_sectors;
	if (likely(ic->log2_metadata_run >= 0))
		result += (area + 1) << ic->log2_metadata_run;
	else
		result += (area + 1) * ic->metadata_run;

	result += (sector_t)ic->initial_sectors + offset;
	result += ic->start;

	return result;
}

static void wraparound_section(struct dm_integrity_c *ic, unsigned *sec_ptr)
{
	if (unlikely(*sec_ptr >= ic->journal_sections))
		*sec_ptr -= ic->journal_sections;
}

static void sb_set_version(struct dm_integrity_c *ic)
{
	if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING))
		ic->sb->version = SB_VERSION_4;
	else if (ic->mode == 'B' || ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP))
		ic->sb->version = SB_VERSION_3;
	else if (ic->meta_dev || ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
		ic->sb->version = SB_VERSION_2;
	else
		ic->sb->version = SB_VERSION_1;
}

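/*
 * The checks run from newest feature to oldest, so the superblock is
 * stamped with the highest version whose features are actually in use -
 * presumably so that code which only understands a lower version refuses
 * to activate the device instead of misparsing the newer fields.
 */
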
static int sync_rw_sb(struct dm_integrity_c *ic, int op, int op_flags)
{
	struct dm_io_request io_req;
	struct dm_io_region io_loc;

	io_req.bi_op = op;
	io_req.bi_op_flags = op_flags;
	io_req.mem.type = DM_IO_KMEM;
	io_req.mem.ptr.addr = ic->sb;
	io_req.notify.fn = NULL;
	io_req.client = ic->io;
	io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev;
	io_loc.sector = ic->start;
	io_loc.count = SB_SECTORS;

	if (op == REQ_OP_WRITE)
		sb_set_version(ic);

	return dm_io(&io_req, 1, &io_loc, NULL);
}

#define BITMAP_OP_TEST_ALL_SET		0
#define BITMAP_OP_TEST_ALL_CLEAR	1
#define BITMAP_OP_SET			2
#define BITMAP_OP_CLEAR			3

static bool block_bitmap_op(struct dm_integrity_c *ic, struct page_list *bitmap,
			    sector_t sector, sector_t n_sectors, int mode)
{
	unsigned long bit, end_bit, this_end_bit, page, end_page;
	unsigned long *data;

	if (unlikely(((sector | n_sectors) & ((1 << ic->sb->log2_sectors_per_block) - 1)) != 0)) {
		DMCRIT("invalid bitmap access (%llx,%llx,%d,%d,%d)",
			(unsigned long long)sector,
			(unsigned long long)n_sectors,
			ic->sb->log2_sectors_per_block,
			ic->log2_blocks_per_bitmap_bit,
			mode);
		BUG();
	}

	if (unlikely(!n_sectors))
		return true;

	bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
	end_bit = (sector + n_sectors - 1) >>
		(ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);

	page = bit / (PAGE_SIZE * 8);
	bit %= PAGE_SIZE * 8;

	end_page = end_bit / (PAGE_SIZE * 8);
	end_bit %= PAGE_SIZE * 8;

repeat:
	if (page < end_page) {
		this_end_bit = PAGE_SIZE * 8 - 1;
	} else {
		this_end_bit = end_bit;
	}

	data = lowmem_page_address(bitmap[page].page);

	if (mode == BITMAP_OP_TEST_ALL_SET) {
		while (bit <= this_end_bit) {
			if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
				do {
					if (data[bit / BITS_PER_LONG] != -1)
						return false;
					bit += BITS_PER_LONG;
				} while (this_end_bit >= bit + BITS_PER_LONG - 1);
				continue;
			}
			if (!test_bit(bit, data))
				return false;
			bit++;
		}
	} else if (mode == BITMAP_OP_TEST_ALL_CLEAR) {
		while (bit <= this_end_bit) {
			if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
				do {
					if (data[bit / BITS_PER_LONG] != 0)
						return false;
					bit += BITS_PER_LONG;
				} while (this_end_bit >= bit + BITS_PER_LONG - 1);
				continue;
			}
			if (test_bit(bit, data))
				return false;
			bit++;
		}
	} else if (mode == BITMAP_OP_SET) {
		while (bit <= this_end_bit) {
			if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
				do {
					data[bit / BITS_PER_LONG] = -1;
					bit += BITS_PER_LONG;
				} while (this_end_bit >= bit + BITS_PER_LONG - 1);
				continue;
			}
			__set_bit(bit, data);
			bit++;
		}
	} else if (mode == BITMAP_OP_CLEAR) {
		if (!bit && this_end_bit == PAGE_SIZE * 8 - 1)
			clear_page(data);
		else while (bit <= this_end_bit) {
			if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
				do {
					data[bit / BITS_PER_LONG] = 0;
					bit += BITS_PER_LONG;
				} while (this_end_bit >= bit + BITS_PER_LONG - 1);
				continue;
			}
			__clear_bit(bit, data);
			bit++;
		}
	} else {
		BUG();
	}

	if (unlikely(page < end_page)) {
		bit = 0;
		page++;
		goto repeat;
	}

	return true;
}

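/*
 * All four modes of block_bitmap_op() share the same shape: whenever the
 * cursor is word-aligned and at least BITS_PER_LONG bits remain in the
 * current page, whole unsigned longs are tested or assigned at once, and
 * only the ragged edges fall back to the single-bit helpers. The non-atomic
 * __set_bit/__clear_bit are presumably sufficient because callers serialize
 * access to each bitmap block.
 */
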
static void block_bitmap_copy(struct dm_integrity_c *ic, struct page_list *dst, struct page_list *src)
{
	unsigned n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
	unsigned i;

	for (i = 0; i < n_bitmap_pages; i++) {
		unsigned long *dst_data = lowmem_page_address(dst[i].page);
		unsigned long *src_data = lowmem_page_address(src[i].page);
		copy_page(dst_data, src_data);
	}
}

static struct bitmap_block_status *sector_to_bitmap_block(struct dm_integrity_c *ic, sector_t sector)
{
	unsigned bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
	unsigned bitmap_block = bit / (BITMAP_BLOCK_SIZE * 8);

	BUG_ON(bitmap_block >= ic->n_bitmap_blocks);
	return &ic->bbs[bitmap_block];
}

static void access_journal_check(struct dm_integrity_c *ic, unsigned section, unsigned offset,
				 bool e, const char *function)
{
#if defined(CONFIG_DM_DEBUG) || defined(INTERNAL_VERIFY)
	unsigned limit = e ? ic->journal_section_entries : ic->journal_section_sectors;

	if (unlikely(section >= ic->journal_sections) ||
	    unlikely(offset >= limit)) {
		DMCRIT("%s: invalid access at (%u,%u), limit (%u,%u)",
			function, section, offset, ic->journal_sections, limit);
		BUG();
	}
#endif
}

static void page_list_location(struct dm_integrity_c *ic, unsigned section, unsigned offset,
			       unsigned *pl_index, unsigned *pl_offset)
{
	unsigned sector;

	access_journal_check(ic, section, offset, false, "page_list_location");

	sector = section * ic->journal_section_sectors + offset;

	*pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	*pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
}

static struct journal_sector *access_page_list(struct dm_integrity_c *ic, struct page_list *pl,
					       unsigned section, unsigned offset, unsigned *n_sectors)
{
	unsigned pl_index, pl_offset;
	char *va;

	page_list_location(ic, section, offset, &pl_index, &pl_offset);

	if (n_sectors)
		*n_sectors = (PAGE_SIZE - pl_offset) >> SECTOR_SHIFT;

	va = lowmem_page_address(pl[pl_index].page);

	return (struct journal_sector *)(va + pl_offset);
}

static struct journal_sector *access_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset)
{
	return access_page_list(ic, ic->journal, section, offset, NULL);
}

static struct journal_entry *access_journal_entry(struct dm_integrity_c *ic, unsigned section, unsigned n)
{
	unsigned rel_sector, offset;
	struct journal_sector *js;

	access_journal_check(ic, section, n, true, "access_journal_entry");

	rel_sector = n % JOURNAL_BLOCK_SECTORS;
	offset = n / JOURNAL_BLOCK_SECTORS;

	js = access_journal(ic, section, rel_sector);
	return (struct journal_entry *)((char *)js + offset * ic->journal_entry_size);
}

static struct journal_sector *access_journal_data(struct dm_integrity_c *ic, unsigned section, unsigned n)
{
	n <<= ic->sb->log2_sectors_per_block;

	n += JOURNAL_BLOCK_SECTORS;

	access_journal_check(ic, section, n, false, "access_journal_data");

	return access_journal(ic, section, n);
}

static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result[JOURNAL_MAC_SIZE])
{
	SHASH_DESC_ON_STACK(desc, ic->journal_mac);
	int r;
	unsigned j, size;

	desc->tfm = ic->journal_mac;

	r = crypto_shash_init(desc);
	if (unlikely(r)) {
		dm_integrity_io_error(ic, "crypto_shash_init", r);
		goto err;
	}

	for (j = 0; j < ic->journal_section_entries; j++) {
		struct journal_entry *je = access_journal_entry(ic, section, j);
		r = crypto_shash_update(desc, (__u8 *)&je->u.sector, sizeof je->u.sector);
		if (unlikely(r)) {
			dm_integrity_io_error(ic, "crypto_shash_update", r);
			goto err;
		}
	}

	size = crypto_shash_digestsize(ic->journal_mac);

	if (likely(size <= JOURNAL_MAC_SIZE)) {
		r = crypto_shash_final(desc, result);
		if (unlikely(r)) {
			dm_integrity_io_error(ic, "crypto_shash_final", r);
			goto err;
		}
		memset(result + size, 0, JOURNAL_MAC_SIZE - size);
	} else {
		__u8 digest[HASH_MAX_DIGESTSIZE];

		if (WARN_ON(size > sizeof(digest))) {
			dm_integrity_io_error(ic, "digest_size", -EINVAL);
			goto err;
		}
		r = crypto_shash_final(desc, digest);
		if (unlikely(r)) {
			dm_integrity_io_error(ic, "crypto_shash_final", r);
			goto err;
		}
		memcpy(result, digest, JOURNAL_MAC_SIZE);
	}

	return;
err:
	memset(result, 0, JOURNAL_MAC_SIZE);
}

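/*
 * Note that the section MAC covers only the sector numbers of the entries,
 * not the data - presumably because the data is already covered by the
 * per-block tags, so the MAC only needs to authenticate which sectors the
 * journal claims to contain. The digest is truncated or zero-padded to
 * exactly JOURNAL_MAC_SIZE bytes and spread across the section's sectors in
 * JOURNAL_MAC_PER_SECTOR slices by rw_section_mac() below.
 */
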
static void rw_section_mac(struct dm_integrity_c *ic, unsigned section, bool wr)
{
	__u8 result[JOURNAL_MAC_SIZE];
	unsigned j;

	if (!ic->journal_mac)
		return;

	section_mac(ic, section, result);

	for (j = 0; j < JOURNAL_BLOCK_SECTORS; j++) {
		struct journal_sector *js = access_journal(ic, section, j);

		if (likely(wr))
			memcpy(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR);
		else {
			if (memcmp(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR))
				dm_integrity_io_error(ic, "journal mac", -EILSEQ);
		}
	}
}

static void complete_journal_op(void *context)
{
	struct journal_completion *comp = context;
	BUG_ON(!atomic_read(&comp->in_flight));
	if (likely(atomic_dec_and_test(&comp->in_flight)))
		complete(&comp->comp);
}

static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
			unsigned n_sections, struct journal_completion *comp)
{
	struct async_submit_ctl submit;
	size_t n_bytes = (size_t)(n_sections * ic->journal_section_sectors) << SECTOR_SHIFT;
	unsigned pl_index, pl_offset, section_index;
	struct page_list *source_pl, *target_pl;

	if (likely(encrypt)) {
		source_pl = ic->journal;
		target_pl = ic->journal_io;
	} else {
		source_pl = ic->journal_io;
		target_pl = ic->journal;
	}

	page_list_location(ic, section, 0, &pl_index, &pl_offset);

	atomic_add(roundup(pl_offset + n_bytes, PAGE_SIZE) >> PAGE_SHIFT, &comp->in_flight);

	init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, complete_journal_op, comp, NULL);

	section_index = pl_index;

	do {
		size_t this_step;
		struct page *src_pages[2];
		struct page *dst_page;

		while (unlikely(pl_index == section_index)) {
			unsigned dummy;
			if (likely(encrypt))
				rw_section_mac(ic, section, true);
			section++;
			n_sections--;
			if (!n_sections)
				break;
			page_list_location(ic, section, 0, &section_index, &dummy);
		}

		this_step = min(n_bytes, (size_t)PAGE_SIZE - pl_offset);
		dst_page = target_pl[pl_index].page;
		src_pages[0] = source_pl[pl_index].page;
		src_pages[1] = ic->journal_xor[pl_index].page;

		async_xor(dst_page, src_pages, pl_offset, 2, this_step, &submit);

		pl_index++;
		pl_offset = 0;
		n_bytes -= this_step;
	} while (n_bytes);

	BUG_ON(n_sections);

	async_tx_issue_pending_all();
}

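/*
 * XOR "encryption": ic->journal_xor holds pre-generated keystream pages,
 * and the async_tx engine XORs them page by page into the I/O copy of the
 * journal (or back again for decryption - the operation is its own
 * inverse). Section MACs are written just before the pages holding a
 * section are scheduled, which is what the pl_index == section_index
 * bookkeeping above tracks.
 */
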
static void complete_journal_encrypt(struct crypto_async_request *req, int err)
{
	struct journal_completion *comp = req->data;
	if (unlikely(err)) {
		if (likely(err == -EINPROGRESS)) {
			complete(&comp->ic->crypto_backoff);
			return;
		}
		dm_integrity_io_error(comp->ic, "asynchronous encrypt", err);
	}
	complete_journal_op(comp);
}

static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_completion *comp)
{
	int r;
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      complete_journal_encrypt, comp);
	if (likely(encrypt))
		r = crypto_skcipher_encrypt(req);
	else
		r = crypto_skcipher_decrypt(req);
	if (likely(!r))
		return false;
	if (likely(r == -EINPROGRESS))
		return true;
	if (likely(r == -EBUSY)) {
		wait_for_completion(&comp->ic->crypto_backoff);
		reinit_completion(&comp->ic->crypto_backoff);
		return true;
	}
	dm_integrity_io_error(comp->ic, "encrypt", r);
	return false;
}

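/*
 * do_crypt() returns true when the request completed asynchronously and
 * the callback will drop the in_flight reference. The -EBUSY case relies
 * on CRYPTO_TFM_REQ_MAY_BACKLOG: the request was queued anyway, and the
 * driver signals crypto_backoff (via the -EINPROGRESS notification in
 * complete_journal_encrypt()) once the next request may be submitted.
 */
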
static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
			  unsigned n_sections, struct journal_completion *comp)
{
	struct scatterlist **source_sg;
	struct scatterlist **target_sg;

	atomic_add(2, &comp->in_flight);

	if (likely(encrypt)) {
		source_sg = ic->journal_scatterlist;
		target_sg = ic->journal_io_scatterlist;
	} else {
		source_sg = ic->journal_io_scatterlist;
		target_sg = ic->journal_scatterlist;
	}

	do {
		struct skcipher_request *req;
		unsigned ivsize;
		char *iv;

		if (likely(encrypt))
			rw_section_mac(ic, section, true);

		req = ic->sk_requests[section];
		ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
		iv = req->iv;

		memcpy(iv, iv + ivsize, ivsize);

		req->src = source_sg[section];
		req->dst = target_sg[section];

		if (unlikely(do_crypt(encrypt, req, comp)))
			atomic_inc(&comp->in_flight);

		section++;
		n_sections--;
	} while (n_sections);

	atomic_dec(&comp->in_flight);
	complete_journal_op(comp);
}

static void encrypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
			    unsigned n_sections, struct journal_completion *comp)
{
	if (ic->journal_xor)
		return xor_journal(ic, encrypt, section, n_sections, comp);
	else
		return crypt_journal(ic, encrypt, section, n_sections, comp);
}

static void complete_journal_io(unsigned long error, void *context)
{
	struct journal_completion *comp = context;
	if (unlikely(error != 0))
		dm_integrity_io_error(comp->ic, "writing journal", -EIO);
	complete_journal_op(comp);
}

static void rw_journal_sectors(struct dm_integrity_c *ic, int op, int op_flags,
			       unsigned sector, unsigned n_sectors, struct journal_completion *comp)
{
	struct dm_io_request io_req;
	struct dm_io_region io_loc;
	unsigned pl_index, pl_offset;
	int r;

	if (unlikely(dm_integrity_failed(ic))) {
		if (comp)
			complete_journal_io(-1UL, comp);
		return;
	}

	pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);

	io_req.bi_op = op;
	io_req.bi_op_flags = op_flags;
	io_req.mem.type = DM_IO_PAGE_LIST;
	if (ic->journal_io)
		io_req.mem.ptr.pl = &ic->journal_io[pl_index];
	else
		io_req.mem.ptr.pl = &ic->journal[pl_index];
	io_req.mem.offset = pl_offset;
	if (likely(comp != NULL)) {
		io_req.notify.fn = complete_journal_io;
		io_req.notify.context = comp;
	} else {
		io_req.notify.fn = NULL;
	}
	io_req.client = ic->io;
	io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev;
	io_loc.sector = ic->start + SB_SECTORS + sector;
	io_loc.count = n_sectors;

	r = dm_io(&io_req, 1, &io_loc, NULL);
	if (unlikely(r)) {
		dm_integrity_io_error(ic, op == REQ_OP_READ ? "reading journal" : "writing journal", r);
		if (comp) {
			WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
			complete_journal_io(-1UL, comp);
		}
	}
}

static void rw_journal(struct dm_integrity_c *ic, int op, int op_flags, unsigned section,
		       unsigned n_sections, struct journal_completion *comp)
{
	unsigned sector, n_sectors;

	sector = section * ic->journal_section_sectors;
	n_sectors = n_sections * ic->journal_section_sectors;

	rw_journal_sectors(ic, op, op_flags, sector, n_sectors, comp);
}

static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsigned commit_sections)
{
	struct journal_completion io_comp;
	struct journal_completion crypt_comp_1;
	struct journal_completion crypt_comp_2;
	unsigned i;

	io_comp.ic = ic;
	init_completion(&io_comp.comp);

	if (commit_start + commit_sections <= ic->journal_sections) {
		io_comp.in_flight = (atomic_t)ATOMIC_INIT(1);
		if (ic->journal_io) {
			crypt_comp_1.ic = ic;
			init_completion(&crypt_comp_1.comp);
			crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
			encrypt_journal(ic, true, commit_start, commit_sections, &crypt_comp_1);
			wait_for_completion_io(&crypt_comp_1.comp);
		} else {
			for (i = 0; i < commit_sections; i++)
				rw_section_mac(ic, commit_start + i, true);
		}
		rw_journal(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, commit_start,
			   commit_sections, &io_comp);
	} else {
		unsigned to_end;
		io_comp.in_flight = (atomic_t)ATOMIC_INIT(2);
		to_end = ic->journal_sections - commit_start;
		if (ic->journal_io) {
			crypt_comp_1.ic = ic;
			init_completion(&crypt_comp_1.comp);
			crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
			encrypt_journal(ic, true, commit_start, to_end, &crypt_comp_1);
			if (try_wait_for_completion(&crypt_comp_1.comp)) {
				rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
				reinit_completion(&crypt_comp_1.comp);
				crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
				encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_1);
				wait_for_completion_io(&crypt_comp_1.comp);
			} else {
				crypt_comp_2.ic = ic;
				init_completion(&crypt_comp_2.comp);
				crypt_comp_2.in_flight = (atomic_t)ATOMIC_INIT(0);
				encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_2);
				wait_for_completion_io(&crypt_comp_1.comp);
				rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
				wait_for_completion_io(&crypt_comp_2.comp);
			}
		} else {
			for (i = 0; i < to_end; i++)
				rw_section_mac(ic, commit_start + i, true);
			rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
			for (i = 0; i < commit_sections - to_end; i++)
				rw_section_mac(ic, i, true);
		}
		rw_journal(ic, REQ_OP_WRITE, REQ_FUA, 0, commit_sections - to_end, &io_comp);
	}

	wait_for_completion_io(&io_comp.comp);
}

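/*
 * The journal area is circular, so a commit that crosses the end is written
 * as two pieces: commit_start up to the end of the journal, plus the
 * wrap-around starting at section 0. When the journal is encrypted, the two
 * encryption batches and the two writes are overlapped as far as the
 * completions allow; io_comp starts with an in_flight count of 2 so it only
 * completes once both writes have finished.
 */
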
static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset,
			      unsigned n_sectors, sector_t target, io_notify_fn fn, void *data)
{
	struct dm_io_request io_req;
	struct dm_io_region io_loc;
	int r;
	unsigned sector, pl_index, pl_offset;

	BUG_ON((target | n_sectors | offset) & (unsigned)(ic->sectors_per_block - 1));

	if (unlikely(dm_integrity_failed(ic))) {
		fn(-1UL, data);
		return;
	}

	sector = section * ic->journal_section_sectors + JOURNAL_BLOCK_SECTORS + offset;

	pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);

	io_req.bi_op = REQ_OP_WRITE;
	io_req.bi_op_flags = 0;
	io_req.mem.type = DM_IO_PAGE_LIST;
	io_req.mem.ptr.pl = &ic->journal[pl_index];
	io_req.mem.offset = pl_offset;
	io_req.notify.fn = fn;
	io_req.notify.context = data;
	io_req.client = ic->io;
	io_loc.bdev = ic->dev->bdev;
	io_loc.sector = target;
	io_loc.count = n_sectors;

	r = dm_io(&io_req, 1, &io_loc, NULL);
	if (unlikely(r)) {
		WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
		fn(-1UL, data);
	}
}

static bool ranges_overlap(struct dm_integrity_range *range1, struct dm_integrity_range *range2)
{
	return range1->logical_sector < range2->logical_sector + range2->n_sectors &&
	       range1->logical_sector + range1->n_sectors > range2->logical_sector;
}

static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool check_waiting)
{
	struct rb_node **n = &ic->in_progress.rb_node;
	struct rb_node *parent;

	BUG_ON((new_range->logical_sector | new_range->n_sectors) & (unsigned)(ic->sectors_per_block - 1));

	if (likely(check_waiting)) {
		struct dm_integrity_range *range;
		list_for_each_entry(range, &ic->wait_list, wait_entry) {
			if (unlikely(ranges_overlap(range, new_range)))
				return false;
		}
	}

	parent = NULL;

	while (*n) {
		struct dm_integrity_range *range = container_of(*n, struct dm_integrity_range, node);

		parent = *n;
		if (new_range->logical_sector + new_range->n_sectors <= range->logical_sector) {
			n = &range->node.rb_left;
		} else if (new_range->logical_sector >= range->logical_sector + range->n_sectors) {
			n = &range->node.rb_right;
		} else {
			return false;
		}
	}

	rb_link_node(&new_range->node, parent, n);
	rb_insert_color(&new_range->node, &ic->in_progress);

	return true;
}

static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity_range *range)
{
	rb_erase(&range->node, &ic->in_progress);
	while (unlikely(!list_empty(&ic->wait_list))) {
		struct dm_integrity_range *last_range =
			list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry);
		struct task_struct *last_range_task;
		last_range_task = last_range->task;
		list_del(&last_range->wait_entry);
		if (!add_new_range(ic, last_range, false)) {
			last_range->task = last_range_task;
			list_add(&last_range->wait_entry, &ic->wait_list);
			break;
		}
		last_range->waiting = false;
		wake_up_process(last_range_task);
	}
}

static void remove_range(struct dm_integrity_c *ic, struct dm_integrity_range *range)
{
	unsigned long flags;

	spin_lock_irqsave(&ic->endio_wait.lock, flags);
	remove_range_unlocked(ic, range);
	spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
}

static void wait_and_add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
{
	new_range->waiting = true;
	list_add_tail(&new_range->wait_entry, &ic->wait_list);
	new_range->task = current;
	do {
		__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&ic->endio_wait.lock);
		io_schedule();
		spin_lock_irq(&ic->endio_wait.lock);
	} while (unlikely(new_range->waiting));
}

static void add_new_range_and_wait(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
{
	if (unlikely(!add_new_range(ic, new_range, true)))
		wait_and_add_new_range(ic, new_range);
}

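/*
 * The helpers above implement per-sector-range locking under
 * endio_wait.lock: an I/O claims its range via add_new_range(); if it
 * overlaps an in-progress or already-waiting range, it parks itself on
 * ic->wait_list, and remove_range_unlocked() re-admits waiters in FIFO
 * order, stopping at the first waiter that still conflicts so the queue
 * order is preserved.
 */
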
static void init_journal_node(struct journal_node *node)
{
	RB_CLEAR_NODE(&node->node);
	node->sector = (sector_t)-1;
}

static void add_journal_node(struct dm_integrity_c *ic, struct journal_node *node, sector_t sector)
{
	struct rb_node **link;
	struct rb_node *parent;

	node->sector = sector;
	BUG_ON(!RB_EMPTY_NODE(&node->node));

	link = &ic->journal_tree_root.rb_node;
	parent = NULL;

	while (*link) {
		struct journal_node *j;
		parent = *link;
		j = container_of(parent, struct journal_node, node);
		if (sector < j->sector)
			link = &j->node.rb_left;
		else
			link = &j->node.rb_right;
	}

	rb_link_node(&node->node, parent, link);
	rb_insert_color(&node->node, &ic->journal_tree_root);
}

static void remove_journal_node(struct dm_integrity_c *ic, struct journal_node *node)
{
	BUG_ON(RB_EMPTY_NODE(&node->node));
	rb_erase(&node->node, &ic->journal_tree_root);
	init_journal_node(node);
}

#define NOT_FOUND	(-1U)

static unsigned find_journal_node(struct dm_integrity_c *ic, sector_t sector, sector_t *next_sector)
{
	struct rb_node *n = ic->journal_tree_root.rb_node;
	unsigned found = NOT_FOUND;
	*next_sector = (sector_t)-1;
	while (n) {
		struct journal_node *j = container_of(n, struct journal_node, node);
		if (sector == j->sector) {
			found = j - ic->journal_tree;
		}
		if (sector < j->sector) {
			*next_sector = j->sector;
			n = j->node.rb_left;
		} else {
			n = j->node.rb_right;
		}
	}

	return found;
}

static bool test_journal_node(struct dm_integrity_c *ic, unsigned pos, sector_t sector)
{
	struct journal_node *node, *next_node;
	struct rb_node *next;

	if (unlikely(pos >= ic->journal_entries))
		return false;
	node = &ic->journal_tree[pos];
	if (unlikely(RB_EMPTY_NODE(&node->node)))
		return false;
	if (unlikely(node->sector != sector))
		return false;

	next = rb_next(&node->node);
	if (unlikely(!next))
		return true;

	next_node = container_of(next, struct journal_node, node);
	return next_node->sector != sector;
}

static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_node *node)
{
	struct rb_node *next;
	struct journal_node *next_node;
	unsigned next_section;

	BUG_ON(RB_EMPTY_NODE(&node->node));

	next = rb_next(&node->node);
	if (unlikely(!next))
		return false;

	next_node = container_of(next, struct journal_node, node);

	if (next_node->sector != node->sector)
		return false;

	next_section = (unsigned)(next_node - ic->journal_tree) / ic->journal_section_entries;
	if (next_section >= ic->committed_section &&
	    next_section < ic->committed_section + ic->n_committed_sections)
		return true;
	if (next_section + ic->journal_sections < ic->committed_section + ic->n_committed_sections)
		return true;

	return false;
}

#define TAG_READ	0
#define TAG_WRITE	1
#define TAG_CMP		2

static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, sector_t *metadata_block,
			       unsigned *metadata_offset, unsigned total_size, int op)
{
	do {
		unsigned char *data, *dp;
		struct dm_buffer *b;
		unsigned to_copy;
		int r;

		r = dm_integrity_failed(ic);
		if (unlikely(r))
			return r;

		data = dm_bufio_read(ic->bufio, *metadata_block, &b);
		if (IS_ERR(data))
			return PTR_ERR(data);

		to_copy = min((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - *metadata_offset, total_size);
		dp = data + *metadata_offset;
		if (op == TAG_READ) {
			memcpy(tag, dp, to_copy);
		} else if (op == TAG_WRITE) {
			memcpy(dp, tag, to_copy);
			dm_bufio_mark_partial_buffer_dirty(b, *metadata_offset, *metadata_offset + to_copy);
		} else {
			/* e.g.: op == TAG_CMP */
			if (unlikely(memcmp(dp, tag, to_copy))) {
				unsigned i;

				for (i = 0; i < to_copy; i++) {
					if (dp[i] != tag[i])
						break;
					total_size--;
				}
				dm_bufio_release(b);
				return total_size;
			}
		}
		dm_bufio_release(b);

		tag += to_copy;
		*metadata_offset += to_copy;
		if (unlikely(*metadata_offset == 1U << SECTOR_SHIFT << ic->log2_buffer_sectors)) {
			(*metadata_block)++;
			*metadata_offset = 0;
		}
		total_size -= to_copy;
	} while (unlikely(total_size));

	return 0;
}

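/*
 * For TAG_CMP a positive return value is the number of tag bytes remaining
 * counted from the first mismatching byte; the caller
 * (integrity_metadata()) divides it by tag_size to recover the sector where
 * verification failed. 0 means all tags matched; negative values are
 * errors.
 */
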
static void dm_integrity_flush_buffers(struct dm_integrity_c *ic)
{
	int r;
	r = dm_bufio_write_dirty_buffers(ic->bufio);
	if (unlikely(r))
		dm_integrity_io_error(ic, "writing tags", r);
}

static void sleep_on_endio_wait(struct dm_integrity_c *ic)
{
	DECLARE_WAITQUEUE(wait, current);
	__add_wait_queue(&ic->endio_wait, &wait);
	__set_current_state(TASK_UNINTERRUPTIBLE);
	spin_unlock_irq(&ic->endio_wait.lock);
	io_schedule();
	spin_lock_irq(&ic->endio_wait.lock);
	__remove_wait_queue(&ic->endio_wait, &wait);
}

static void autocommit_fn(struct timer_list *t)
{
	struct dm_integrity_c *ic = from_timer(ic, t, autocommit_timer);

	if (likely(!dm_integrity_failed(ic)))
		queue_work(ic->commit_wq, &ic->commit_work);
}

static void schedule_autocommit(struct dm_integrity_c *ic)
{
	if (!timer_pending(&ic->autocommit_timer))
		mod_timer(&ic->autocommit_timer, jiffies + ic->autocommit_jiffies);
}

static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
{
	struct bio *bio;
	unsigned long flags;

	spin_lock_irqsave(&ic->endio_wait.lock, flags);
	bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
	bio_list_add(&ic->flush_bio_list, bio);
	spin_unlock_irqrestore(&ic->endio_wait.lock, flags);

	queue_work(ic->commit_wq, &ic->commit_work);
}

static void do_endio(struct dm_integrity_c *ic, struct bio *bio)
{
	int r = dm_integrity_failed(ic);
	if (unlikely(r) && !bio->bi_status)
		bio->bi_status = errno_to_blk_status(r);
	if (unlikely(ic->synchronous_mode) && bio_op(bio) == REQ_OP_WRITE) {
		unsigned long flags;
		spin_lock_irqsave(&ic->endio_wait.lock, flags);
		bio_list_add(&ic->synchronous_bios, bio);
		queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
		spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
		return;
	}
	bio_endio(bio);
}

static void do_endio_flush(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
{
	struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));

	if (unlikely(dio->fua) && likely(!bio->bi_status) && likely(!dm_integrity_failed(ic)))
		submit_flush_bio(ic, dio);
	else
		do_endio(ic, bio);
}

static void dec_in_flight(struct dm_integrity_io *dio)
{
	if (atomic_dec_and_test(&dio->in_flight)) {
		struct dm_integrity_c *ic = dio->ic;
		struct bio *bio;

		remove_range(ic, &dio->range);

		if (unlikely(dio->write))
			schedule_autocommit(ic);

		bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));

		if (unlikely(dio->bi_status) && !bio->bi_status)
			bio->bi_status = dio->bi_status;
		if (likely(!bio->bi_status) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) {
			dio->range.logical_sector += dio->range.n_sectors;
			bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT);
			INIT_WORK(&dio->work, integrity_bio_wait);
			queue_work(ic->offload_wq, &dio->work);
			return;
		}
		do_endio_flush(ic, dio);
	}
}

static void integrity_end_io(struct bio *bio)
{
	struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));

	dm_bio_restore(&dio->bio_details, bio);
	if (bio->bi_integrity)
		bio->bi_opf |= REQ_INTEGRITY;

	if (dio->completion)
		complete(dio->completion);

	dec_in_flight(dio);
}

static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector,
				      const char *data, char *result)
{
	__u64 sector_le = cpu_to_le64(sector);
	SHASH_DESC_ON_STACK(req, ic->internal_hash);
	int r;
	unsigned digest_size;

	req->tfm = ic->internal_hash;

	r = crypto_shash_init(req);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_init", r);
		goto failed;
	}

	r = crypto_shash_update(req, (const __u8 *)&sector_le, sizeof sector_le);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_update", r);
		goto failed;
	}

	r = crypto_shash_update(req, data, ic->sectors_per_block << SECTOR_SHIFT);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_update", r);
		goto failed;
	}

	r = crypto_shash_final(req, result);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_final", r);
		goto failed;
	}

	digest_size = crypto_shash_digestsize(ic->internal_hash);
	if (unlikely(digest_size < ic->tag_size))
		memset(result + digest_size, 0, ic->tag_size - digest_size);

	return;

failed:
	/* this shouldn't happen anyway, the hash functions have no reason to fail */
	get_random_bytes(result, ic->tag_size);
}

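/*
 * The tag is a hash over the little-endian sector number followed by the
 * block data, so identical data at a different location yields a different
 * tag and misdirected writes are caught. If any hash step fails (which
 * should not happen), the result is filled with random bytes, so a later
 * verification is overwhelmingly likely to fail loudly rather than pass by
 * accident.
 */
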
static void integrity_metadata(struct work_struct *w)
{
	struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
	struct dm_integrity_c *ic = dio->ic;

	int r;

	if (ic->internal_hash) {
		struct bvec_iter iter;
		struct bio_vec bv;
		unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
		struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
		char *checksums;
		unsigned extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
		char checksums_onstack[HASH_MAX_DIGESTSIZE];
		unsigned sectors_to_process = dio->range.n_sectors;
		sector_t sector = dio->range.logical_sector;

		if (unlikely(ic->mode == 'R'))
			goto skip_io;

		checksums = kmalloc((PAGE_SIZE >> SECTOR_SHIFT >> ic->sb->log2_sectors_per_block) * ic->tag_size + extra_space,
				    GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
		if (!checksums) {
			checksums = checksums_onstack;
			if (WARN_ON(extra_space &&
				    digest_size > sizeof(checksums_onstack))) {
				r = -EINVAL;
				goto error;
			}
		}

		__bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
			unsigned pos;
			char *mem, *checksums_ptr;

again:
			mem = (char *)kmap_atomic(bv.bv_page) + bv.bv_offset;
			pos = 0;
			checksums_ptr = checksums;
			do {
				integrity_sector_checksum(ic, sector, mem + pos, checksums_ptr);
				checksums_ptr += ic->tag_size;
				sectors_to_process -= ic->sectors_per_block;
				pos += ic->sectors_per_block << SECTOR_SHIFT;
				sector += ic->sectors_per_block;
			} while (pos < bv.bv_len && sectors_to_process && checksums != checksums_onstack);
			kunmap_atomic(mem);

			r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
						checksums_ptr - checksums, !dio->write ? TAG_CMP : TAG_WRITE);
			if (unlikely(r)) {
				if (r > 0) {
					DMERR_LIMIT("Checksum failed at sector 0x%llx",
						    (unsigned long long)(sector - ((r + ic->tag_size - 1) / ic->tag_size)));
					r = -EILSEQ;
					atomic64_inc(&ic->number_of_mismatches);
				}
				if (likely(checksums != checksums_onstack))
					kfree(checksums);
				goto error;
			}

			if (!sectors_to_process)
				break;

			if (unlikely(pos < bv.bv_len)) {
				bv.bv_offset += pos;
				bv.bv_len -= pos;
				goto again;
			}
		}

		if (likely(checksums != checksums_onstack))
			kfree(checksums);
	} else {
		struct bio_integrity_payload *bip = dio->bio_details.bi_integrity;

		if (bip) {
			struct bio_vec biv;
			struct bvec_iter iter;
			unsigned data_to_process = dio->range.n_sectors;
			sector_to_block(ic, data_to_process);
			data_to_process *= ic->tag_size;

			bip_for_each_vec(biv, bip, iter) {
				unsigned char *tag;
				unsigned this_len;

				BUG_ON(PageHighMem(biv.bv_page));
				tag = lowmem_page_address(biv.bv_page) + biv.bv_offset;
				this_len = min(biv.bv_len, data_to_process);
				r = dm_integrity_rw_tag(ic, tag, &dio->metadata_block, &dio->metadata_offset,
							this_len, !dio->write ? TAG_READ : TAG_WRITE);
				if (unlikely(r))
					goto error;
				data_to_process -= this_len;
				if (!data_to_process)
					break;
			}
		}
	}
skip_io:
	dec_in_flight(dio);
	return;
error:
	dio->bi_status = errno_to_blk_status(r);
	dec_in_flight(dio);
}

static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_integrity_c *ic = ti->private;
	struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
	struct bio_integrity_payload *bip;

	sector_t area, offset;

	dio->ic = ic;
	dio->bi_status = 0;

	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
		submit_flush_bio(ic, dio);
		return DM_MAPIO_SUBMITTED;
	}

	dio->range.logical_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
	dio->write = bio_op(bio) == REQ_OP_WRITE;
	dio->fua = dio->write && bio->bi_opf & REQ_FUA;
	if (unlikely(dio->fua)) {
		/*
		 * Don't pass down the FUA flag because we have to flush
		 * disk cache anyway.
		 */
		bio->bi_opf &= ~REQ_FUA;
	}
	if (unlikely(dio->range.logical_sector + bio_sectors(bio) > ic->provided_data_sectors)) {
		DMERR("Too big sector number: 0x%llx + 0x%x > 0x%llx",
		      (unsigned long long)dio->range.logical_sector, bio_sectors(bio),
		      (unsigned long long)ic->provided_data_sectors);
		return DM_MAPIO_KILL;
	}
	if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned)(ic->sectors_per_block - 1))) {
		DMERR("Bio not aligned on %u sectors: 0x%llx, 0x%x",
		      ic->sectors_per_block,
		      (unsigned long long)dio->range.logical_sector, bio_sectors(bio));
		return DM_MAPIO_KILL;
	}

	if (ic->sectors_per_block > 1) {
		struct bvec_iter iter;
		struct bio_vec bv;
		bio_for_each_segment(bv, bio, iter) {
			if (unlikely(bv.bv_len & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) {
				DMERR("Bio vector (%u,%u) is not aligned on %u-sector boundary",
					bv.bv_offset, bv.bv_len, ic->sectors_per_block);
				return DM_MAPIO_KILL;
			}
		}
	}

	bip = bio_integrity(bio);
	if (!ic->internal_hash) {
		if (bip) {
			unsigned wanted_tag_size = bio_sectors(bio) >> ic->sb->log2_sectors_per_block;
			if (ic->log2_tag_size >= 0)
				wanted_tag_size <<= ic->log2_tag_size;
			else
				wanted_tag_size *= ic->tag_size;
			if (unlikely(wanted_tag_size != bip->bip_iter.bi_size)) {
				DMERR("Invalid integrity data size %u, expected %u",
				      bip->bip_iter.bi_size, wanted_tag_size);
				return DM_MAPIO_KILL;
			}
		}
	} else {
		if (unlikely(bip != NULL)) {
			DMERR("Unexpected integrity data when using internal hash");
			return DM_MAPIO_KILL;
		}
	}

	if (unlikely(ic->mode == 'R') && unlikely(dio->write))
		return DM_MAPIO_KILL;

	get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
	dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
	bio->bi_iter.bi_sector = get_data_sector(ic, area, offset);

	dm_integrity_map_continue(dio, true);
	return DM_MAPIO_SUBMITTED;
}

1701static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
1702 unsigned journal_section, unsigned journal_entry)
1703{
1704 struct dm_integrity_c *ic = dio->ic;
1705 sector_t logical_sector;
1706 unsigned n_sectors;
1707
1708 logical_sector = dio->range.logical_sector;
1709 n_sectors = dio->range.n_sectors;
1710 do {
1711 struct bio_vec bv = bio_iovec(bio);
1712 char *mem;
1713
1714 if (unlikely(bv.bv_len >> SECTOR_SHIFT > n_sectors))
1715 bv.bv_len = n_sectors << SECTOR_SHIFT;
1716 n_sectors -= bv.bv_len >> SECTOR_SHIFT;
1717 bio_advance_iter(bio, &bio->bi_iter, bv.bv_len);
1718retry_kmap:
1719 mem = kmap_atomic(bv.bv_page);
1720 if (likely(dio->write))
1721 flush_dcache_page(bv.bv_page);
1722
1723 do {
1724 struct journal_entry *je = access_journal_entry(ic, journal_section, journal_entry);
1725
1726 if (unlikely(!dio->write)) {
1727 struct journal_sector *js;
9d609f85
MP
1728 char *mem_ptr;
1729 unsigned s;
7eada909
MP
1730
1731 if (unlikely(journal_entry_is_inprogress(je))) {
1732 flush_dcache_page(bv.bv_page);
1733 kunmap_atomic(mem);
1734
1735 __io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
1736 goto retry_kmap;
1737 }
1738 smp_rmb();
1739 BUG_ON(journal_entry_get_sector(je) != logical_sector);
1740 js = access_journal_data(ic, journal_section, journal_entry);
9d609f85
MP
1741 mem_ptr = mem + bv.bv_offset;
1742 s = 0;
1743 do {
1744 memcpy(mem_ptr, js, JOURNAL_SECTOR_DATA);
1745 *(commit_id_t *)(mem_ptr + JOURNAL_SECTOR_DATA) = je->last_bytes[s];
1746 js++;
1747 mem_ptr += 1 << SECTOR_SHIFT;
1748 } while (++s < ic->sectors_per_block);
7eada909
MP
1749#ifdef INTERNAL_VERIFY
1750 if (ic->internal_hash) {
6d39a124 1751 char checksums_onstack[max(HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
7eada909
MP
1752
1753 integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
9d609f85 1754 if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
22555744
MP
1755 DMERR_LIMIT("Checksum failed when reading from journal, at sector 0x%llx",
1756 (unsigned long long)logical_sector);
7eada909
MP
1757 }
1758 }
1759#endif
1760 }
1761
1762 if (!ic->internal_hash) {
1763 struct bio_integrity_payload *bip = bio_integrity(bio);
1764 unsigned tag_todo = ic->tag_size;
9d609f85 1765 char *tag_ptr = journal_entry_tag(ic, je);
7eada909
MP
1766
1767 if (bip) do {
1768 struct bio_vec biv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
1769 unsigned tag_now = min(biv.bv_len, tag_todo);
1770 char *tag_addr;
1771 BUG_ON(PageHighMem(biv.bv_page));
1772 tag_addr = lowmem_page_address(biv.bv_page) + biv.bv_offset;
1773 if (likely(dio->write))
1774 memcpy(tag_ptr, tag_addr, tag_now);
1775 else
1776 memcpy(tag_addr, tag_ptr, tag_now);
1777 bvec_iter_advance(bip->bip_vec, &bip->bip_iter, tag_now);
1778 tag_ptr += tag_now;
1779 tag_todo -= tag_now;
1780 } while (unlikely(tag_todo)); else {
1781 if (likely(dio->write))
1782 memset(tag_ptr, 0, tag_todo);
1783 }
1784 }
1785
1786 if (likely(dio->write)) {
1787 struct journal_sector *js;
9d609f85 1788 unsigned s;
7eada909
MP
1789
1790 js = access_journal_data(ic, journal_section, journal_entry);
9d609f85
MP
1791 memcpy(js, mem + bv.bv_offset, ic->sectors_per_block << SECTOR_SHIFT);
1792
1793 s = 0;
1794 do {
1795 je->last_bytes[s] = js[s].commit_id;
1796 } while (++s < ic->sectors_per_block);
7eada909
MP
1797
1798 if (ic->internal_hash) {
1799 unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
1800 if (unlikely(digest_size > ic->tag_size)) {
6d39a124 1801 char checksums_onstack[HASH_MAX_DIGESTSIZE];
7eada909 1802 integrity_sector_checksum(ic, logical_sector, (char *)js, checksums_onstack);
9d609f85 1803 memcpy(journal_entry_tag(ic, je), checksums_onstack, ic->tag_size);
7eada909 1804 } else
9d609f85 1805 integrity_sector_checksum(ic, logical_sector, (char *)js, journal_entry_tag(ic, je));
7eada909
MP
1806 }
1807
1808 journal_entry_set_sector(je, logical_sector);
1809 }
9d609f85 1810 logical_sector += ic->sectors_per_block;
7eada909
MP
1811
1812 journal_entry++;
1813 if (unlikely(journal_entry == ic->journal_section_entries)) {
1814 journal_entry = 0;
1815 journal_section++;
1816 wraparound_section(ic, &journal_section);
1817 }
1818
9d609f85
MP
1819 bv.bv_offset += ic->sectors_per_block << SECTOR_SHIFT;
1820 } while (bv.bv_len -= ic->sectors_per_block << SECTOR_SHIFT);
7eada909
MP
1821
1822 if (unlikely(!dio->write))
1823 flush_dcache_page(bv.bv_page);
1824 kunmap_atomic(mem);
1825 } while (n_sectors);
1826
1827 if (likely(dio->write)) {
1828 smp_mb();
1829 if (unlikely(waitqueue_active(&ic->copy_to_journal_wait)))
1830 wake_up(&ic->copy_to_journal_wait);
d3e632f0 1831 if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) {
7eada909
MP
1832 queue_work(ic->commit_wq, &ic->commit_work);
1833 } else {
1834 schedule_autocommit(ic);
1835 }
1836 } else {
1837 remove_range(ic, &dio->range);
1838 }
1839
1840 if (unlikely(bio->bi_iter.bi_size)) {
1841 sector_t area, offset;
1842
1843 dio->range.logical_sector = logical_sector;
1844 get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
1845 dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
1846 return true;
1847 }
1848
1849 return false;
1850}
1851
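/*
 * Continue processing a mapped bio: allocate journal entries for
 * writes (mode 'J'), look up the journal for reads, reserve the
 * affected sector range and either service the bio from the journal
 * or pass it down to the underlying device. Offloads to a workqueue
 * whenever it would have to sleep on the map path.
 */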
1852static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map)
1853{
1854 struct dm_integrity_c *ic = dio->ic;
1855 struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1856 unsigned journal_section, journal_entry;
1857 unsigned journal_read_pos;
1858 struct completion read_comp;
1859 bool need_sync_io = ic->internal_hash && !dio->write;
1860
1861 if (need_sync_io && from_map) {
1862 INIT_WORK(&dio->work, integrity_bio_wait);
53770f0e 1863 queue_work(ic->offload_wq, &dio->work);
7eada909
MP
1864 return;
1865 }
1866
1867lock_retry:
1868 spin_lock_irq(&ic->endio_wait.lock);
1869retry:
1870 if (unlikely(dm_integrity_failed(ic))) {
1871 spin_unlock_irq(&ic->endio_wait.lock);
1872 do_endio(ic, bio);
1873 return;
1874 }
1875 dio->range.n_sectors = bio_sectors(bio);
1876 journal_read_pos = NOT_FOUND;
1877 if (likely(ic->mode == 'J')) {
1878 if (dio->write) {
1879 unsigned next_entry, i, pos;
9dd59727 1880 unsigned ws, we, range_sectors;
7eada909 1881
9dd59727 1882 dio->range.n_sectors = min(dio->range.n_sectors,
4f43446d 1883 (sector_t)ic->free_sectors << ic->sb->log2_sectors_per_block);
518748b1
MP
1884 if (unlikely(!dio->range.n_sectors)) {
1885 if (from_map)
1886 goto offload_to_thread;
1887 sleep_on_endio_wait(ic);
1888 goto retry;
1889 }
9dd59727
MP
1890 range_sectors = dio->range.n_sectors >> ic->sb->log2_sectors_per_block;
1891 ic->free_sectors -= range_sectors;
7eada909
MP
1892 journal_section = ic->free_section;
1893 journal_entry = ic->free_section_entry;
1894
9dd59727 1895 next_entry = ic->free_section_entry + range_sectors;
7eada909
MP
1896 ic->free_section_entry = next_entry % ic->journal_section_entries;
1897 ic->free_section += next_entry / ic->journal_section_entries;
1898 ic->n_uncommitted_sections += next_entry / ic->journal_section_entries;
1899 wraparound_section(ic, &ic->free_section);
1900
1901 pos = journal_section * ic->journal_section_entries + journal_entry;
1902 ws = journal_section;
1903 we = journal_entry;
9d609f85
MP
1904 i = 0;
1905 do {
7eada909
MP
1906 struct journal_entry *je;
1907
1908 add_journal_node(ic, &ic->journal_tree[pos], dio->range.logical_sector + i);
1909 pos++;
1910 if (unlikely(pos >= ic->journal_entries))
1911 pos = 0;
1912
1913 je = access_journal_entry(ic, ws, we);
1914 BUG_ON(!journal_entry_is_unused(je));
1915 journal_entry_set_inprogress(je);
1916 we++;
1917 if (unlikely(we == ic->journal_section_entries)) {
1918 we = 0;
1919 ws++;
1920 wraparound_section(ic, &ws);
1921 }
9d609f85 1922 } while ((i += ic->sectors_per_block) < dio->range.n_sectors);
7eada909
MP
1923
1924 spin_unlock_irq(&ic->endio_wait.lock);
1925 goto journal_read_write;
1926 } else {
1927 sector_t next_sector;
1928 journal_read_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
1929 if (likely(journal_read_pos == NOT_FOUND)) {
1930 if (unlikely(dio->range.n_sectors > next_sector - dio->range.logical_sector))
1931 dio->range.n_sectors = next_sector - dio->range.logical_sector;
1932 } else {
1933 unsigned i;
9d609f85
MP
1934 unsigned jp = journal_read_pos + 1;
1935 for (i = ic->sectors_per_block; i < dio->range.n_sectors; i += ic->sectors_per_block, jp++) {
1936 if (!test_journal_node(ic, jp, dio->range.logical_sector + i))
7eada909
MP
1937 break;
1938 }
1939 dio->range.n_sectors = i;
1940 }
1941 }
1942 }
724376a0 1943 if (unlikely(!add_new_range(ic, &dio->range, true))) {
7eada909
MP
1944 /*
1945 * We must not sleep in the request routine because it could
1946 * stall bios on current->bio_list.
1947 * So, we offload the bio to a workqueue if we have to sleep.
1948 */
7eada909 1949 if (from_map) {
518748b1 1950offload_to_thread:
7eada909
MP
1951 spin_unlock_irq(&ic->endio_wait.lock);
1952 INIT_WORK(&dio->work, integrity_bio_wait);
1953 queue_work(ic->wait_wq, &dio->work);
1954 return;
7eada909 1955 }
5729b6e5
MP
1956 if (journal_read_pos != NOT_FOUND)
1957 dio->range.n_sectors = ic->sectors_per_block;
724376a0 1958 wait_and_add_new_range(ic, &dio->range);
5729b6e5
MP
1959 /*
1960 * wait_and_add_new_range drops the spinlock, so the journal
1961 * may have been changed arbitrarily. We need to recheck.
1962 * To simplify the code, we restrict I/O size to just one block.
1963 */
1964 if (journal_read_pos != NOT_FOUND) {
1965 sector_t next_sector;
1966 unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
1967 if (unlikely(new_pos != journal_read_pos)) {
1968 remove_range_unlocked(ic, &dio->range);
1969 goto retry;
1970 }
1971 }
7eada909
MP
1972 }
1973 spin_unlock_irq(&ic->endio_wait.lock);
1974
1975 if (unlikely(journal_read_pos != NOT_FOUND)) {
1976 journal_section = journal_read_pos / ic->journal_section_entries;
1977 journal_entry = journal_read_pos % ic->journal_section_entries;
1978 goto journal_read_write;
1979 }
1980
468dfca3 1981 if (ic->mode == 'B' && dio->write) {
05d6909e
MS
1982 if (!block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
1983 dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
1984 struct bitmap_block_status *bbs;
468dfca3 1985
05d6909e 1986 bbs = sector_to_bitmap_block(ic, dio->range.logical_sector);
468dfca3
MP
1987 spin_lock(&bbs->bio_queue_lock);
1988 bio_list_add(&bbs->bio_queue, bio);
1989 spin_unlock(&bbs->bio_queue_lock);
468dfca3 1990 queue_work(ic->writer_wq, &bbs->work);
468dfca3
MP
1991 return;
1992 }
1993 }
1994
7eada909
MP
1995 dio->in_flight = (atomic_t)ATOMIC_INIT(2);
1996
1997 if (need_sync_io) {
b5e8ad92 1998 init_completion(&read_comp);
7eada909
MP
1999 dio->completion = &read_comp;
2000 } else
2001 dio->completion = NULL;
2002
248aa264 2003 dm_bio_record(&dio->bio_details, bio);
74d46992 2004 bio_set_dev(bio, ic->dev->bdev);
7eada909
MP
2005 bio->bi_integrity = NULL;
2006 bio->bi_opf &= ~REQ_INTEGRITY;
7eada909 2007 bio->bi_end_io = integrity_end_io;
7eada909 2008 bio->bi_iter.bi_size = dio->range.n_sectors << SECTOR_SHIFT;
248aa264 2009
7eada909
MP
2010 generic_make_request(bio);
2011
2012 if (need_sync_io) {
2013 wait_for_completion_io(&read_comp);
468dfca3 2014 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
a3fcf725
MP
2015 dio->range.logical_sector + dio->range.n_sectors > le64_to_cpu(ic->sb->recalc_sector))
2016 goto skip_check;
468dfca3 2017 if (ic->mode == 'B') {
05d6909e
MS
2018 if (!block_bitmap_op(ic, ic->recalc_bitmap, dio->range.logical_sector,
2019 dio->range.n_sectors, BITMAP_OP_TEST_ALL_CLEAR))
468dfca3
MP
2020 goto skip_check;
2021 }
2022
b7e326f7
HL
2023 if (likely(!bio->bi_status))
2024 integrity_metadata(&dio->work);
2025 else
a3fcf725 2026skip_check:
b7e326f7
HL
2027 dec_in_flight(dio);
2028
7eada909
MP
2029 } else {
2030 INIT_WORK(&dio->work, integrity_metadata);
2031 queue_work(ic->metadata_wq, &dio->work);
2032 }
2033
2034 return;
2035
2036journal_read_write:
2037 if (unlikely(__journal_read_write(dio, bio, journal_section, journal_entry)))
2038 goto lock_retry;
2039
2040 do_endio_flush(ic, dio);
2041}
2042
2043
2044static void integrity_bio_wait(struct work_struct *w)
2045{
2046 struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
2047
2048 dm_integrity_map_continue(dio, false);
2049}
2050
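/*
 * If the current journal section is only partially filled, skip the
 * remaining free entries so that the next commit starts at a section
 * boundary, and sanity-check the free/committed sector accounting.
 */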
2051static void pad_uncommitted(struct dm_integrity_c *ic)
2052{
2053 if (ic->free_section_entry) {
2054 ic->free_sectors -= ic->journal_section_entries - ic->free_section_entry;
2055 ic->free_section_entry = 0;
2056 ic->free_section++;
2057 wraparound_section(ic, &ic->free_section);
2058 ic->n_uncommitted_sections++;
2059 }
468dfca3 2060 if (WARN_ON(ic->journal_sections * ic->journal_section_entries !=
05d6909e
MS
2061 (ic->n_uncommitted_sections + ic->n_committed_sections) *
2062 ic->journal_section_entries + ic->free_sectors)) {
2063 DMCRIT("journal_sections %u, journal_section_entries %u, "
2064 "n_uncommitted_sections %u, n_committed_sections %u, "
2065 "journal_section_entries %u, free_sectors %u",
2066 ic->journal_sections, ic->journal_section_entries,
2067 ic->n_uncommitted_sections, ic->n_committed_sections,
2068 ic->journal_section_entries, ic->free_sectors);
468dfca3 2069 }
7eada909
MP
2070}
2071
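/*
 * Commit work: wait until all in-progress copies into the uncommitted
 * journal sections have finished, stamp the commit ids into the
 * journal sectors, write the sections out and then release any queued
 * flush bios.
 */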
2072static void integrity_commit(struct work_struct *w)
2073{
2074 struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, commit_work);
2075 unsigned commit_start, commit_sections;
2076 unsigned i, j, n;
2077 struct bio *flushes;
2078
2079 del_timer(&ic->autocommit_timer);
2080
2081 spin_lock_irq(&ic->endio_wait.lock);
2082 flushes = bio_list_get(&ic->flush_bio_list);
2083 if (unlikely(ic->mode != 'J')) {
2084 spin_unlock_irq(&ic->endio_wait.lock);
2085 dm_integrity_flush_buffers(ic);
2086 goto release_flush_bios;
2087 }
2088
2089 pad_uncommitted(ic);
2090 commit_start = ic->uncommitted_section;
2091 commit_sections = ic->n_uncommitted_sections;
2092 spin_unlock_irq(&ic->endio_wait.lock);
2093
2094 if (!commit_sections)
2095 goto release_flush_bios;
2096
2097 i = commit_start;
2098 for (n = 0; n < commit_sections; n++) {
2099 for (j = 0; j < ic->journal_section_entries; j++) {
2100 struct journal_entry *je;
2101 je = access_journal_entry(ic, i, j);
2102 io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
2103 }
2104 for (j = 0; j < ic->journal_section_sectors; j++) {
2105 struct journal_sector *js;
2106 js = access_journal(ic, i, j);
2107 js->commit_id = dm_integrity_commit_id(ic, i, j, ic->commit_seq);
2108 }
2109 i++;
2110 if (unlikely(i >= ic->journal_sections))
2111 ic->commit_seq = next_commit_seq(ic->commit_seq);
2112 wraparound_section(ic, &i);
2113 }
2114 smp_rmb();
2115
2116 write_journal(ic, commit_start, commit_sections);
2117
2118 spin_lock_irq(&ic->endio_wait.lock);
2119 ic->uncommitted_section += commit_sections;
2120 wraparound_section(ic, &ic->uncommitted_section);
2121 ic->n_uncommitted_sections -= commit_sections;
2122 ic->n_committed_sections += commit_sections;
2123 spin_unlock_irq(&ic->endio_wait.lock);
2124
d3e632f0 2125 if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold)
7eada909
MP
2126 queue_work(ic->writer_wq, &ic->writer_work);
2127
2128release_flush_bios:
2129 while (flushes) {
2130 struct bio *next = flushes->bi_next;
2131 flushes->bi_next = NULL;
2132 do_endio(ic, flushes);
2133 flushes = next;
2134 }
2135}
2136
2137static void complete_copy_from_journal(unsigned long error, void *context)
2138{
2139 struct journal_io *io = context;
2140 struct journal_completion *comp = io->comp;
2141 struct dm_integrity_c *ic = comp->ic;
2142 remove_range(ic, &io->range);
6f1c819c 2143 mempool_free(io, &ic->journal_io_mempool);
7eada909
MP
2144 if (unlikely(error != 0))
2145 dm_integrity_io_error(ic, "copying from journal", -EIO);
2146 complete_journal_op(comp);
2147}
2148
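/*
 * The last 8 bytes of each journaled data sector are overlaid by the
 * commit id on disk; the original bytes are preserved in the journal
 * entry. Put them back so the data block is intact before it is
 * copied to its final location.
 */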
9d609f85
MP
2149static void restore_last_bytes(struct dm_integrity_c *ic, struct journal_sector *js,
2150 struct journal_entry *je)
2151{
2152 unsigned s = 0;
2153 do {
2154 js->commit_id = je->last_bytes[s];
2155 js++;
2156 } while (++s < ic->sectors_per_block);
2157}
2158
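/*
 * Write committed journal sections back to their final location on
 * the data device. Contiguous entries are coalesced into one copy,
 * entries superseded by a newer committed node are skipped, and the
 * corresponding metadata tags are written before the data copy is
 * issued asynchronously.
 */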
7eada909
MP
2159static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
2160 unsigned write_sections, bool from_replay)
2161{
2162 unsigned i, j, n;
2163 struct journal_completion comp;
a7c3e62b
MP
2164 struct blk_plug plug;
2165
2166 blk_start_plug(&plug);
7eada909
MP
2167
2168 comp.ic = ic;
2169 comp.in_flight = (atomic_t)ATOMIC_INIT(1);
b5e8ad92 2170 init_completion(&comp.comp);
7eada909
MP
2171
2172 i = write_start;
2173 for (n = 0; n < write_sections; n++, i++, wraparound_section(ic, &i)) {
2174#ifndef INTERNAL_VERIFY
2175 if (unlikely(from_replay))
2176#endif
2177 rw_section_mac(ic, i, false);
2178 for (j = 0; j < ic->journal_section_entries; j++) {
2179 struct journal_entry *je = access_journal_entry(ic, i, j);
2180 sector_t sec, area, offset;
2181 unsigned k, l, next_loop;
2182 sector_t metadata_block;
2183 unsigned metadata_offset;
2184 struct journal_io *io;
2185
2186 if (journal_entry_is_unused(je))
2187 continue;
2188 BUG_ON(unlikely(journal_entry_is_inprogress(je)) && !from_replay);
2189 sec = journal_entry_get_sector(je);
9d609f85
MP
2190 if (unlikely(from_replay)) {
2191 if (unlikely(sec & (unsigned)(ic->sectors_per_block - 1))) {
2192 dm_integrity_io_error(ic, "invalid sector in journal", -EIO);
2193 sec &= ~(sector_t)(ic->sectors_per_block - 1);
2194 }
2195 }
7eada909 2196 get_area_and_offset(ic, sec, &area, &offset);
9d609f85 2197 restore_last_bytes(ic, access_journal_data(ic, i, j), je);
7eada909
MP
2198 for (k = j + 1; k < ic->journal_section_entries; k++) {
2199 struct journal_entry *je2 = access_journal_entry(ic, i, k);
2200 sector_t sec2, area2, offset2;
2201 if (journal_entry_is_unused(je2))
2202 break;
2203 BUG_ON(unlikely(journal_entry_is_inprogress(je2)) && !from_replay);
2204 sec2 = journal_entry_get_sector(je2);
2205 get_area_and_offset(ic, sec2, &area2, &offset2);
9d609f85 2206 if (area2 != area || offset2 != offset + ((k - j) << ic->sb->log2_sectors_per_block))
7eada909 2207 break;
9d609f85 2208 restore_last_bytes(ic, access_journal_data(ic, i, k), je2);
7eada909
MP
2209 }
2210 next_loop = k - 1;
2211
6f1c819c 2212 io = mempool_alloc(&ic->journal_io_mempool, GFP_NOIO);
7eada909
MP
2213 io->comp = &comp;
2214 io->range.logical_sector = sec;
9d609f85 2215 io->range.n_sectors = (k - j) << ic->sb->log2_sectors_per_block;
7eada909
MP
2216
2217 spin_lock_irq(&ic->endio_wait.lock);
8b3bbd49 2218 add_new_range_and_wait(ic, &io->range);
7eada909
MP
2219
2220 if (likely(!from_replay)) {
2221 struct journal_node *section_node = &ic->journal_tree[i * ic->journal_section_entries];
2222
2223 /* don't write if there is a newer committed sector */
2224 while (j < k && find_newer_committed_node(ic, &section_node[j])) {
2225 struct journal_entry *je2 = access_journal_entry(ic, i, j);
2226
2227 journal_entry_set_unused(je2);
2228 remove_journal_node(ic, &section_node[j]);
2229 j++;
9d609f85
MP
2230 sec += ic->sectors_per_block;
2231 offset += ic->sectors_per_block;
7eada909
MP
2232 }
2233 while (j < k && find_newer_committed_node(ic, &section_node[k - 1])) {
2234 struct journal_entry *je2 = access_journal_entry(ic, i, k - 1);
2235
2236 journal_entry_set_unused(je2);
2237 remove_journal_node(ic, &section_node[k - 1]);
2238 k--;
2239 }
2240 if (j == k) {
2241 remove_range_unlocked(ic, &io->range);
2242 spin_unlock_irq(&ic->endio_wait.lock);
6f1c819c 2243 mempool_free(io, &ic->journal_io_mempool);
7eada909
MP
2244 goto skip_io;
2245 }
2246 for (l = j; l < k; l++) {
2247 remove_journal_node(ic, &section_node[l]);
2248 }
2249 }
2250 spin_unlock_irq(&ic->endio_wait.lock);
2251
2252 metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
2253 for (l = j; l < k; l++) {
2254 int r;
2255 struct journal_entry *je2 = access_journal_entry(ic, i, l);
2256
2257 if (
2258#ifndef INTERNAL_VERIFY
2259 unlikely(from_replay) &&
2260#endif
2261 ic->internal_hash) {
6d39a124 2262 char test_tag[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
7eada909 2263
9d609f85 2264 integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block),
7eada909 2265 (char *)access_journal_data(ic, i, l), test_tag);
9d609f85 2266 if (unlikely(memcmp(test_tag, journal_entry_tag(ic, je2), ic->tag_size)))
7eada909
MP
2267 dm_integrity_io_error(ic, "tag mismatch when replaying journal", -EILSEQ);
2268 }
2269
2270 journal_entry_set_unused(je2);
9d609f85 2271 r = dm_integrity_rw_tag(ic, journal_entry_tag(ic, je2), &metadata_block, &metadata_offset,
7eada909
MP
2272 ic->tag_size, TAG_WRITE);
2273 if (unlikely(r)) {
2274 dm_integrity_io_error(ic, "reading tags", r);
2275 }
2276 }
2277
2278 atomic_inc(&comp.in_flight);
9d609f85
MP
2279 copy_from_journal(ic, i, j << ic->sb->log2_sectors_per_block,
2280 (k - j) << ic->sb->log2_sectors_per_block,
2281 get_data_sector(ic, area, offset),
7eada909
MP
2282 complete_copy_from_journal, io);
2283skip_io:
2284 j = next_loop;
2285 }
2286 }
2287
2288 dm_bufio_write_dirty_buffers_async(ic->bufio);
2289
a7c3e62b
MP
2290 blk_finish_plug(&plug);
2291
7eada909
MP
2292 complete_journal_op(&comp);
2293 wait_for_completion_io(&comp.comp);
2294
2295 dm_integrity_flush_buffers(ic);
2296}
2297
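/*
 * Writer work: flush committed journal sections to the device and
 * return the freed journal entries to the free pool, waking up any
 * writers that were blocked on a full journal.
 */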
2298static void integrity_writer(struct work_struct *w)
2299{
2300 struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, writer_work);
2301 unsigned write_start, write_sections;
2302
2303 unsigned prev_free_sectors;
2304
2305 /* the following test is not strictly needed, but it exercises the journal replay code */
adc0daad 2306 if (unlikely(dm_suspended(ic->ti)) && !ic->meta_dev)
7eada909
MP
2307 return;
2308
2309 spin_lock_irq(&ic->endio_wait.lock);
2310 write_start = ic->committed_section;
2311 write_sections = ic->n_committed_sections;
2312 spin_unlock_irq(&ic->endio_wait.lock);
2313
2314 if (!write_sections)
2315 return;
2316
2317 do_journal_write(ic, write_start, write_sections, false);
2318
2319 spin_lock_irq(&ic->endio_wait.lock);
2320
2321 ic->committed_section += write_sections;
2322 wraparound_section(ic, &ic->committed_section);
2323 ic->n_committed_sections -= write_sections;
2324
2325 prev_free_sectors = ic->free_sectors;
2326 ic->free_sectors += write_sections * ic->journal_section_entries;
2327 if (unlikely(!prev_free_sectors))
2328 wake_up_locked(&ic->endio_wait);
2329
2330 spin_unlock_irq(&ic->endio_wait.lock);
2331}
2332
a3fcf725
MP
2333static void recalc_write_super(struct dm_integrity_c *ic)
2334{
2335 int r;
2336
2337 dm_integrity_flush_buffers(ic);
2338 if (dm_integrity_failed(ic))
2339 return;
2340
a3fcf725
MP
2341 r = sync_rw_sb(ic, REQ_OP_WRITE, 0);
2342 if (unlikely(r))
2343 dm_integrity_io_error(ic, "writing superblock", r);
2344}
2345
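/*
 * Background recalculation of integrity tags: read the next chunk of
 * data, compute the checksums, write them to the tag area and advance
 * sb->recalc_sector. The superblock is persisted every
 * RECALC_WRITE_SUPER chunks so that progress survives a crash.
 */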
2346static void integrity_recalc(struct work_struct *w)
2347{
2348 struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, recalc_work);
2349 struct dm_integrity_range range;
2350 struct dm_io_request io_req;
2351 struct dm_io_region io_loc;
2352 sector_t area, offset;
2353 sector_t metadata_block;
2354 unsigned metadata_offset;
468dfca3 2355 sector_t logical_sector, n_sectors;
a3fcf725
MP
2356 __u8 *t;
2357 unsigned i;
2358 int r;
2359 unsigned super_counter = 0;
2360
468dfca3
MP
2361 DEBUG_print("start recalculation... (position %llx)\n", le64_to_cpu(ic->sb->recalc_sector));
2362
a3fcf725
MP
2363 spin_lock_irq(&ic->endio_wait.lock);
2364
2365next_chunk:
2366
adc0daad 2367 if (unlikely(dm_suspended(ic->ti)))
a3fcf725
MP
2368 goto unlock_ret;
2369
2370 range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);
468dfca3
MP
2371 if (unlikely(range.logical_sector >= ic->provided_data_sectors)) {
2372 if (ic->mode == 'B') {
2373 DEBUG_print("queue_delayed_work: bitmap_flush_work\n");
2374 queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
2375 }
a3fcf725 2376 goto unlock_ret;
468dfca3 2377 }
a3fcf725
MP
2378
2379 get_area_and_offset(ic, range.logical_sector, &area, &offset);
2380 range.n_sectors = min((sector_t)RECALC_SECTORS, ic->provided_data_sectors - range.logical_sector);
2381 if (!ic->meta_dev)
4f43446d 2382 range.n_sectors = min(range.n_sectors, ((sector_t)1U << ic->sb->log2_interleave_sectors) - (unsigned)offset);
a3fcf725 2383
8b3bbd49 2384 add_new_range_and_wait(ic, &range);
a3fcf725 2385 spin_unlock_irq(&ic->endio_wait.lock);
468dfca3
MP
2386 logical_sector = range.logical_sector;
2387 n_sectors = range.n_sectors;
2388
2389 if (ic->mode == 'B') {
2390 if (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector, n_sectors, BITMAP_OP_TEST_ALL_CLEAR)) {
2391 goto advance_and_next;
2392 }
05d6909e
MS
2393 while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector,
2394 ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) {
468dfca3
MP
2395 logical_sector += ic->sectors_per_block;
2396 n_sectors -= ic->sectors_per_block;
2397 cond_resched();
2398 }
05d6909e
MS
2399 while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector + n_sectors - ic->sectors_per_block,
2400 ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) {
468dfca3
MP
2401 n_sectors -= ic->sectors_per_block;
2402 cond_resched();
2403 }
2404 get_area_and_offset(ic, logical_sector, &area, &offset);
2405 }
2406
2407 DEBUG_print("recalculating: %lx, %lx\n", logical_sector, n_sectors);
a3fcf725
MP
2408
2409 if (unlikely(++super_counter == RECALC_WRITE_SUPER)) {
2410 recalc_write_super(ic);
468dfca3
MP
2411 if (ic->mode == 'B') {
2412 queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);
2413 }
a3fcf725
MP
2414 super_counter = 0;
2415 }
2416
2417 if (unlikely(dm_integrity_failed(ic)))
2418 goto err;
2419
2420 io_req.bi_op = REQ_OP_READ;
2421 io_req.bi_op_flags = 0;
2422 io_req.mem.type = DM_IO_VMA;
2423 io_req.mem.ptr.addr = ic->recalc_buffer;
2424 io_req.notify.fn = NULL;
2425 io_req.client = ic->io;
2426 io_loc.bdev = ic->dev->bdev;
2427 io_loc.sector = get_data_sector(ic, area, offset);
468dfca3 2428 io_loc.count = n_sectors;
a3fcf725
MP
2429
2430 r = dm_io(&io_req, 1, &io_loc, NULL);
2431 if (unlikely(r)) {
2432 dm_integrity_io_error(ic, "reading data", r);
2433 goto err;
2434 }
2435
2436 t = ic->recalc_tags;
468dfca3
MP
2437 for (i = 0; i < n_sectors; i += ic->sectors_per_block) {
2438 integrity_sector_checksum(ic, logical_sector + i, ic->recalc_buffer + (i << SECTOR_SHIFT), t);
a3fcf725
MP
2439 t += ic->tag_size;
2440 }
2441
2442 metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
2443
2444 r = dm_integrity_rw_tag(ic, ic->recalc_tags, &metadata_block, &metadata_offset, t - ic->recalc_tags, TAG_WRITE);
2445 if (unlikely(r)) {
2446 dm_integrity_io_error(ic, "writing tags", r);
2447 goto err;
2448 }
2449
468dfca3
MP
2450advance_and_next:
2451 cond_resched();
2452
a3fcf725
MP
2453 spin_lock_irq(&ic->endio_wait.lock);
2454 remove_range_unlocked(ic, &range);
2455 ic->sb->recalc_sector = cpu_to_le64(range.logical_sector + range.n_sectors);
2456 goto next_chunk;
2457
2458err:
2459 remove_range(ic, &range);
2460 return;
2461
2462unlock_ret:
2463 spin_unlock_irq(&ic->endio_wait.lock);
2464
2465 recalc_write_super(ic);
2466}
2467
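/*
 * Bitmap mode ('B') write path: bios whose region is already marked
 * in the may_write bitmap proceed immediately; otherwise the bits are
 * set in the journal bitmap, the bitmap block is written out with FUA
 * and the bios are resumed afterwards.
 */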
468dfca3
MP
2468static void bitmap_block_work(struct work_struct *w)
2469{
2470 struct bitmap_block_status *bbs = container_of(w, struct bitmap_block_status, work);
2471 struct dm_integrity_c *ic = bbs->ic;
2472 struct bio *bio;
2473 struct bio_list bio_queue;
2474 struct bio_list waiting;
2475
2476 bio_list_init(&waiting);
2477
2478 spin_lock(&bbs->bio_queue_lock);
2479 bio_queue = bbs->bio_queue;
2480 bio_list_init(&bbs->bio_queue);
2481 spin_unlock(&bbs->bio_queue_lock);
2482
2483 while ((bio = bio_list_pop(&bio_queue))) {
2484 struct dm_integrity_io *dio;
2485
2486 dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
2487
05d6909e
MS
2488 if (block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
2489 dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
468dfca3
MP
2490 remove_range(ic, &dio->range);
2491 INIT_WORK(&dio->work, integrity_bio_wait);
53770f0e 2492 queue_work(ic->offload_wq, &dio->work);
468dfca3 2493 } else {
05d6909e
MS
2494 block_bitmap_op(ic, ic->journal, dio->range.logical_sector,
2495 dio->range.n_sectors, BITMAP_OP_SET);
468dfca3
MP
2496 bio_list_add(&waiting, bio);
2497 }
2498 }
2499
2500 if (bio_list_empty(&waiting))
2501 return;
2502
05d6909e
MS
2503 rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC,
2504 bbs->idx * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT),
2505 BITMAP_BLOCK_SIZE >> SECTOR_SHIFT, NULL);
468dfca3
MP
2506
2507 while ((bio = bio_list_pop(&waiting))) {
2508 struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
2509
05d6909e
MS
2510 block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
2511 dio->range.n_sectors, BITMAP_OP_SET);
468dfca3
MP
2512
2513 remove_range(ic, &dio->range);
2514 INIT_WORK(&dio->work, integrity_bio_wait);
53770f0e 2515 queue_work(ic->offload_wq, &dio->work);
468dfca3
MP
2516 }
2517
2518 queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);
2519}
2520
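/*
 * Periodically flush dirty data and clear the in-memory bitmaps (only
 * up to the already recalculated area while recalculation is in
 * progress), then write the cleared bitmap blocks out and complete
 * any bios waiting for synchronous mode.
 */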
2521static void bitmap_flush_work(struct work_struct *work)
2522{
2523 struct dm_integrity_c *ic = container_of(work, struct dm_integrity_c, bitmap_flush_work.work);
2524 struct dm_integrity_range range;
2525 unsigned long limit;
48271493 2526 struct bio *bio;
468dfca3
MP
2527
2528 dm_integrity_flush_buffers(ic);
2529
2530 range.logical_sector = 0;
2531 range.n_sectors = ic->provided_data_sectors;
2532
2533 spin_lock_irq(&ic->endio_wait.lock);
2534 add_new_range_and_wait(ic, &range);
2535 spin_unlock_irq(&ic->endio_wait.lock);
2536
2537 dm_integrity_flush_buffers(ic);
2538 if (ic->meta_dev)
2539 blkdev_issue_flush(ic->dev->bdev, GFP_NOIO, NULL);
2540
2541 limit = ic->provided_data_sectors;
2542 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
2543 limit = le64_to_cpu(ic->sb->recalc_sector)
2544 >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)
2545 << (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
2546 }
48271493 2547 /*DEBUG_print("zeroing journal\n");*/
468dfca3
MP
2548 block_bitmap_op(ic, ic->journal, 0, limit, BITMAP_OP_CLEAR);
2549 block_bitmap_op(ic, ic->may_write_bitmap, 0, limit, BITMAP_OP_CLEAR);
2550
05d6909e
MS
2551 rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
2552 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
468dfca3 2553
48271493
MP
2554 spin_lock_irq(&ic->endio_wait.lock);
2555 remove_range_unlocked(ic, &range);
2556 while (unlikely((bio = bio_list_pop(&ic->synchronous_bios)) != NULL)) {
2557 bio_endio(bio);
2558 spin_unlock_irq(&ic->endio_wait.lock);
2559 spin_lock_irq(&ic->endio_wait.lock);
2560 }
2561 spin_unlock_irq(&ic->endio_wait.lock);
468dfca3
MP
2562}
2563
2564
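/*
 * Format (a part of) the journal: zero the journal entries and stamp
 * every sector with the commit id derived from commit_seq, then write
 * the sections to disk.
 */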
7eada909
MP
2565static void init_journal(struct dm_integrity_c *ic, unsigned start_section,
2566 unsigned n_sections, unsigned char commit_seq)
2567{
2568 unsigned i, j, n;
2569
2570 if (!n_sections)
2571 return;
2572
2573 for (n = 0; n < n_sections; n++) {
2574 i = start_section + n;
2575 wraparound_section(ic, &i);
2576 for (j = 0; j < ic->journal_section_sectors; j++) {
2577 struct journal_sector *js = access_journal(ic, i, j);
2578 memset(&js->entries, 0, JOURNAL_SECTOR_DATA);
2579 js->commit_id = dm_integrity_commit_id(ic, i, j, commit_seq);
2580 }
2581 for (j = 0; j < ic->journal_section_entries; j++) {
2582 struct journal_entry *je = access_journal_entry(ic, i, j);
2583 journal_entry_set_unused(je);
2584 }
2585 }
2586
2587 write_journal(ic, start_section, n_sections);
2588}
2589
2590static int find_commit_seq(struct dm_integrity_c *ic, unsigned i, unsigned j, commit_id_t id)
2591{
2592 unsigned char k;
2593 for (k = 0; k < N_COMMIT_IDS; k++) {
2594 if (dm_integrity_commit_id(ic, i, j, k) == id)
2595 return k;
2596 }
2597 dm_integrity_io_error(ic, "journal commit id", -EIO);
2598 return -EIO;
2599}
2600
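/*
 * On resume, read the journal, determine which commit sequence was
 * being written when the machine went down, replay the consistent
 * committed sections to the data device and reinitialize the in-core
 * journal state. An unreadable or inconsistent journal is cleared.
 */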
2601static void replay_journal(struct dm_integrity_c *ic)
2602{
2603 unsigned i, j;
2604 bool used_commit_ids[N_COMMIT_IDS];
2605 unsigned max_commit_id_sections[N_COMMIT_IDS];
2606 unsigned write_start, write_sections;
2607 unsigned continue_section;
2608 bool journal_empty;
2609 unsigned char unused, last_used, want_commit_seq;
2610
c2bcb2b7
MP
2611 if (ic->mode == 'R')
2612 return;
2613
7eada909
MP
2614 if (ic->journal_uptodate)
2615 return;
2616
2617 last_used = 0;
2618 write_start = 0;
2619
2620 if (!ic->just_formatted) {
2621 DEBUG_print("reading journal\n");
2622 rw_journal(ic, REQ_OP_READ, 0, 0, ic->journal_sections, NULL);
2623 if (ic->journal_io)
2624 DEBUG_bytes(lowmem_page_address(ic->journal_io[0].page), 64, "read journal");
2625 if (ic->journal_io) {
2626 struct journal_completion crypt_comp;
2627 crypt_comp.ic = ic;
b5e8ad92 2628 init_completion(&crypt_comp.comp);
7eada909
MP
2629 crypt_comp.in_flight = (atomic_t)ATOMIC_INIT(0);
2630 encrypt_journal(ic, false, 0, ic->journal_sections, &crypt_comp);
2631 wait_for_completion(&crypt_comp.comp);
2632 }
2633 DEBUG_bytes(lowmem_page_address(ic->journal[0].page), 64, "decrypted journal");
2634 }
2635
2636 if (dm_integrity_failed(ic))
2637 goto clear_journal;
2638
2639 journal_empty = true;
2640 memset(used_commit_ids, 0, sizeof used_commit_ids);
2641 memset(max_commit_id_sections, 0, sizeof max_commit_id_sections);
2642 for (i = 0; i < ic->journal_sections; i++) {
2643 for (j = 0; j < ic->journal_section_sectors; j++) {
2644 int k;
2645 struct journal_sector *js = access_journal(ic, i, j);
2646 k = find_commit_seq(ic, i, j, js->commit_id);
2647 if (k < 0)
2648 goto clear_journal;
2649 used_commit_ids[k] = true;
2650 max_commit_id_sections[k] = i;
2651 }
2652 if (journal_empty) {
2653 for (j = 0; j < ic->journal_section_entries; j++) {
2654 struct journal_entry *je = access_journal_entry(ic, i, j);
2655 if (!journal_entry_is_unused(je)) {
2656 journal_empty = false;
2657 break;
2658 }
2659 }
2660 }
2661 }
2662
2663 if (!used_commit_ids[N_COMMIT_IDS - 1]) {
2664 unused = N_COMMIT_IDS - 1;
2665 while (unused && !used_commit_ids[unused - 1])
2666 unused--;
2667 } else {
2668 for (unused = 0; unused < N_COMMIT_IDS; unused++)
2669 if (!used_commit_ids[unused])
2670 break;
2671 if (unused == N_COMMIT_IDS) {
2672 dm_integrity_io_error(ic, "journal commit ids", -EIO);
2673 goto clear_journal;
2674 }
2675 }
2676 DEBUG_print("first unused commit seq %d [%d,%d,%d,%d]\n",
2677 unused, used_commit_ids[0], used_commit_ids[1],
2678 used_commit_ids[2], used_commit_ids[3]);
2679
2680 last_used = prev_commit_seq(unused);
2681 want_commit_seq = prev_commit_seq(last_used);
2682
2683 if (!used_commit_ids[want_commit_seq] && used_commit_ids[prev_commit_seq(want_commit_seq)])
2684 journal_empty = true;
2685
2686 write_start = max_commit_id_sections[last_used] + 1;
2687 if (unlikely(write_start >= ic->journal_sections))
2688 want_commit_seq = next_commit_seq(want_commit_seq);
2689 wraparound_section(ic, &write_start);
2690
2691 i = write_start;
2692 for (write_sections = 0; write_sections < ic->journal_sections; write_sections++) {
2693 for (j = 0; j < ic->journal_section_sectors; j++) {
2694 struct journal_sector *js = access_journal(ic, i, j);
2695
2696 if (js->commit_id != dm_integrity_commit_id(ic, i, j, want_commit_seq)) {
2697 /*
2698 * This could be caused by crash during writing.
2699 * We won't replay the inconsistent part of the
2700 * journal.
2701 */
2702 DEBUG_print("commit id mismatch at position (%u, %u): %d != %d\n",
2703 i, j, find_commit_seq(ic, i, j, js->commit_id), want_commit_seq);
2704 goto brk;
2705 }
2706 }
2707 i++;
2708 if (unlikely(i >= ic->journal_sections))
2709 want_commit_seq = next_commit_seq(want_commit_seq);
2710 wraparound_section(ic, &i);
2711 }
2712brk:
2713
2714 if (!journal_empty) {
2715 DEBUG_print("replaying %u sections, starting at %u, commit seq %d\n",
2716 write_sections, write_start, want_commit_seq);
2717 do_journal_write(ic, write_start, write_sections, true);
2718 }
2719
2720 if (write_sections == ic->journal_sections && (ic->mode == 'J' || journal_empty)) {
2721 continue_section = write_start;
2722 ic->commit_seq = want_commit_seq;
2723 DEBUG_print("continuing from section %u, commit seq %d\n", write_start, ic->commit_seq);
2724 } else {
2725 unsigned s;
2726 unsigned char erase_seq;
2727clear_journal:
2728 DEBUG_print("clearing journal\n");
2729
2730 erase_seq = prev_commit_seq(prev_commit_seq(last_used));
2731 s = write_start;
2732 init_journal(ic, s, 1, erase_seq);
2733 s++;
2734 wraparound_section(ic, &s);
2735 if (ic->journal_sections >= 2) {
2736 init_journal(ic, s, ic->journal_sections - 2, erase_seq);
2737 s += ic->journal_sections - 2;
2738 wraparound_section(ic, &s);
2739 init_journal(ic, s, 1, erase_seq);
2740 }
2741
2742 continue_section = 0;
2743 ic->commit_seq = next_commit_seq(erase_seq);
2744 }
2745
2746 ic->committed_section = continue_section;
2747 ic->n_committed_sections = 0;
2748
2749 ic->uncommitted_section = continue_section;
2750 ic->n_uncommitted_sections = 0;
2751
2752 ic->free_section = continue_section;
2753 ic->free_section_entry = 0;
2754 ic->free_sectors = ic->journal_entries;
2755
2756 ic->journal_tree_root = RB_ROOT;
2757 for (i = 0; i < ic->journal_entries; i++)
2758 init_journal_node(&ic->journal_tree[i]);
2759}
2760
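/*
 * Used from the reboot notifier: in bitmap mode, shorten the bitmap
 * flush interval and flush immediately so that as little of the
 * device as possible stays marked dirty across the reboot.
 */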
48271493 2761static void dm_integrity_enter_synchronous_mode(struct dm_integrity_c *ic)
1f5a7759 2762{
48271493 2763 DEBUG_print("dm_integrity_enter_synchronous_mode\n");
1f5a7759
MP
2764
2765 if (ic->mode == 'B') {
48271493
MP
2766 ic->bitmap_flush_interval = msecs_to_jiffies(10) + 1;
2767 ic->synchronous_mode = 1;
2768
1f5a7759
MP
2769 cancel_delayed_work_sync(&ic->bitmap_flush_work);
2770 queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
2771 flush_workqueue(ic->commit_wq);
2772 }
48271493
MP
2773}
2774
2775static int dm_integrity_reboot(struct notifier_block *n, unsigned long code, void *x)
2776{
2777 struct dm_integrity_c *ic = container_of(n, struct dm_integrity_c, reboot_notifier);
2778
2779 DEBUG_print("dm_integrity_reboot\n");
2780
2781 dm_integrity_enter_synchronous_mode(ic);
1f5a7759
MP
2782
2783 return NOTIFY_DONE;
2784}
2785
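/*
 * Suspend: stop autocommit, drain the recalculation, commit and
 * writer queues, and flush everything to disk. In bitmap mode the
 * journal area is reformatted and the dirty-bitmap flag cleared so
 * that a clean suspend does not trigger bitmap recovery on resume.
 */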
7eada909
MP
2786static void dm_integrity_postsuspend(struct dm_target *ti)
2787{
2788 struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
468dfca3 2789 int r;
7eada909 2790
1f5a7759 2791 WARN_ON(unregister_reboot_notifier(&ic->reboot_notifier));
7eada909
MP
2792
2793 del_timer_sync(&ic->autocommit_timer);
2794
a3fcf725
MP
2795 if (ic->recalc_wq)
2796 drain_workqueue(ic->recalc_wq);
2797
468dfca3
MP
2798 if (ic->mode == 'B')
2799 cancel_delayed_work_sync(&ic->bitmap_flush_work);
2800
7eada909
MP
2801 queue_work(ic->commit_wq, &ic->commit_work);
2802 drain_workqueue(ic->commit_wq);
2803
2804 if (ic->mode == 'J') {
747829a8
MP
2805 if (ic->meta_dev)
2806 queue_work(ic->writer_wq, &ic->writer_work);
7eada909
MP
2807 drain_workqueue(ic->writer_wq);
2808 dm_integrity_flush_buffers(ic);
2809 }
2810
468dfca3
MP
2811 if (ic->mode == 'B') {
2812 dm_integrity_flush_buffers(ic);
2813#if 1
05d6909e 2814 /* set to 0 to test bitmap replay code */
468dfca3
MP
2815 init_journal(ic, 0, ic->journal_sections, 0);
2816 ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
2817 r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
2818 if (unlikely(r))
2819 dm_integrity_io_error(ic, "writing superblock", r);
2820#endif
2821 }
2822
7eada909
MP
2823 BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
2824
2825 ic->journal_uptodate = true;
2826}
2827
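/*
 * Resume: if the superblock says the bitmap was dirty, rebuild state
 * from the on-disk bitmap (scheduling recalculation of any region
 * that may have been written); otherwise replay the journal. Mode 'B'
 * then switches the superblock to the dirty-bitmap scheme before new
 * writes are accepted.
 */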
2828static void dm_integrity_resume(struct dm_target *ti)
2829{
2830 struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
468dfca3
MP
2831 int r;
2832 DEBUG_print("resume\n");
2833
2834 if (ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP)) {
2835 DEBUG_print("resume dirty_bitmap\n");
05d6909e
MS
2836 rw_journal_sectors(ic, REQ_OP_READ, 0, 0,
2837 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
468dfca3
MP
2838 if (ic->mode == 'B') {
2839 if (ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit) {
2840 block_bitmap_copy(ic, ic->recalc_bitmap, ic->journal);
2841 block_bitmap_copy(ic, ic->may_write_bitmap, ic->journal);
05d6909e
MS
2842 if (!block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors,
2843 BITMAP_OP_TEST_ALL_CLEAR)) {
468dfca3
MP
2844 ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
2845 ic->sb->recalc_sector = cpu_to_le64(0);
2846 }
2847 } else {
05d6909e
MS
2848 DEBUG_print("non-matching blocks_per_bitmap_bit: %u, %u\n",
2849 ic->sb->log2_blocks_per_bitmap_bit, ic->log2_blocks_per_bitmap_bit);
468dfca3
MP
2850 ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
2851 block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET);
2852 block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET);
2853 block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_SET);
05d6909e
MS
2854 rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
2855 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
468dfca3
MP
2856 ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
2857 ic->sb->recalc_sector = cpu_to_le64(0);
2858 }
2859 } else {
2860 if (!(ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit &&
2861 block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_TEST_ALL_CLEAR))) {
2862 ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
2863 ic->sb->recalc_sector = cpu_to_le64(0);
2864 }
2865 init_journal(ic, 0, ic->journal_sections, 0);
2866 replay_journal(ic);
2867 ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
2868 }
2869 r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
2870 if (unlikely(r))
2871 dm_integrity_io_error(ic, "writing superblock", r);
2872 } else {
2873 replay_journal(ic);
2874 if (ic->mode == 'B') {
468dfca3
MP
2875 ic->sb->flags |= cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
2876 ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
2877 r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
2878 if (unlikely(r))
2879 dm_integrity_io_error(ic, "writing superblock", r);
7eada909 2880
d5bdf661
MP
2881 block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
2882 block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
2883 block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
2884 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
2885 le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors) {
2886 block_bitmap_op(ic, ic->journal, le64_to_cpu(ic->sb->recalc_sector),
2887 ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
2888 block_bitmap_op(ic, ic->recalc_bitmap, le64_to_cpu(ic->sb->recalc_sector),
2889 ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
2890 block_bitmap_op(ic, ic->may_write_bitmap, le64_to_cpu(ic->sb->recalc_sector),
2891 ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
2892 }
05d6909e
MS
2893 rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
2894 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
468dfca3
MP
2895 }
2896 }
7eada909 2897
468dfca3
MP
2898 DEBUG_print("testing recalc: %x\n", ic->sb->flags);
2899 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
a3fcf725 2900 __u64 recalc_pos = le64_to_cpu(ic->sb->recalc_sector);
468dfca3 2901 DEBUG_print("recalc pos: %lx / %lx\n", (long)recalc_pos, ic->provided_data_sectors);
a3fcf725
MP
2902 if (recalc_pos < ic->provided_data_sectors) {
2903 queue_work(ic->recalc_wq, &ic->recalc_work);
2904 } else if (recalc_pos > ic->provided_data_sectors) {
2905 ic->sb->recalc_sector = cpu_to_le64(ic->provided_data_sectors);
2906 recalc_write_super(ic);
2907 }
2908 }
1f5a7759
MP
2909
2910 ic->reboot_notifier.notifier_call = dm_integrity_reboot;
2911 ic->reboot_notifier.next = NULL;
2912 ic->reboot_notifier.priority = INT_MAX - 1; /* be notified after md and before hardware drivers */
2913 WARN_ON(register_reboot_notifier(&ic->reboot_notifier));
48271493
MP
2914
2915#if 0
05d6909e 2916 /* set to 1 to stress test synchronous mode */
48271493
MP
2917 dm_integrity_enter_synchronous_mode(ic);
2918#endif
7eada909
MP
2919}
2920
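/*
 * Report status. STATUSTYPE_INFO prints the mismatch count, the
 * provided data sectors and the recalculation position;
 * STATUSTYPE_TABLE reconstructs the constructor arguments. An
 * illustrative (hypothetical) table line for a 'J' mode device:
 *
 *   253:3 0 28 J 5 journal_sectors:8192 interleave_sectors:32768
 *       buffer_sectors:128 journal_watermark:50 commit_time:10000
 */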
2921static void dm_integrity_status(struct dm_target *ti, status_type_t type,
2922 unsigned status_flags, char *result, unsigned maxlen)
2923{
2924 struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
2925 unsigned arg_count;
2926 size_t sz = 0;
2927
2928 switch (type) {
2929 case STATUSTYPE_INFO:
f84fd2c9
MP
2930 DMEMIT("%llu %llu",
2931 (unsigned long long)atomic64_read(&ic->number_of_mismatches),
2932 (unsigned long long)ic->provided_data_sectors);
a3fcf725
MP
2933 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
2934 DMEMIT(" %llu", (unsigned long long)le64_to_cpu(ic->sb->recalc_sector));
2935 else
2936 DMEMIT(" -");
7eada909
MP
2937 break;
2938
2939 case STATUSTYPE_TABLE: {
2940 __u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100;
2941 watermark_percentage += ic->journal_entries / 2;
2942 do_div(watermark_percentage, ic->journal_entries);
893e3c39 2943 arg_count = 3;
356d9d52 2944 arg_count += !!ic->meta_dev;
9d609f85 2945 arg_count += ic->sectors_per_block != 1;
a3fcf725 2946 arg_count += !!(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING));
893e3c39
MP
2947 arg_count += ic->mode == 'J';
2948 arg_count += ic->mode == 'J';
468dfca3
MP
2949 arg_count += ic->mode == 'B';
2950 arg_count += ic->mode == 'B';
7eada909
MP
2951 arg_count += !!ic->internal_hash_alg.alg_string;
2952 arg_count += !!ic->journal_crypt_alg.alg_string;
2953 arg_count += !!ic->journal_mac_alg.alg_string;
d537858a 2954 arg_count += (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0;
7eada909
MP
2955 DMEMIT("%s %llu %u %c %u", ic->dev->name, (unsigned long long)ic->start,
2956 ic->tag_size, ic->mode, arg_count);
356d9d52
MP
2957 if (ic->meta_dev)
2958 DMEMIT(" meta_device:%s", ic->meta_dev->name);
a3fcf725
MP
2959 if (ic->sectors_per_block != 1)
2960 DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT);
7fc2e47f 2961 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
a3fcf725 2962 DMEMIT(" recalculate");
56b67a4f
MP
2963 DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS);
2964 DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
2965 DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors);
893e3c39
MP
2966 if (ic->mode == 'J') {
2967 DMEMIT(" journal_watermark:%u", (unsigned)watermark_percentage);
2968 DMEMIT(" commit_time:%u", ic->autocommit_msec);
2969 }
468dfca3
MP
2970 if (ic->mode == 'B') {
2971 DMEMIT(" sectors_per_bit:%llu", (unsigned long long)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit);
2972 DMEMIT(" bitmap_flush_interval:%u", jiffies_to_msecs(ic->bitmap_flush_interval));
2973 }
d537858a
MP
2974 if ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0)
2975 DMEMIT(" fix_padding");
7eada909
MP
2976
2977#define EMIT_ALG(a, n) \
2978 do { \
2979 if (ic->a.alg_string) { \
2980 DMEMIT(" %s:%s", n, ic->a.alg_string); \
2981 if (ic->a.key_string) \
2982 DMEMIT(":%s", ic->a.key_string);\
2983 } \
2984 } while (0)
56b67a4f
MP
2985 EMIT_ALG(internal_hash_alg, "internal_hash");
2986 EMIT_ALG(journal_crypt_alg, "journal_crypt");
2987 EMIT_ALG(journal_mac_alg, "journal_mac");
7eada909
MP
2988 break;
2989 }
2990 }
2991}
2992
2993static int dm_integrity_iterate_devices(struct dm_target *ti,
2994 iterate_devices_callout_fn fn, void *data)
2995{
2996 struct dm_integrity_c *ic = ti->private;
2997
356d9d52
MP
2998 if (!ic->meta_dev)
2999 return fn(ti, ic->dev, ic->start + ic->initial_sectors + ic->metadata_run, ti->len, data);
3000 else
3001 return fn(ti, ic->dev, 0, ti->len, data);
7eada909
MP
3002}
3003
9d609f85
MP
3004static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *limits)
3005{
3006 struct dm_integrity_c *ic = ti->private;
3007
3008 if (ic->sectors_per_block > 1) {
3009 limits->logical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
3010 limits->physical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
3011 blk_limits_io_min(limits, ic->sectors_per_block << SECTOR_SHIFT);
3012 }
3013}
3014
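/*
 * Derive the journal geometry: entry size (sector number, last bytes
 * and tag, rounded up to JOURNAL_ENTRY_ROUNDUP), entries per journal
 * sector and per section, and the number of sectors one section
 * occupies (metadata plus data blocks).
 */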
7eada909
MP
3015static void calculate_journal_section_size(struct dm_integrity_c *ic)
3016{
3017 unsigned sector_space = JOURNAL_SECTOR_DATA;
3018
3019 ic->journal_sections = le32_to_cpu(ic->sb->journal_sections);
9d609f85 3020 ic->journal_entry_size = roundup(offsetof(struct journal_entry, last_bytes[ic->sectors_per_block]) + ic->tag_size,
7eada909
MP
3021 JOURNAL_ENTRY_ROUNDUP);
3022
3023 if (ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC))
3024 sector_space -= JOURNAL_MAC_PER_SECTOR;
3025 ic->journal_entries_per_sector = sector_space / ic->journal_entry_size;
3026 ic->journal_section_entries = ic->journal_entries_per_sector * JOURNAL_BLOCK_SECTORS;
9d609f85 3027 ic->journal_section_sectors = (ic->journal_section_entries << ic->sb->log2_sectors_per_block) + JOURNAL_BLOCK_SECTORS;
7eada909
MP
3028 ic->journal_entries = ic->journal_section_entries * ic->journal_sections;
3029}
3030
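/*
 * Compute where the journal ends (initial_sectors) and how data and
 * metadata interleave (metadata_run), and verify that the requested
 * provided_data_sectors actually fit on the data/metadata device.
 * Returns -EINVAL if they do not.
 */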
3031static int calculate_device_limits(struct dm_integrity_c *ic)
3032{
3033 __u64 initial_sectors;
7eada909
MP
3034
3035 calculate_journal_section_size(ic);
3036 initial_sectors = SB_SECTORS + (__u64)ic->journal_section_sectors * ic->journal_sections;
356d9d52 3037 if (initial_sectors + METADATA_PADDING_SECTORS >= ic->meta_device_sectors || initial_sectors > UINT_MAX)
7eada909
MP
3038 return -EINVAL;
3039 ic->initial_sectors = initial_sectors;
3040
356d9d52
MP
3041 if (!ic->meta_dev) {
3042 sector_t last_sector, last_area, last_offset;
7eada909 3043
d537858a
MP
3044 /* we have to maintain excessive padding for compatibility with existing volumes */
3045 __u64 metadata_run_padding =
3046 ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING) ?
3047 (__u64)(METADATA_PADDING_SECTORS << SECTOR_SHIFT) :
3048 (__u64)(1 << SECTOR_SHIFT << METADATA_PADDING_SECTORS);
3049
3050 ic->metadata_run = round_up((__u64)ic->tag_size << (ic->sb->log2_interleave_sectors - ic->sb->log2_sectors_per_block),
3051 metadata_run_padding) >> SECTOR_SHIFT;
356d9d52
MP
3052 if (!(ic->metadata_run & (ic->metadata_run - 1)))
3053 ic->log2_metadata_run = __ffs(ic->metadata_run);
3054 else
3055 ic->log2_metadata_run = -1;
7eada909 3056
356d9d52
MP
3057 get_area_and_offset(ic, ic->provided_data_sectors - 1, &last_area, &last_offset);
3058 last_sector = get_data_sector(ic, last_area, last_offset);
3059 if (last_sector < ic->start || last_sector >= ic->meta_device_sectors)
3060 return -EINVAL;
3061 } else {
30bba430 3062 __u64 meta_size = (ic->provided_data_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size;
356d9d52
MP
3063 meta_size = (meta_size + ((1U << (ic->log2_buffer_sectors + SECTOR_SHIFT)) - 1))
3064 >> (ic->log2_buffer_sectors + SECTOR_SHIFT);
3065 meta_size <<= ic->log2_buffer_sectors;
3066 if (ic->initial_sectors + meta_size < ic->initial_sectors ||
3067 ic->initial_sectors + meta_size > ic->meta_device_sectors)
3068 return -EINVAL;
3069 ic->metadata_run = 1;
3070 ic->log2_metadata_run = 0;
3071 }
7eada909
MP
3072
3073 return 0;
3074}
3075
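/*
 * Format a fresh superblock. With interleaved metadata the largest
 * usable provided_data_sectors is found bit by bit from the top; with
 * a separate metadata device the largest journal that still leaves
 * room for the tags is searched for instead, shrinking the buffer
 * size if necessary.
 */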
3076static int initialize_superblock(struct dm_integrity_c *ic, unsigned journal_sectors, unsigned interleave_sectors)
3077{
3078 unsigned journal_sections;
3079 int test_bit;
3080
56b67a4f 3081 memset(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT);
7eada909 3082 memcpy(ic->sb->magic, SB_MAGIC, 8);
7eada909 3083 ic->sb->integrity_tag_size = cpu_to_le16(ic->tag_size);
9d609f85 3084 ic->sb->log2_sectors_per_block = __ffs(ic->sectors_per_block);
7eada909
MP
3085 if (ic->journal_mac_alg.alg_string)
3086 ic->sb->flags |= cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC);
3087
3088 calculate_journal_section_size(ic);
3089 journal_sections = journal_sectors / ic->journal_section_sectors;
3090 if (!journal_sections)
3091 journal_sections = 1;
7eada909 3092
356d9d52 3093 if (!ic->meta_dev) {
d537858a
MP
3094 if (ic->fix_padding)
3095 ic->sb->flags |= cpu_to_le32(SB_FLAG_FIXED_PADDING);
356d9d52
MP
3096 ic->sb->journal_sections = cpu_to_le32(journal_sections);
3097 if (!interleave_sectors)
3098 interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
3099 ic->sb->log2_interleave_sectors = __fls(interleave_sectors);
3100 ic->sb->log2_interleave_sectors = max((__u8)MIN_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
3101 ic->sb->log2_interleave_sectors = min((__u8)MAX_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
3102
3103 ic->provided_data_sectors = 0;
3104 for (test_bit = fls64(ic->meta_device_sectors) - 1; test_bit >= 3; test_bit--) {
3105 __u64 prev_data_sectors = ic->provided_data_sectors;
3106
3107 ic->provided_data_sectors |= (sector_t)1 << test_bit;
3108 if (calculate_device_limits(ic))
3109 ic->provided_data_sectors = prev_data_sectors;
3110 }
3111 if (!ic->provided_data_sectors)
3112 return -EINVAL;
3113 } else {
3114 ic->sb->log2_interleave_sectors = 0;
3115 ic->provided_data_sectors = ic->data_device_sectors;
3116 ic->provided_data_sectors &= ~(sector_t)(ic->sectors_per_block - 1);
3117
3118try_smaller_buffer:
3119 ic->sb->journal_sections = cpu_to_le32(0);
3120 for (test_bit = fls(journal_sections) - 1; test_bit >= 0; test_bit--) {
3121 __u32 prev_journal_sections = le32_to_cpu(ic->sb->journal_sections);
3122 __u32 test_journal_sections = prev_journal_sections | (1U << test_bit);
3123 if (test_journal_sections > journal_sections)
3124 continue;
3125 ic->sb->journal_sections = cpu_to_le32(test_journal_sections);
3126 if (calculate_device_limits(ic))
3127 ic->sb->journal_sections = cpu_to_le32(prev_journal_sections);
7eada909 3128
356d9d52
MP
3129 }
3130 if (!le32_to_cpu(ic->sb->journal_sections)) {
3131 if (ic->log2_buffer_sectors > 3) {
3132 ic->log2_buffer_sectors--;
3133 goto try_smaller_buffer;
3134 }
3135 return -EINVAL;
3136 }
7eada909
MP
3137 }
3138
7eada909
MP
3139 ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors);
3140
1f9fc0b8
MP
3141 sb_set_version(ic);
3142
7eada909
MP
3143 return 0;
3144}
3145
3146static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic)
3147{
3148 struct gendisk *disk = dm_disk(dm_table_get_md(ti->table));
3149 struct blk_integrity bi;
3150
3151 memset(&bi, 0, sizeof(bi));
3152 bi.profile = &dm_integrity_profile;
9d609f85
MP
3153 bi.tuple_size = ic->tag_size;
3154 bi.tag_size = bi.tuple_size;
84ff1bcc 3155 bi.interval_exp = ic->sb->log2_sectors_per_block + SECTOR_SHIFT;
7eada909
MP
3156
3157 blk_integrity_register(disk, &bi);
3158 blk_queue_max_integrity_segments(disk->queue, UINT_MAX);
3159}
3160
d5027e03 3161static void dm_integrity_free_page_list(struct page_list *pl)
7eada909
MP
3162{
3163 unsigned i;
3164
3165 if (!pl)
3166 return;
d5027e03
MP
3167 for (i = 0; pl[i].page; i++)
3168 __free_page(pl[i].page);
7eada909
MP
3169 kvfree(pl);
3170}
3171
d5027e03 3172static struct page_list *dm_integrity_alloc_page_list(unsigned n_pages)
7eada909 3173{
7eada909
MP
3174 struct page_list *pl;
3175 unsigned i;
3176
d5027e03 3177 pl = kvmalloc_array(n_pages + 1, sizeof(struct page_list), GFP_KERNEL | __GFP_ZERO);
7eada909
MP
3178 if (!pl)
3179 return NULL;
3180
d5027e03 3181 for (i = 0; i < n_pages; i++) {
7eada909
MP
3182 pl[i].page = alloc_page(GFP_KERNEL);
3183 if (!pl[i].page) {
d5027e03 3184 dm_integrity_free_page_list(pl);
7eada909
MP
3185 return NULL;
3186 }
3187 if (i)
3188 pl[i - 1].next = &pl[i];
3189 }
d5027e03
MP
3190 pl[i].page = NULL;
3191 pl[i].next = NULL;
7eada909
MP
3192
3193 return pl;
3194}
3195
3196static void dm_integrity_free_journal_scatterlist(struct dm_integrity_c *ic, struct scatterlist **sl)
3197{
3198 unsigned i;
3199 for (i = 0; i < ic->journal_sections; i++)
3200 kvfree(sl[i]);
fc8cec11 3201 kvfree(sl);
7eada909
MP
3202}
3203
05d6909e
MS
3204static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic,
3205 struct page_list *pl)
7eada909
MP
3206{
3207 struct scatterlist **sl;
3208 unsigned i;
3209
344476e1
KC
3210 sl = kvmalloc_array(ic->journal_sections,
3211 sizeof(struct scatterlist *),
3212 GFP_KERNEL | __GFP_ZERO);
7eada909
MP
3213 if (!sl)
3214 return NULL;
3215
3216 for (i = 0; i < ic->journal_sections; i++) {
3217 struct scatterlist *s;
3218 unsigned start_index, start_offset;
3219 unsigned end_index, end_offset;
3220 unsigned n_pages;
3221 unsigned idx;
3222
3223 page_list_location(ic, i, 0, &start_index, &start_offset);
05d6909e
MS
3224 page_list_location(ic, i, ic->journal_section_sectors - 1,
3225 &end_index, &end_offset);
7eada909
MP
3226
3227 n_pages = (end_index - start_index + 1);
3228
344476e1
KC
3229 s = kvmalloc_array(n_pages, sizeof(struct scatterlist),
3230 GFP_KERNEL);
7eada909
MP
3231 if (!s) {
3232 dm_integrity_free_journal_scatterlist(ic, sl);
3233 return NULL;
3234 }
3235
3236 sg_init_table(s, n_pages);
3237 for (idx = start_index; idx <= end_index; idx++) {
3238 char *va = lowmem_page_address(pl[idx].page);
3239 unsigned start = 0, end = PAGE_SIZE;
3240 if (idx == start_index)
3241 start = start_offset;
3242 if (idx == end_index)
3243 end = end_offset + (1 << SECTOR_SHIFT);
3244 sg_set_buf(&s[idx - start_index], va + start, end - start);
3245 }
3246
3247 sl[i] = s;
3248 }
3249
3250 return sl;
3251}
3252
3253static void free_alg(struct alg_spec *a)
3254{
3255 kzfree(a->alg_string);
3256 kzfree(a->key);
3257 memset(a, 0, sizeof *a);
3258}
3259
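/*
 * Parse an "option:algorithm[:key]" argument, e.g.
 * internal_hash:crc32c or journal_mac:hmac(sha256):0123abcd
 * (the key is hex-encoded, so it must have an even number of digits).
 */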
3260static int get_alg_and_key(const char *arg, struct alg_spec *a, char **error, char *error_inval)
3261{
3262 char *k;
3263
3264 free_alg(a);
3265
3266 a->alg_string = kstrdup(strchr(arg, ':') + 1, GFP_KERNEL);
3267 if (!a->alg_string)
3268 goto nomem;
3269
3270 k = strchr(a->alg_string, ':');
3271 if (k) {
7eada909
MP
3272 *k = 0;
3273 a->key_string = k + 1;
3274 if (strlen(a->key_string) & 1)
3275 goto inval;
3276
3277 a->key_size = strlen(a->key_string) / 2;
3278 a->key = kmalloc(a->key_size, GFP_KERNEL);
3279 if (!a->key)
3280 goto nomem;
6625d903
MP
3281 if (hex2bin(a->key, a->key_string, a->key_size))
3282 goto inval;
7eada909
MP
3283 }
3284
3285 return 0;
3286inval:
3287 *error = error_inval;
3288 return -EINVAL;
3289nomem:
3290 *error = "Out of memory for an argument";
3291 return -ENOMEM;
3292}
3293
3294static int get_mac(struct crypto_shash **hash, struct alg_spec *a, char **error,
3295 char *error_alg, char *error_key)
3296{
3297 int r;
3298
3299 if (a->alg_string) {
3d234b33 3300 *hash = crypto_alloc_shash(a->alg_string, 0, 0);
7eada909
MP
3301 if (IS_ERR(*hash)) {
3302 *error = error_alg;
3303 r = PTR_ERR(*hash);
3304 *hash = NULL;
3305 return r;
3306 }
3307
3308 if (a->key) {
3309 r = crypto_shash_setkey(*hash, a->key, a->key_size);
3310 if (r) {
3311 *error = error_key;
3312 return r;
3313 }
e16b4f99
MB
3314 } else if (crypto_shash_get_flags(*hash) & CRYPTO_TFM_NEED_KEY) {
3315 *error = error_key;
3316 return -ENOKEY;
7eada909
MP
3317 }
3318 }
3319
3320 return 0;
3321}
3322
1aa0efd4
MS
3323static int create_journal(struct dm_integrity_c *ic, char **error)
3324{
3325 int r = 0;
3326 unsigned i;
3327 __u64 journal_pages, journal_desc_size, journal_tree_size;
717f4b1c
MP
3328 unsigned char *crypt_data = NULL, *crypt_iv = NULL;
3329 struct skcipher_request *req = NULL;
56b67a4f
MP
3330
3331 ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL);
3332 ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL);
3333 ic->commit_ids[2] = cpu_to_le64(0x3333333333333333ULL);
3334 ic->commit_ids[3] = cpu_to_le64(0x4444444444444444ULL);
1aa0efd4
MS
3335
3336 journal_pages = roundup((__u64)ic->journal_sections * ic->journal_section_sectors,
3337 PAGE_SIZE >> SECTOR_SHIFT) >> (PAGE_SHIFT - SECTOR_SHIFT);
3338 journal_desc_size = journal_pages * sizeof(struct page_list);
ca79b0c2 3339 if (journal_pages >= totalram_pages() - totalhigh_pages() || journal_desc_size > ULONG_MAX) {
1aa0efd4
MS
3340 *error = "Journal doesn't fit into memory";
3341 r = -ENOMEM;
3342 goto bad;
3343 }
3344 ic->journal_pages = journal_pages;
3345
d5027e03 3346 ic->journal = dm_integrity_alloc_page_list(ic->journal_pages);
1aa0efd4
MS
3347 if (!ic->journal) {
3348 *error = "Could not allocate memory for journal";
3349 r = -ENOMEM;
3350 goto bad;
3351 }
3352 if (ic->journal_crypt_alg.alg_string) {
3353 unsigned ivsize, blocksize;
3354 struct journal_completion comp;
3355
3356 comp.ic = ic;
3357 ic->journal_crypt = crypto_alloc_skcipher(ic->journal_crypt_alg.alg_string, 0, 0);
3358 if (IS_ERR(ic->journal_crypt)) {
3359 *error = "Invalid journal cipher";
3360 r = PTR_ERR(ic->journal_crypt);
3361 ic->journal_crypt = NULL;
3362 goto bad;
3363 }
3364 ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
3365 blocksize = crypto_skcipher_blocksize(ic->journal_crypt);
3366
3367 if (ic->journal_crypt_alg.key) {
3368 r = crypto_skcipher_setkey(ic->journal_crypt, ic->journal_crypt_alg.key,
3369 ic->journal_crypt_alg.key_size);
3370 if (r) {
3371 *error = "Error setting encryption key";
3372 goto bad;
3373 }
3374 }
3375 DEBUG_print("cipher %s, block size %u iv size %u\n",
3376 ic->journal_crypt_alg.alg_string, blocksize, ivsize);
3377
d5027e03 3378 ic->journal_io = dm_integrity_alloc_page_list(ic->journal_pages);
1aa0efd4
MS
3379 if (!ic->journal_io) {
3380 *error = "Could not allocate memory for journal io";
3381 r = -ENOMEM;
3382 goto bad;
3383 }
3384
3385 if (blocksize == 1) {
3386 struct scatterlist *sg;
717f4b1c
MP
3387
3388 req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
3389 if (!req) {
3390 *error = "Could not allocate crypt request";
3391 r = -ENOMEM;
3392 goto bad;
3393 }
3394
131670c2 3395 crypt_iv = kzalloc(ivsize, GFP_KERNEL);
717f4b1c
MP
3396 if (!crypt_iv) {
3397 *error = "Could not allocate iv";
3398 r = -ENOMEM;
3399 goto bad;
3400 }
1aa0efd4 3401
d5027e03 3402 ic->journal_xor = dm_integrity_alloc_page_list(ic->journal_pages);
1aa0efd4
MS
3403 if (!ic->journal_xor) {
3404 *error = "Could not allocate memory for journal xor";
3405 r = -ENOMEM;
3406 goto bad;
3407 }
3408
344476e1
KC
3409 sg = kvmalloc_array(ic->journal_pages + 1,
3410 sizeof(struct scatterlist),
3411 GFP_KERNEL);
1aa0efd4
MS
3412 if (!sg) {
3413 *error = "Unable to allocate sg list";
3414 r = -ENOMEM;
3415 goto bad;
3416 }
3417 sg_init_table(sg, ic->journal_pages + 1);
3418 for (i = 0; i < ic->journal_pages; i++) {
3419 char *va = lowmem_page_address(ic->journal_xor[i].page);
3420 clear_page(va);
3421 sg_set_buf(&sg[i], va, PAGE_SIZE);
3422 }
3423 sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids);
1aa0efd4 3424
05d6909e
MS
3425 skcipher_request_set_crypt(req, sg, sg,
3426 PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, crypt_iv);
b5e8ad92 3427 init_completion(&comp.comp);
1aa0efd4
MS
3428 comp.in_flight = (atomic_t)ATOMIC_INIT(1);
3429 if (do_crypt(true, req, &comp))
3430 wait_for_completion(&comp.comp);
3431 kvfree(sg);
3432 r = dm_integrity_failed(ic);
3433 if (r) {
3434 *error = "Unable to encrypt journal";
3435 goto bad;
3436 }
3437 DEBUG_bytes(lowmem_page_address(ic->journal_xor[0].page), 64, "xor data");
3438
3439 crypto_free_skcipher(ic->journal_crypt);
3440 ic->journal_crypt = NULL;
3441 } else {
1aa0efd4 3442 unsigned crypt_len = roundup(ivsize, blocksize);
56b67a4f 3443
717f4b1c
MP
3444 req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
3445 if (!req) {
3446 *error = "Could not allocate crypt request";
3447 r = -ENOMEM;
3448 goto bad;
3449 }
3450
3451 crypt_iv = kmalloc(ivsize, GFP_KERNEL);
3452 if (!crypt_iv) {
3453 *error = "Could not allocate iv";
3454 r = -ENOMEM;
3455 goto bad;
3456 }
3457
56b67a4f
MP
3458 crypt_data = kmalloc(crypt_len, GFP_KERNEL);
3459 if (!crypt_data) {
3460 *error = "Unable to allocate crypt data";
3461 r = -ENOMEM;
3462 goto bad;
3463 }
1aa0efd4 3464
1aa0efd4
MS
3465 ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal);
3466 if (!ic->journal_scatterlist) {
3467 *error = "Unable to allocate sg list";
3468 r = -ENOMEM;
3469 goto bad;
3470 }
3471 ic->journal_io_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal_io);
3472 if (!ic->journal_io_scatterlist) {
3473 *error = "Unable to allocate sg list";
3474 r = -ENOMEM;
3475 goto bad;
3476 }
344476e1
KC
3477 ic->sk_requests = kvmalloc_array(ic->journal_sections,
3478 sizeof(struct skcipher_request *),
3479 GFP_KERNEL | __GFP_ZERO);
1aa0efd4
MS
3480 if (!ic->sk_requests) {
3481 *error = "Unable to allocate sk requests";
3482 r = -ENOMEM;
3483 goto bad;
3484 }
3485 for (i = 0; i < ic->journal_sections; i++) {
3486 struct scatterlist sg;
3487 struct skcipher_request *section_req;
3488 __u32 section_le = cpu_to_le32(i);
3489
717f4b1c 3490 memset(crypt_iv, 0x00, ivsize);
1aa0efd4
MS
3491 memset(crypt_data, 0x00, crypt_len);
3492 memcpy(crypt_data, &section_le, min((size_t)crypt_len, sizeof(section_le)));
3493
3494 sg_init_one(&sg, crypt_data, crypt_len);
717f4b1c 3495 skcipher_request_set_crypt(req, &sg, &sg, crypt_len, crypt_iv);
b5e8ad92 3496 init_completion(&comp.comp);
1aa0efd4
MS
3497 comp.in_flight = (atomic_t)ATOMIC_INIT(1);
3498 if (do_crypt(true, req, &comp))
3499 wait_for_completion(&comp.comp);
3500
3501 r = dm_integrity_failed(ic);
3502 if (r) {
3503 *error = "Unable to generate iv";
3504 goto bad;
3505 }
3506
3507 section_req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
3508 if (!section_req) {
3509 *error = "Unable to allocate crypt request";
3510 r = -ENOMEM;
3511 goto bad;
3512 }
6da2ec56
KC
3513 section_req->iv = kmalloc_array(ivsize, 2,
3514 GFP_KERNEL);
1aa0efd4
MS
3515 if (!section_req->iv) {
3516 skcipher_request_free(section_req);
3517 *error = "Unable to allocate iv";
3518 r = -ENOMEM;
3519 goto bad;
3520 }
3521 memcpy(section_req->iv + ivsize, crypt_data, ivsize);
3522 section_req->cryptlen = (size_t)ic->journal_section_sectors << SECTOR_SHIFT;
3523 ic->sk_requests[i] = section_req;
3524 DEBUG_bytes(crypt_data, ivsize, "iv(%u)", i);
3525 }
3526 }
3527 }
3528
3529 for (i = 0; i < N_COMMIT_IDS; i++) {
3530 unsigned j;
3531retest_commit_id:
3532 for (j = 0; j < i; j++) {
3533 if (ic->commit_ids[j] == ic->commit_ids[i]) {
3534 ic->commit_ids[i] = cpu_to_le64(le64_to_cpu(ic->commit_ids[i]) + 1);
3535 goto retest_commit_id;
3536 }
3537 }
3538 DEBUG_print("commit id %u: %016llx\n", i, ic->commit_ids[i]);
3539 }
3540
3541 journal_tree_size = (__u64)ic->journal_entries * sizeof(struct journal_node);
3542 if (journal_tree_size > ULONG_MAX) {
3543 *error = "Journal doesn't fit into memory";
3544 r = -ENOMEM;
3545 goto bad;
3546 }
702a6204 3547 ic->journal_tree = kvmalloc(journal_tree_size, GFP_KERNEL);
1aa0efd4
MS
3548 if (!ic->journal_tree) {
3549 *error = "Could not allocate memory for journal tree";
3550 r = -ENOMEM;
3551 }
3552bad:
56b67a4f 3553 kfree(crypt_data);
717f4b1c
MP
3554 kfree(crypt_iv);
3555 skcipher_request_free(req);
3556
1aa0efd4
MS
3557 return r;
3558}
3559
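/*
 * Worked sizing example for the journal allocation above (assumed numbers,
 * not derived from this file): with 4 KiB pages, a page holds
 * PAGE_SIZE >> SECTOR_SHIFT = 8 sectors, so 64 sections of 10 sectors each
 * round up from 640 to 640 sectors and occupy 640 >> 3 = 80 pages; the same
 * 80 pages are mirrored in journal_io when a journal cipher is configured.
 */
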
/*
 * Construct an integrity mapping
 *
 * Arguments:
 *	device
 *	offset from the start of the device
 *	tag size
 *	D - direct writes, J - journal writes, B - bitmap mode, R - recovery mode
 *	number of optional arguments
 *	optional arguments:
 *		journal_sectors
 *		interleave_sectors
 *		buffer_sectors
 *		journal_watermark
 *		commit_time
 *		meta_device
 *		block_size
 *		sectors_per_bit
 *		bitmap_flush_interval
 *		internal_hash
 *		journal_crypt
 *		journal_mac
 *		recalculate
 *		fix_padding
 */
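
/*
 * A minimal usage sketch (illustrative values, not taken from this file):
 * once the device has been formatted by this target, a journal-mode table
 * with a self-computed tag size and sha256 internal hash could be loaded as
 *
 *	dmsetup create dm-integ --table \
 *		"0 <provided_data_sectors> integrity /dev/sdb 0 - J 1 internal_hash:sha256"
 *
 * where <provided_data_sectors> must match the value exported in the
 * superblock and "-" lets the tag size default to the hash digest size.
 */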
static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	struct dm_integrity_c *ic;
	char dummy;
	int r;
	unsigned extra_args;
	struct dm_arg_set as;
	static const struct dm_arg _args[] = {
		{0, 9, "Invalid number of feature args"},
	};
	unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
	bool should_write_sb;
	__u64 threshold;
	unsigned long long start;
	__s8 log2_sectors_per_bitmap_bit = -1;
	__s8 log2_blocks_per_bitmap_bit;
	__u64 bits_in_journal;
	__u64 n_bitmap_bits;

#define DIRECT_ARGUMENTS	4

	if (argc <= DIRECT_ARGUMENTS) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	ic = kzalloc(sizeof(struct dm_integrity_c), GFP_KERNEL);
	if (!ic) {
		ti->error = "Cannot allocate integrity context";
		return -ENOMEM;
	}
	ti->private = ic;
	ti->per_io_data_size = sizeof(struct dm_integrity_io);
	ic->ti = ti;

	ic->in_progress = RB_ROOT;
	INIT_LIST_HEAD(&ic->wait_list);
	init_waitqueue_head(&ic->endio_wait);
	bio_list_init(&ic->flush_bio_list);
	init_waitqueue_head(&ic->copy_to_journal_wait);
	init_completion(&ic->crypto_backoff);
	atomic64_set(&ic->number_of_mismatches, 0);
	ic->bitmap_flush_interval = BITMAP_FLUSH_INTERVAL;

	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ic->dev);
	if (r) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	if (sscanf(argv[1], "%llu%c", &start, &dummy) != 1 || start != (sector_t)start) {
		ti->error = "Invalid starting offset";
		r = -EINVAL;
		goto bad;
	}
	ic->start = start;

	if (strcmp(argv[2], "-")) {
		if (sscanf(argv[2], "%u%c", &ic->tag_size, &dummy) != 1 || !ic->tag_size) {
			ti->error = "Invalid tag size";
			r = -EINVAL;
			goto bad;
		}
	}

	if (!strcmp(argv[3], "J") || !strcmp(argv[3], "B") ||
	    !strcmp(argv[3], "D") || !strcmp(argv[3], "R")) {
		ic->mode = argv[3][0];
	} else {
		ti->error = "Invalid mode (expecting J, B, D, R)";
		r = -EINVAL;
		goto bad;
	}

	journal_sectors = 0;
	interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
	buffer_sectors = DEFAULT_BUFFER_SECTORS;
	journal_watermark = DEFAULT_JOURNAL_WATERMARK;
	sync_msec = DEFAULT_SYNC_MSEC;
	ic->sectors_per_block = 1;

	as.argc = argc - DIRECT_ARGUMENTS;
	as.argv = argv + DIRECT_ARGUMENTS;
	r = dm_read_arg_group(_args, &as, &extra_args, &ti->error);
	if (r)
		goto bad;

	while (extra_args--) {
		const char *opt_string;
		unsigned val;
		unsigned long long llval;
		opt_string = dm_shift_arg(&as);
		if (!opt_string) {
			r = -EINVAL;
			ti->error = "Not enough feature arguments";
			goto bad;
		}
		if (sscanf(opt_string, "journal_sectors:%u%c", &val, &dummy) == 1)
			journal_sectors = val ? val : 1;
		else if (sscanf(opt_string, "interleave_sectors:%u%c", &val, &dummy) == 1)
			interleave_sectors = val;
		else if (sscanf(opt_string, "buffer_sectors:%u%c", &val, &dummy) == 1)
			buffer_sectors = val;
		else if (sscanf(opt_string, "journal_watermark:%u%c", &val, &dummy) == 1 && val <= 100)
			journal_watermark = val;
		else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1)
			sync_msec = val;
		else if (!strncmp(opt_string, "meta_device:", strlen("meta_device:"))) {
			if (ic->meta_dev) {
				dm_put_device(ti, ic->meta_dev);
				ic->meta_dev = NULL;
			}
			r = dm_get_device(ti, strchr(opt_string, ':') + 1,
					  dm_table_get_mode(ti->table), &ic->meta_dev);
			if (r) {
				ti->error = "Device lookup failed";
				goto bad;
			}
		} else if (sscanf(opt_string, "block_size:%u%c", &val, &dummy) == 1) {
			if (val < 1 << SECTOR_SHIFT ||
			    val > MAX_SECTORS_PER_BLOCK << SECTOR_SHIFT ||
			    (val & (val - 1))) {
				r = -EINVAL;
				ti->error = "Invalid block_size argument";
				goto bad;
			}
			ic->sectors_per_block = val >> SECTOR_SHIFT;
		} else if (sscanf(opt_string, "sectors_per_bit:%llu%c", &llval, &dummy) == 1) {
			log2_sectors_per_bitmap_bit = !llval ? 0 : __ilog2_u64(llval);
		} else if (sscanf(opt_string, "bitmap_flush_interval:%u%c", &val, &dummy) == 1) {
			if (val >= (uint64_t)UINT_MAX * 1000 / HZ) {
				r = -EINVAL;
				ti->error = "Invalid bitmap_flush_interval argument";
				goto bad;
			}
			ic->bitmap_flush_interval = msecs_to_jiffies(val);
		} else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
			r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error,
					    "Invalid internal_hash argument");
			if (r)
				goto bad;
		} else if (!strncmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
			r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error,
					    "Invalid journal_crypt argument");
			if (r)
				goto bad;
		} else if (!strncmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
			r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error,
					    "Invalid journal_mac argument");
			if (r)
				goto bad;
		} else if (!strcmp(opt_string, "recalculate")) {
			ic->recalculate_flag = true;
		} else if (!strcmp(opt_string, "fix_padding")) {
			ic->fix_padding = true;
		} else {
			r = -EINVAL;
			ti->error = "Invalid argument";
			goto bad;
		}
	}

	ic->data_device_sectors = i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT;
	if (!ic->meta_dev)
		ic->meta_device_sectors = ic->data_device_sectors;
	else
		ic->meta_device_sectors = i_size_read(ic->meta_dev->bdev->bd_inode) >> SECTOR_SHIFT;

	if (!journal_sectors) {
		journal_sectors = min((sector_t)DEFAULT_MAX_JOURNAL_SECTORS,
				      ic->data_device_sectors >> DEFAULT_JOURNAL_SIZE_FACTOR);
	}

	if (!buffer_sectors)
		buffer_sectors = 1;
	ic->log2_buffer_sectors = min((int)__fls(buffer_sectors), 31 - SECTOR_SHIFT);

	r = get_mac(&ic->internal_hash, &ic->internal_hash_alg, &ti->error,
		    "Invalid internal hash", "Error setting internal hash key");
	if (r)
		goto bad;

	r = get_mac(&ic->journal_mac, &ic->journal_mac_alg, &ti->error,
		    "Invalid journal mac", "Error setting journal mac key");
	if (r)
		goto bad;

	if (!ic->tag_size) {
		if (!ic->internal_hash) {
			ti->error = "Unknown tag size";
			r = -EINVAL;
			goto bad;
		}
		ic->tag_size = crypto_shash_digestsize(ic->internal_hash);
	}
	if (ic->tag_size > MAX_TAG_SIZE) {
		ti->error = "Too big tag size";
		r = -EINVAL;
		goto bad;
	}
	if (!(ic->tag_size & (ic->tag_size - 1)))
		ic->log2_tag_size = __ffs(ic->tag_size);
	else
		ic->log2_tag_size = -1;

	if (ic->mode == 'B' && !ic->internal_hash) {
		r = -EINVAL;
		ti->error = "Bitmap mode can be only used with internal hash";
		goto bad;
	}

	ic->autocommit_jiffies = msecs_to_jiffies(sync_msec);
	ic->autocommit_msec = sync_msec;
	timer_setup(&ic->autocommit_timer, autocommit_fn, 0);

	ic->io = dm_io_client_create();
	if (IS_ERR(ic->io)) {
		r = PTR_ERR(ic->io);
		ic->io = NULL;
		ti->error = "Cannot allocate dm io";
		goto bad;
	}

	r = mempool_init_slab_pool(&ic->journal_io_mempool, JOURNAL_IO_MEMPOOL, journal_io_cache);
	if (r) {
		ti->error = "Cannot allocate mempool";
		goto bad;
	}

	ic->metadata_wq = alloc_workqueue("dm-integrity-metadata",
					  WQ_MEM_RECLAIM, METADATA_WORKQUEUE_MAX_ACTIVE);
	if (!ic->metadata_wq) {
		ti->error = "Cannot allocate workqueue";
		r = -ENOMEM;
		goto bad;
	}

	/*
	 * If this workqueue were percpu, it would cause bio reordering
	 * and reduced performance.
	 */
	ic->wait_wq = alloc_workqueue("dm-integrity-wait", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
	if (!ic->wait_wq) {
		ti->error = "Cannot allocate workqueue";
		r = -ENOMEM;
		goto bad;
	}

	ic->offload_wq = alloc_workqueue("dm-integrity-offload", WQ_MEM_RECLAIM,
					 METADATA_WORKQUEUE_MAX_ACTIVE);
	if (!ic->offload_wq) {
		ti->error = "Cannot allocate workqueue";
		r = -ENOMEM;
		goto bad;
	}

	ic->commit_wq = alloc_workqueue("dm-integrity-commit", WQ_MEM_RECLAIM, 1);
	if (!ic->commit_wq) {
		ti->error = "Cannot allocate workqueue";
		r = -ENOMEM;
		goto bad;
	}
	INIT_WORK(&ic->commit_work, integrity_commit);

	if (ic->mode == 'J' || ic->mode == 'B') {
		ic->writer_wq = alloc_workqueue("dm-integrity-writer", WQ_MEM_RECLAIM, 1);
		if (!ic->writer_wq) {
			ti->error = "Cannot allocate workqueue";
			r = -ENOMEM;
			goto bad;
		}
		INIT_WORK(&ic->writer_work, integrity_writer);
	}

	ic->sb = alloc_pages_exact(SB_SECTORS << SECTOR_SHIFT, GFP_KERNEL);
	if (!ic->sb) {
		r = -ENOMEM;
		ti->error = "Cannot allocate superblock area";
		goto bad;
	}

	r = sync_rw_sb(ic, REQ_OP_READ, 0);
	if (r) {
		ti->error = "Error reading superblock";
		goto bad;
	}
	should_write_sb = false;
	if (memcmp(ic->sb->magic, SB_MAGIC, 8)) {
		if (ic->mode != 'R') {
			if (memchr_inv(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT)) {
				r = -EINVAL;
				ti->error = "The device is not initialized";
				goto bad;
			}
		}

		r = initialize_superblock(ic, journal_sectors, interleave_sectors);
		if (r) {
			ti->error = "Could not initialize superblock";
			goto bad;
		}
		if (ic->mode != 'R')
			should_write_sb = true;
	}

	if (!ic->sb->version || ic->sb->version > SB_VERSION_4) {
		r = -EINVAL;
		ti->error = "Unknown version";
		goto bad;
	}
	if (le16_to_cpu(ic->sb->integrity_tag_size) != ic->tag_size) {
		r = -EINVAL;
		ti->error = "Tag size doesn't match the information in superblock";
		goto bad;
	}
	if (ic->sb->log2_sectors_per_block != __ffs(ic->sectors_per_block)) {
		r = -EINVAL;
		ti->error = "Block size doesn't match the information in superblock";
		goto bad;
	}
	if (!le32_to_cpu(ic->sb->journal_sections)) {
		r = -EINVAL;
		ti->error = "Corrupted superblock, journal_sections is 0";
		goto bad;
	}
	/* make sure that ti->max_io_len doesn't overflow */
	if (!ic->meta_dev) {
		if (ic->sb->log2_interleave_sectors < MIN_LOG2_INTERLEAVE_SECTORS ||
		    ic->sb->log2_interleave_sectors > MAX_LOG2_INTERLEAVE_SECTORS) {
			r = -EINVAL;
			ti->error = "Invalid interleave_sectors in the superblock";
			goto bad;
		}
	} else {
		if (ic->sb->log2_interleave_sectors) {
			r = -EINVAL;
			ti->error = "Invalid interleave_sectors in the superblock";
			goto bad;
		}
	}
	ic->provided_data_sectors = le64_to_cpu(ic->sb->provided_data_sectors);
	if (ic->provided_data_sectors != le64_to_cpu(ic->sb->provided_data_sectors)) {
		/* test for overflow */
		r = -EINVAL;
		ti->error = "The superblock has 64-bit device size, but the kernel was compiled with 32-bit sectors";
		goto bad;
	}
	if (!!(ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC)) != !!ic->journal_mac_alg.alg_string) {
		r = -EINVAL;
		ti->error = "Journal mac mismatch";
		goto bad;
	}

try_smaller_buffer:
	r = calculate_device_limits(ic);
	if (r) {
		if (ic->meta_dev) {
			if (ic->log2_buffer_sectors > 3) {
				ic->log2_buffer_sectors--;
				goto try_smaller_buffer;
			}
		}
		ti->error = "The device is too small";
		goto bad;
	}

	if (log2_sectors_per_bitmap_bit < 0)
		log2_sectors_per_bitmap_bit = __fls(DEFAULT_SECTORS_PER_BITMAP_BIT);
	if (log2_sectors_per_bitmap_bit < ic->sb->log2_sectors_per_block)
		log2_sectors_per_bitmap_bit = ic->sb->log2_sectors_per_block;

	bits_in_journal = ((__u64)ic->journal_section_sectors * ic->journal_sections) << (SECTOR_SHIFT + 3);
	if (bits_in_journal > UINT_MAX)
		bits_in_journal = UINT_MAX;
	while (bits_in_journal < (ic->provided_data_sectors + ((sector_t)1 << log2_sectors_per_bitmap_bit) - 1) >> log2_sectors_per_bitmap_bit)
		log2_sectors_per_bitmap_bit++;

	log2_blocks_per_bitmap_bit = log2_sectors_per_bitmap_bit - ic->sb->log2_sectors_per_block;
	ic->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit;
	if (should_write_sb) {
		ic->sb->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit;
	}
	n_bitmap_bits = ((ic->provided_data_sectors >> ic->sb->log2_sectors_per_block)
			 + (((sector_t)1 << log2_blocks_per_bitmap_bit) - 1)) >> log2_blocks_per_bitmap_bit;
	ic->n_bitmap_blocks = DIV_ROUND_UP(n_bitmap_bits, BITMAP_BLOCK_SIZE * 8);

	if (!ic->meta_dev)
		ic->log2_buffer_sectors = min(ic->log2_buffer_sectors, (__u8)__ffs(ic->metadata_run));

	if (ti->len > ic->provided_data_sectors) {
		r = -EINVAL;
		ti->error = "Not enough provided sectors for requested mapping size";
		goto bad;
	}

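	/*
	 * Illustrative bitmap sizing (assumed numbers, not derived from this
	 * file): with 512-byte sectors and blocks, a 1 GiB data area is
	 * 2097152 sectors. At the default granularity of 32768 sectors per
	 * bitmap bit (log2 = 15) that needs 2097152 >> 15 = 64 bits, and
	 * DIV_ROUND_UP(64, 4096 * 8) = 1, so n_bitmap_blocks would be 1,
	 * provided the journal is large enough to hold those 64 bits.
	 */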
	/*
	 * Convert the journal watermark percentage into an absolute entry
	 * count, rounded by adding 50 before dividing by 100: e.g.
	 * (illustrative numbers) 1000 journal entries at the default
	 * watermark of 50 give (1000 * 50 + 50) / 100 = 500.
	 */
	threshold = (__u64)ic->journal_entries * (100 - journal_watermark);
	threshold += 50;
	do_div(threshold, 100);
	ic->free_sectors_threshold = threshold;

	DEBUG_print("initialized:\n");
	DEBUG_print("	integrity_tag_size %u\n", le16_to_cpu(ic->sb->integrity_tag_size));
	DEBUG_print("	journal_entry_size %u\n", ic->journal_entry_size);
	DEBUG_print("	journal_entries_per_sector %u\n", ic->journal_entries_per_sector);
	DEBUG_print("	journal_section_entries %u\n", ic->journal_section_entries);
	DEBUG_print("	journal_section_sectors %u\n", ic->journal_section_sectors);
	DEBUG_print("	journal_sections %u\n", (unsigned)le32_to_cpu(ic->sb->journal_sections));
	DEBUG_print("	journal_entries %u\n", ic->journal_entries);
	DEBUG_print("	log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors);
	DEBUG_print("	data_device_sectors 0x%llx\n", i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT);
	DEBUG_print("	initial_sectors 0x%x\n", ic->initial_sectors);
	DEBUG_print("	metadata_run 0x%x\n", ic->metadata_run);
	DEBUG_print("	log2_metadata_run %d\n", ic->log2_metadata_run);
	DEBUG_print("	provided_data_sectors 0x%llx (%llu)\n", (unsigned long long)ic->provided_data_sectors,
		    (unsigned long long)ic->provided_data_sectors);
	DEBUG_print("	log2_buffer_sectors %u\n", ic->log2_buffer_sectors);
	DEBUG_print("	bits_in_journal %llu\n", (unsigned long long)bits_in_journal);

	if (ic->recalculate_flag && !(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))) {
		ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
		ic->sb->recalc_sector = cpu_to_le64(0);
	}

	if (ic->internal_hash) {
		ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1);
		if (!ic->recalc_wq) {
			ti->error = "Cannot allocate workqueue";
			r = -ENOMEM;
			goto bad;
		}
		INIT_WORK(&ic->recalc_work, integrity_recalc);
		ic->recalc_buffer = vmalloc(RECALC_SECTORS << SECTOR_SHIFT);
		if (!ic->recalc_buffer) {
			ti->error = "Cannot allocate buffer for recalculating";
			r = -ENOMEM;
			goto bad;
		}
		ic->recalc_tags = kvmalloc_array(RECALC_SECTORS >> ic->sb->log2_sectors_per_block,
						 ic->tag_size, GFP_KERNEL);
		if (!ic->recalc_tags) {
			ti->error = "Cannot allocate tags for recalculating";
			r = -ENOMEM;
			goto bad;
		}
	}

	ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev,
					   1U << (SECTOR_SHIFT + ic->log2_buffer_sectors), 1, 0, NULL, NULL);
	if (IS_ERR(ic->bufio)) {
		r = PTR_ERR(ic->bufio);
		ti->error = "Cannot initialize dm-bufio";
		ic->bufio = NULL;
		goto bad;
	}
	dm_bufio_set_sector_offset(ic->bufio, ic->start + ic->initial_sectors);

	if (ic->mode != 'R') {
		r = create_journal(ic, &ti->error);
		if (r)
			goto bad;
	}

	if (ic->mode == 'B') {
		unsigned i;
		unsigned n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);

		ic->recalc_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
		if (!ic->recalc_bitmap) {
			r = -ENOMEM;
			goto bad;
		}
		ic->may_write_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
		if (!ic->may_write_bitmap) {
			r = -ENOMEM;
			goto bad;
		}
		ic->bbs = kvmalloc_array(ic->n_bitmap_blocks, sizeof(struct bitmap_block_status), GFP_KERNEL);
		if (!ic->bbs) {
			r = -ENOMEM;
			goto bad;
		}
		INIT_DELAYED_WORK(&ic->bitmap_flush_work, bitmap_flush_work);
		for (i = 0; i < ic->n_bitmap_blocks; i++) {
			struct bitmap_block_status *bbs = &ic->bbs[i];
			unsigned sector, pl_index, pl_offset;

			INIT_WORK(&bbs->work, bitmap_block_work);
			bbs->ic = ic;
			bbs->idx = i;
			bio_list_init(&bbs->bio_queue);
			spin_lock_init(&bbs->bio_queue_lock);

			sector = i * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT);
			pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
			pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);

			bbs->bitmap = lowmem_page_address(ic->journal[pl_index].page) + pl_offset;
		}
	}

	if (should_write_sb) {
		init_journal(ic, 0, ic->journal_sections, 0);
		r = dm_integrity_failed(ic);
		if (unlikely(r)) {
			ti->error = "Error initializing journal";
			goto bad;
		}
		r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
		if (r) {
			ti->error = "Error initializing superblock";
			goto bad;
		}
		ic->just_formatted = true;
	}

	if (!ic->meta_dev) {
		r = dm_set_target_max_io_len(ti, 1U << ic->sb->log2_interleave_sectors);
		if (r)
			goto bad;
	}
	if (ic->mode == 'B') {
		unsigned max_io_len = ((sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit) * (BITMAP_BLOCK_SIZE * 8);
		if (!max_io_len)
			max_io_len = 1U << 31;
		DEBUG_print("max_io_len: old %u, new %u\n", ti->max_io_len, max_io_len);
		if (!ti->max_io_len || ti->max_io_len > max_io_len) {
			r = dm_set_target_max_io_len(ti, max_io_len);
			if (r)
				goto bad;
		}
	}

	if (!ic->internal_hash)
		dm_integrity_set(ti, ic);

	ti->num_flush_bios = 1;
	ti->flush_supported = true;

	return 0;

bad:
	dm_integrity_dtr(ti);
	return r;
}

static void dm_integrity_dtr(struct dm_target *ti)
{
	struct dm_integrity_c *ic = ti->private;

	BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
	BUG_ON(!list_empty(&ic->wait_list));

	if (ic->metadata_wq)
		destroy_workqueue(ic->metadata_wq);
	if (ic->wait_wq)
		destroy_workqueue(ic->wait_wq);
	if (ic->offload_wq)
		destroy_workqueue(ic->offload_wq);
	if (ic->commit_wq)
		destroy_workqueue(ic->commit_wq);
	if (ic->writer_wq)
		destroy_workqueue(ic->writer_wq);
	if (ic->recalc_wq)
		destroy_workqueue(ic->recalc_wq);
	vfree(ic->recalc_buffer);
	kvfree(ic->recalc_tags);
	kvfree(ic->bbs);
	if (ic->bufio)
		dm_bufio_client_destroy(ic->bufio);
	mempool_exit(&ic->journal_io_mempool);
	if (ic->io)
		dm_io_client_destroy(ic->io);
	if (ic->dev)
		dm_put_device(ti, ic->dev);
	if (ic->meta_dev)
		dm_put_device(ti, ic->meta_dev);
	dm_integrity_free_page_list(ic->journal);
	dm_integrity_free_page_list(ic->journal_io);
	dm_integrity_free_page_list(ic->journal_xor);
	dm_integrity_free_page_list(ic->recalc_bitmap);
	dm_integrity_free_page_list(ic->may_write_bitmap);
	if (ic->journal_scatterlist)
		dm_integrity_free_journal_scatterlist(ic, ic->journal_scatterlist);
	if (ic->journal_io_scatterlist)
		dm_integrity_free_journal_scatterlist(ic, ic->journal_io_scatterlist);
	if (ic->sk_requests) {
		unsigned i;

		for (i = 0; i < ic->journal_sections; i++) {
			struct skcipher_request *req = ic->sk_requests[i];
			if (req) {
				kzfree(req->iv);
				skcipher_request_free(req);
			}
		}
		kvfree(ic->sk_requests);
	}
	kvfree(ic->journal_tree);
	if (ic->sb)
		free_pages_exact(ic->sb, SB_SECTORS << SECTOR_SHIFT);

	if (ic->internal_hash)
		crypto_free_shash(ic->internal_hash);
	free_alg(&ic->internal_hash_alg);

	if (ic->journal_crypt)
		crypto_free_skcipher(ic->journal_crypt);
	free_alg(&ic->journal_crypt_alg);

	if (ic->journal_mac)
		crypto_free_shash(ic->journal_mac);
	free_alg(&ic->journal_mac_alg);

	kfree(ic);
}

static struct target_type integrity_target = {
	.name = "integrity",
	.version = {1, 5, 0},
	.module = THIS_MODULE,
	.features = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
	.ctr = dm_integrity_ctr,
	.dtr = dm_integrity_dtr,
	.map = dm_integrity_map,
	.postsuspend = dm_integrity_postsuspend,
	.resume = dm_integrity_resume,
	.status = dm_integrity_status,
	.iterate_devices = dm_integrity_iterate_devices,
	.io_hints = dm_integrity_io_hints,
};

static int __init dm_integrity_init(void)
{
	int r;

	journal_io_cache = kmem_cache_create("integrity_journal_io",
					     sizeof(struct journal_io), 0, 0, NULL);
	if (!journal_io_cache) {
		DMERR("can't allocate journal io cache");
		return -ENOMEM;
	}

	r = dm_register_target(&integrity_target);

	if (r < 0)
		DMERR("register failed %d", r);

	return r;
}

static void __exit dm_integrity_exit(void)
{
	dm_unregister_target(&integrity_target);
	kmem_cache_destroy(journal_io_cache);
}

module_init(dm_integrity_init);
module_exit(dm_integrity_exit);

MODULE_AUTHOR("Milan Broz");
MODULE_AUTHOR("Mikulas Patocka");
MODULE_DESCRIPTION(DM_NAME " target for integrity tags extension");
MODULE_LICENSE("GPL");