/*
 * Copyright (C) 2016-2017 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2016-2017 Milan Broz
 * Copyright (C) 2016-2017 Mikulas Patocka
 *
 * This file is released under the GPL.
 */

#include <linux/module.h>
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/vmalloc.h>
#include <linux/sort.h>
#include <linux/rbtree.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/log2.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/async_tx.h>
#include "dm-bufio.h"

#define DM_MSG_PREFIX "integrity"

#define DEFAULT_INTERLEAVE_SECTORS	32768
#define DEFAULT_JOURNAL_SIZE_FACTOR	7
#define DEFAULT_BUFFER_SECTORS		128
#define DEFAULT_JOURNAL_WATERMARK	50
#define DEFAULT_SYNC_MSEC		10000
#define DEFAULT_MAX_JOURNAL_SECTORS	131072
#define MIN_LOG2_INTERLEAVE_SECTORS	3
#define MAX_LOG2_INTERLEAVE_SECTORS	31
#define METADATA_WORKQUEUE_MAX_ACTIVE	16

/*
 * Warning - DEBUG_PRINT prints security-sensitive data to the log,
 * so it should not be enabled in the official kernel.
 */
//#define DEBUG_PRINT
//#define INTERNAL_VERIFY

/*
 * On-disk structures
 */

#define SB_MAGIC		"integrt"
#define SB_VERSION		1
#define SB_SECTORS		8
#define MAX_SECTORS_PER_BLOCK	8

struct superblock {
	__u8 magic[8];
	__u8 version;
	__u8 log2_interleave_sectors;
	__u16 integrity_tag_size;
	__u32 journal_sections;
	__u64 provided_data_sectors;	/* userspace uses this value */
	__u32 flags;
	__u8 log2_sectors_per_block;
};

#define SB_FLAG_HAVE_JOURNAL_MAC	0x1

#define JOURNAL_ENTRY_ROUNDUP		8

typedef __u64 commit_id_t;
#define JOURNAL_MAC_PER_SECTOR		8

struct journal_entry {
	union {
		struct {
			__u32 sector_lo;
			__u32 sector_hi;
		} s;
		__u64 sector;
	} u;
	commit_id_t last_bytes[0];
	/* __u8 tag[0]; */
};

#define journal_entry_tag(ic, je)		((__u8 *)&(je)->last_bytes[(ic)->sectors_per_block])

#if BITS_PER_LONG == 64
#define journal_entry_set_sector(je, x)		do { smp_wmb(); ACCESS_ONCE((je)->u.sector) = cpu_to_le64(x); } while (0)
#define journal_entry_get_sector(je)		le64_to_cpu((je)->u.sector)
#elif defined(CONFIG_LBDAF)
#define journal_entry_set_sector(je, x)		do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); ACCESS_ONCE((je)->u.s.sector_hi) = cpu_to_le32((x) >> 32); } while (0)
#define journal_entry_get_sector(je)		le64_to_cpu((je)->u.sector)
#else
#define journal_entry_set_sector(je, x)		do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); ACCESS_ONCE((je)->u.s.sector_hi) = cpu_to_le32(0); } while (0)
#define journal_entry_get_sector(je)		le32_to_cpu((je)->u.s.sector_lo)
#endif
#define journal_entry_is_unused(je)		((je)->u.s.sector_hi == cpu_to_le32(-1))
#define journal_entry_set_unused(je)		do { ((je)->u.s.sector_hi = cpu_to_le32(-1)); } while (0)
#define journal_entry_is_inprogress(je)		((je)->u.s.sector_hi == cpu_to_le32(-2))
#define journal_entry_set_inprogress(je)	do { ((je)->u.s.sector_hi = cpu_to_le32(-2)); } while (0)

#define JOURNAL_BLOCK_SECTORS		8
#define JOURNAL_SECTOR_DATA		((1 << SECTOR_SHIFT) - sizeof(commit_id_t))
#define JOURNAL_MAC_SIZE		(JOURNAL_MAC_PER_SECTOR * JOURNAL_BLOCK_SECTORS)

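/*
 * Journal sector layout (with SECTOR_SHIFT == 9, i.e. 512-byte sectors):
 * JOURNAL_SECTOR_DATA is 512 - 8 == 504, so entries[] spans the first
 * 504 - 8 == 496 bytes, followed by 8 bytes of the section MAC and the
 * 8-byte commit_id that seals the sector.
 */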
struct journal_sector {
	__u8 entries[JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR];
	__u8 mac[JOURNAL_MAC_PER_SECTOR];
	commit_id_t commit_id;
};

#define MAX_TAG_SIZE			(JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR - offsetof(struct journal_entry, last_bytes[MAX_SECTORS_PER_BLOCK]))

#define METADATA_PADDING_SECTORS	8

#define N_COMMIT_IDS			4

static unsigned char prev_commit_seq(unsigned char seq)
{
	return (seq + N_COMMIT_IDS - 1) % N_COMMIT_IDS;
}

static unsigned char next_commit_seq(unsigned char seq)
{
	return (seq + 1) % N_COMMIT_IDS;
}
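
/*
 * Example: with N_COMMIT_IDS == 4 the sequence cycles 0, 1, 2, 3, 0, ...,
 * so next_commit_seq(3) == 0 and prev_commit_seq(0) == 3. The sequence
 * number selects which of the four commit ids a journal section was
 * stamped with.
 */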

/*
 * In-memory structures
 */

struct journal_node {
	struct rb_node node;
	sector_t sector;
};

struct alg_spec {
	char *alg_string;
	char *key_string;
	__u8 *key;
	unsigned key_size;
};

struct dm_integrity_c {
	struct dm_dev *dev;
	unsigned tag_size;
	__s8 log2_tag_size;
	sector_t start;
	mempool_t *journal_io_mempool;
	struct dm_io_client *io;
	struct dm_bufio_client *bufio;
	struct workqueue_struct *metadata_wq;
	struct superblock *sb;
	unsigned journal_pages;
	struct page_list *journal;
	struct page_list *journal_io;
	struct page_list *journal_xor;

	struct crypto_skcipher *journal_crypt;
	struct scatterlist **journal_scatterlist;
	struct scatterlist **journal_io_scatterlist;
	struct skcipher_request **sk_requests;

	struct crypto_shash *journal_mac;

	struct journal_node *journal_tree;
	struct rb_root journal_tree_root;

	sector_t provided_data_sectors;

	unsigned short journal_entry_size;
	unsigned char journal_entries_per_sector;
	unsigned char journal_section_entries;
	unsigned short journal_section_sectors;
	unsigned journal_sections;
	unsigned journal_entries;
	sector_t device_sectors;
	unsigned initial_sectors;
	unsigned metadata_run;
	__s8 log2_metadata_run;
	__u8 log2_buffer_sectors;
	__u8 sectors_per_block;

	unsigned char mode;
	bool suspending;

	int failed;

	struct crypto_shash *internal_hash;

	/* these variables are locked with endio_wait.lock */
	struct rb_root in_progress;
	wait_queue_head_t endio_wait;
	struct workqueue_struct *wait_wq;

	unsigned char commit_seq;
	commit_id_t commit_ids[N_COMMIT_IDS];

	unsigned committed_section;
	unsigned n_committed_sections;

	unsigned uncommitted_section;
	unsigned n_uncommitted_sections;

	unsigned free_section;
	unsigned char free_section_entry;
	unsigned free_sectors;

	unsigned free_sectors_threshold;

	struct workqueue_struct *commit_wq;
	struct work_struct commit_work;

	struct workqueue_struct *writer_wq;
	struct work_struct writer_work;

	struct bio_list flush_bio_list;

	unsigned long autocommit_jiffies;
	struct timer_list autocommit_timer;
	unsigned autocommit_msec;

	wait_queue_head_t copy_to_journal_wait;

	struct completion crypto_backoff;

	bool journal_uptodate;
	bool just_formatted;

	struct alg_spec internal_hash_alg;
	struct alg_spec journal_crypt_alg;
	struct alg_spec journal_mac_alg;
};

struct dm_integrity_range {
	sector_t logical_sector;
	unsigned n_sectors;
	struct rb_node node;
};

struct dm_integrity_io {
	struct work_struct work;

	struct dm_integrity_c *ic;
	bool write;
	bool fua;

	struct dm_integrity_range range;

	sector_t metadata_block;
	unsigned metadata_offset;

	atomic_t in_flight;
	int bi_error;

	struct completion *completion;

	struct block_device *orig_bi_bdev;
	bio_end_io_t *orig_bi_end_io;
	struct bio_integrity_payload *orig_bi_integrity;
	struct bvec_iter orig_bi_iter;
};

struct journal_completion {
	struct dm_integrity_c *ic;
	atomic_t in_flight;
	struct completion comp;
};

struct journal_io {
	struct dm_integrity_range range;
	struct journal_completion *comp;
};

static struct kmem_cache *journal_io_cache;

#define JOURNAL_IO_MEMPOOL	32

#ifdef DEBUG_PRINT
#define DEBUG_print(x, ...)	printk(KERN_DEBUG x, ##__VA_ARGS__)
static void __DEBUG_bytes(__u8 *bytes, size_t len, const char *msg, ...)
{
	va_list args;
	va_start(args, msg);
	vprintk(msg, args);
	va_end(args);
	if (len)
		pr_cont(":");
	while (len) {
		pr_cont(" %02x", *bytes);
		bytes++;
		len--;
	}
	pr_cont("\n");
}
#define DEBUG_bytes(bytes, len, msg, ...)	__DEBUG_bytes(bytes, len, KERN_DEBUG msg, ##__VA_ARGS__)
#else
#define DEBUG_print(x, ...)			do { } while (0)
#define DEBUG_bytes(bytes, len, msg, ...)	do { } while (0)
#endif

/*
 * DM Integrity profile; protection is performed by a layer above (dm-crypt).
 */
static struct blk_integrity_profile dm_integrity_profile = {
	.name		= "DM-DIF-EXT-TAG",
	.generate_fn	= NULL,
	.verify_fn	= NULL,
};

static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map);
static void integrity_bio_wait(struct work_struct *w);
static void dm_integrity_dtr(struct dm_target *ti);

static void dm_integrity_io_error(struct dm_integrity_c *ic, const char *msg, int err)
{
	if (!cmpxchg(&ic->failed, 0, err))
		DMERR("Error on %s: %d", msg, err);
}

static int dm_integrity_failed(struct dm_integrity_c *ic)
{
	return ACCESS_ONCE(ic->failed);
}

static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i,
					  unsigned j, unsigned char seq)
{
	/*
	 * Xor the number with the section and sector numbers, so that if a
	 * piece of the journal is written at the wrong place, it is detected.
	 */
	return ic->commit_ids[seq] ^ cpu_to_le64(((__u64)i << 32) ^ j);
}
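
/*
 * Example: for section i == 2 and sector j == 5,
 * ((__u64)i << 32) ^ j == 0x0000000200000005, so the commit id stored in
 * that journal sector differs from the raw ic->commit_ids[seq]; a journal
 * block that lands at the wrong section or sector will not match its
 * expected commit id on replay.
 */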

static void get_area_and_offset(struct dm_integrity_c *ic, sector_t data_sector,
				sector_t *area, sector_t *offset)
{
	__u8 log2_interleave_sectors = ic->sb->log2_interleave_sectors;

	*area = data_sector >> log2_interleave_sectors;
	*offset = (unsigned)data_sector & ((1U << log2_interleave_sectors) - 1);
}
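
/*
 * Example: with the default interleave of 32768 sectors
 * (log2_interleave_sectors == 15), data_sector 100000 yields
 * area == 100000 >> 15 == 3 and offset == 100000 - 3 * 32768 == 1696.
 */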

#define sector_to_block(ic, n)						\
do {									\
	BUG_ON((n) & (unsigned)((ic)->sectors_per_block - 1));		\
	(n) >>= (ic)->sb->log2_sectors_per_block;			\
} while (0)

static __u64 get_metadata_sector_and_offset(struct dm_integrity_c *ic, sector_t area,
					    sector_t offset, unsigned *metadata_offset)
{
	__u64 ms;
	unsigned mo;

	ms = area << ic->sb->log2_interleave_sectors;
	if (likely(ic->log2_metadata_run >= 0))
		ms += area << ic->log2_metadata_run;
	else
		ms += area * ic->metadata_run;
	ms >>= ic->log2_buffer_sectors;

	sector_to_block(ic, offset);

	if (likely(ic->log2_tag_size >= 0)) {
		ms += offset >> (SECTOR_SHIFT + ic->log2_buffer_sectors - ic->log2_tag_size);
		mo = (offset << ic->log2_tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
	} else {
		ms += (__u64)offset * ic->tag_size >> (SECTOR_SHIFT + ic->log2_buffer_sectors);
		mo = (offset * ic->tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
	}
	*metadata_offset = mo;
	return ms;
}

static sector_t get_data_sector(struct dm_integrity_c *ic, sector_t area, sector_t offset)
{
	sector_t result;

	result = area << ic->sb->log2_interleave_sectors;
	if (likely(ic->log2_metadata_run >= 0))
		result += (area + 1) << ic->log2_metadata_run;
	else
		result += (area + 1) * ic->metadata_run;

	result += (sector_t)ic->initial_sectors + offset;
	return result;
}
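
/*
 * Together these helpers define the on-disk layout: the initial sectors
 * (superblock and journal) are followed by alternating metadata runs and
 * data areas, i.e. [initial][meta 0][data 0][meta 1][data 1]..., which is
 * why get_data_sector() adds (area + 1) metadata runs in front of the
 * requested data area.
 */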

static void wraparound_section(struct dm_integrity_c *ic, unsigned *sec_ptr)
{
	if (unlikely(*sec_ptr >= ic->journal_sections))
		*sec_ptr -= ic->journal_sections;
}

static int sync_rw_sb(struct dm_integrity_c *ic, int op, int op_flags)
{
	struct dm_io_request io_req;
	struct dm_io_region io_loc;

	io_req.bi_op = op;
	io_req.bi_op_flags = op_flags;
	io_req.mem.type = DM_IO_KMEM;
	io_req.mem.ptr.addr = ic->sb;
	io_req.notify.fn = NULL;
	io_req.client = ic->io;
	io_loc.bdev = ic->dev->bdev;
	io_loc.sector = ic->start;
	io_loc.count = SB_SECTORS;

	return dm_io(&io_req, 1, &io_loc, NULL);
}

static void access_journal_check(struct dm_integrity_c *ic, unsigned section, unsigned offset,
				 bool e, const char *function)
{
#if defined(CONFIG_DM_DEBUG) || defined(INTERNAL_VERIFY)
	unsigned limit = e ? ic->journal_section_entries : ic->journal_section_sectors;

	if (unlikely(section >= ic->journal_sections) ||
	    unlikely(offset >= limit)) {
		printk(KERN_CRIT "%s: invalid access at (%u,%u), limit (%u,%u)\n",
			function, section, offset, ic->journal_sections, limit);
		BUG();
	}
#endif
}

static void page_list_location(struct dm_integrity_c *ic, unsigned section, unsigned offset,
			       unsigned *pl_index, unsigned *pl_offset)
{
	unsigned sector;

	access_journal_check(ic, section, offset, false, "page_list_location");

	sector = section * ic->journal_section_sectors + offset;

	*pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	*pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
}

static struct journal_sector *access_page_list(struct dm_integrity_c *ic, struct page_list *pl,
					       unsigned section, unsigned offset, unsigned *n_sectors)
{
	unsigned pl_index, pl_offset;
	char *va;

	page_list_location(ic, section, offset, &pl_index, &pl_offset);

	if (n_sectors)
		*n_sectors = (PAGE_SIZE - pl_offset) >> SECTOR_SHIFT;

	va = lowmem_page_address(pl[pl_index].page);

	return (struct journal_sector *)(va + pl_offset);
}

static struct journal_sector *access_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset)
{
	return access_page_list(ic, ic->journal, section, offset, NULL);
}

static struct journal_entry *access_journal_entry(struct dm_integrity_c *ic, unsigned section, unsigned n)
{
	unsigned rel_sector, offset;
	struct journal_sector *js;

	access_journal_check(ic, section, n, true, "access_journal_entry");

	rel_sector = n % JOURNAL_BLOCK_SECTORS;
	offset = n / JOURNAL_BLOCK_SECTORS;

	js = access_journal(ic, section, rel_sector);
	return (struct journal_entry *)((char *)js + offset * ic->journal_entry_size);
}
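
/*
 * Journal entries are interleaved across the JOURNAL_BLOCK_SECTORS
 * sectors of a section: entry n lives in sector n % 8 at slot n / 8.
 * E.g. entry 10 is the slot-1 entry of sector 2. Each sector thus holds
 * at most journal_entries_per_sector entries inside its entries[] area,
 * leaving the trailing mac and commit_id fields untouched.
 */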

static struct journal_sector *access_journal_data(struct dm_integrity_c *ic, unsigned section, unsigned n)
{
	n <<= ic->sb->log2_sectors_per_block;

	n += JOURNAL_BLOCK_SECTORS;

	access_journal_check(ic, section, n, false, "access_journal_data");

	return access_journal(ic, section, n);
}

static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result[JOURNAL_MAC_SIZE])
{
	SHASH_DESC_ON_STACK(desc, ic->journal_mac);
	int r;
	unsigned j, size;

	desc->tfm = ic->journal_mac;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	r = crypto_shash_init(desc);
	if (unlikely(r)) {
		dm_integrity_io_error(ic, "crypto_shash_init", r);
		goto err;
	}

	for (j = 0; j < ic->journal_section_entries; j++) {
		struct journal_entry *je = access_journal_entry(ic, section, j);
		r = crypto_shash_update(desc, (__u8 *)&je->u.sector, sizeof je->u.sector);
		if (unlikely(r)) {
			dm_integrity_io_error(ic, "crypto_shash_update", r);
			goto err;
		}
	}

	size = crypto_shash_digestsize(ic->journal_mac);

	if (likely(size <= JOURNAL_MAC_SIZE)) {
		r = crypto_shash_final(desc, result);
		if (unlikely(r)) {
			dm_integrity_io_error(ic, "crypto_shash_final", r);
			goto err;
		}
		memset(result + size, 0, JOURNAL_MAC_SIZE - size);
	} else {
		__u8 digest[size];
		r = crypto_shash_final(desc, digest);
		if (unlikely(r)) {
			dm_integrity_io_error(ic, "crypto_shash_final", r);
			goto err;
		}
		memcpy(result, digest, JOURNAL_MAC_SIZE);
	}

	return;
err:
	memset(result, 0, JOURNAL_MAC_SIZE);
}

static void rw_section_mac(struct dm_integrity_c *ic, unsigned section, bool wr)
{
	__u8 result[JOURNAL_MAC_SIZE];
	unsigned j;

	if (!ic->journal_mac)
		return;

	section_mac(ic, section, result);

	for (j = 0; j < JOURNAL_BLOCK_SECTORS; j++) {
		struct journal_sector *js = access_journal(ic, section, j);

		if (likely(wr))
			memcpy(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR);
		else {
			if (memcmp(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR))
				dm_integrity_io_error(ic, "journal mac", -EILSEQ);
		}
	}
}

static void complete_journal_op(void *context)
{
	struct journal_completion *comp = context;
	BUG_ON(!atomic_read(&comp->in_flight));
	if (likely(atomic_dec_and_test(&comp->in_flight)))
		complete(&comp->comp);
}

static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
			unsigned n_sections, struct journal_completion *comp)
{
	struct async_submit_ctl submit;
	size_t n_bytes = (size_t)(n_sections * ic->journal_section_sectors) << SECTOR_SHIFT;
	unsigned pl_index, pl_offset, section_index;
	struct page_list *source_pl, *target_pl;

	if (likely(encrypt)) {
		source_pl = ic->journal;
		target_pl = ic->journal_io;
	} else {
		source_pl = ic->journal_io;
		target_pl = ic->journal;
	}

	page_list_location(ic, section, 0, &pl_index, &pl_offset);

	atomic_add(roundup(pl_offset + n_bytes, PAGE_SIZE) >> PAGE_SHIFT, &comp->in_flight);

	init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, complete_journal_op, comp, NULL);

	section_index = pl_index;

	do {
		size_t this_step;
		struct page *src_pages[2];
		struct page *dst_page;

		while (unlikely(pl_index == section_index)) {
			unsigned dummy;
			if (likely(encrypt))
				rw_section_mac(ic, section, true);
			section++;
			n_sections--;
			if (!n_sections)
				break;
			page_list_location(ic, section, 0, &section_index, &dummy);
		}

		this_step = min(n_bytes, (size_t)PAGE_SIZE - pl_offset);
		dst_page = target_pl[pl_index].page;
		src_pages[0] = source_pl[pl_index].page;
		src_pages[1] = ic->journal_xor[pl_index].page;

		async_xor(dst_page, src_pages, pl_offset, 2, this_step, &submit);

		pl_index++;
		pl_offset = 0;
		n_bytes -= this_step;
	} while (n_bytes);

	BUG_ON(n_sections);

	async_tx_issue_pending_all();
}
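
/*
 * xor_journal() is the stream-cipher path: every journal page is xored
 * with the matching pregenerated keystream page in ic->journal_xor, so
 * target = source ^ keystream serves both directions; encrypt and
 * decrypt differ only in which of ic->journal and ic->journal_io is the
 * source.
 */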

static void complete_journal_encrypt(struct crypto_async_request *req, int err)
{
	struct journal_completion *comp = req->data;
	if (unlikely(err)) {
		if (likely(err == -EINPROGRESS)) {
			complete(&comp->ic->crypto_backoff);
			return;
		}
		dm_integrity_io_error(comp->ic, "asynchronous encrypt", err);
	}
	complete_journal_op(comp);
}

static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_completion *comp)
{
	int r;
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
				      complete_journal_encrypt, comp);
	if (likely(encrypt))
		r = crypto_skcipher_encrypt(req);
	else
		r = crypto_skcipher_decrypt(req);
	if (likely(!r))
		return false;
	if (likely(r == -EINPROGRESS))
		return true;
	if (likely(r == -EBUSY)) {
		wait_for_completion(&comp->ic->crypto_backoff);
		reinit_completion(&comp->ic->crypto_backoff);
		return true;
	}
	dm_integrity_io_error(comp->ic, "encrypt", r);
	return false;
}
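
/*
 * do_crypt() returns true when the request completes asynchronously.
 * The -EBUSY case means the request was queued on the crypto backlog:
 * complete_journal_encrypt() is then invoked first with -EINPROGRESS,
 * which signals crypto_backoff so the submitter may continue, and
 * invoked again when the request really finishes.
 */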

static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
			  unsigned n_sections, struct journal_completion *comp)
{
	struct scatterlist **source_sg;
	struct scatterlist **target_sg;

	atomic_add(2, &comp->in_flight);

	if (likely(encrypt)) {
		source_sg = ic->journal_scatterlist;
		target_sg = ic->journal_io_scatterlist;
	} else {
		source_sg = ic->journal_io_scatterlist;
		target_sg = ic->journal_scatterlist;
	}

	do {
		struct skcipher_request *req;
		unsigned ivsize;
		char *iv;

		if (likely(encrypt))
			rw_section_mac(ic, section, true);

		req = ic->sk_requests[section];
		ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
		iv = req->iv;

		memcpy(iv, iv + ivsize, ivsize);

		req->src = source_sg[section];
		req->dst = target_sg[section];

		if (unlikely(do_crypt(encrypt, req, comp)))
			atomic_inc(&comp->in_flight);

		section++;
		n_sections--;
	} while (n_sections);

	atomic_dec(&comp->in_flight);
	complete_journal_op(comp);
}

static void encrypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
			    unsigned n_sections, struct journal_completion *comp)
{
	if (ic->journal_xor)
		return xor_journal(ic, encrypt, section, n_sections, comp);
	else
		return crypt_journal(ic, encrypt, section, n_sections, comp);
}

static void complete_journal_io(unsigned long error, void *context)
{
	struct journal_completion *comp = context;
	if (unlikely(error != 0))
		dm_integrity_io_error(comp->ic, "writing journal", -EIO);
	complete_journal_op(comp);
}

static void rw_journal(struct dm_integrity_c *ic, int op, int op_flags, unsigned section,
		       unsigned n_sections, struct journal_completion *comp)
{
	struct dm_io_request io_req;
	struct dm_io_region io_loc;
	unsigned sector, n_sectors, pl_index, pl_offset;
	int r;

	if (unlikely(dm_integrity_failed(ic))) {
		if (comp)
			complete_journal_io(-1UL, comp);
		return;
	}

	sector = section * ic->journal_section_sectors;
	n_sectors = n_sections * ic->journal_section_sectors;

	pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);

	io_req.bi_op = op;
	io_req.bi_op_flags = op_flags;
	io_req.mem.type = DM_IO_PAGE_LIST;
	if (ic->journal_io)
		io_req.mem.ptr.pl = &ic->journal_io[pl_index];
	else
		io_req.mem.ptr.pl = &ic->journal[pl_index];
	io_req.mem.offset = pl_offset;
	if (likely(comp != NULL)) {
		io_req.notify.fn = complete_journal_io;
		io_req.notify.context = comp;
	} else {
		io_req.notify.fn = NULL;
	}
	io_req.client = ic->io;
	io_loc.bdev = ic->dev->bdev;
	io_loc.sector = ic->start + SB_SECTORS + sector;
	io_loc.count = n_sectors;

	r = dm_io(&io_req, 1, &io_loc, NULL);
	if (unlikely(r)) {
		dm_integrity_io_error(ic, op == REQ_OP_READ ? "reading journal" : "writing journal", r);
		if (comp) {
			WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
			complete_journal_io(-1UL, comp);
		}
	}
}

static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsigned commit_sections)
{
	struct journal_completion io_comp;
	struct journal_completion crypt_comp_1;
	struct journal_completion crypt_comp_2;
	unsigned i;

	io_comp.ic = ic;
	io_comp.comp = COMPLETION_INITIALIZER_ONSTACK(io_comp.comp);

	if (commit_start + commit_sections <= ic->journal_sections) {
		io_comp.in_flight = (atomic_t)ATOMIC_INIT(1);
		if (ic->journal_io) {
			crypt_comp_1.ic = ic;
			crypt_comp_1.comp = COMPLETION_INITIALIZER_ONSTACK(crypt_comp_1.comp);
			crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
			encrypt_journal(ic, true, commit_start, commit_sections, &crypt_comp_1);
			wait_for_completion_io(&crypt_comp_1.comp);
		} else {
			for (i = 0; i < commit_sections; i++)
				rw_section_mac(ic, commit_start + i, true);
		}
		rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, commit_sections, &io_comp);
	} else {
		unsigned to_end;
		io_comp.in_flight = (atomic_t)ATOMIC_INIT(2);
		to_end = ic->journal_sections - commit_start;
		if (ic->journal_io) {
			crypt_comp_1.ic = ic;
			crypt_comp_1.comp = COMPLETION_INITIALIZER_ONSTACK(crypt_comp_1.comp);
			crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
			encrypt_journal(ic, true, commit_start, to_end, &crypt_comp_1);
			if (try_wait_for_completion(&crypt_comp_1.comp)) {
				rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
				crypt_comp_1.comp = COMPLETION_INITIALIZER_ONSTACK(crypt_comp_1.comp);
				crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
				encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_1);
				wait_for_completion_io(&crypt_comp_1.comp);
			} else {
				crypt_comp_2.ic = ic;
				crypt_comp_2.comp = COMPLETION_INITIALIZER_ONSTACK(crypt_comp_2.comp);
				crypt_comp_2.in_flight = (atomic_t)ATOMIC_INIT(0);
				encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_2);
				wait_for_completion_io(&crypt_comp_1.comp);
				rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
				wait_for_completion_io(&crypt_comp_2.comp);
			}
		} else {
			for (i = 0; i < to_end; i++)
				rw_section_mac(ic, commit_start + i, true);
			rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
			for (i = 0; i < commit_sections - to_end; i++)
				rw_section_mac(ic, i, true);
		}
		rw_journal(ic, REQ_OP_WRITE, REQ_FUA, 0, commit_sections - to_end, &io_comp);
	}

	wait_for_completion_io(&io_comp.comp);
}

static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset,
			      unsigned n_sectors, sector_t target, io_notify_fn fn, void *data)
{
	struct dm_io_request io_req;
	struct dm_io_region io_loc;
	int r;
	unsigned sector, pl_index, pl_offset;

	BUG_ON((target | n_sectors | offset) & (unsigned)(ic->sectors_per_block - 1));

	if (unlikely(dm_integrity_failed(ic))) {
		fn(-1UL, data);
		return;
	}

	sector = section * ic->journal_section_sectors + JOURNAL_BLOCK_SECTORS + offset;

	pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);

	io_req.bi_op = REQ_OP_WRITE;
	io_req.bi_op_flags = 0;
	io_req.mem.type = DM_IO_PAGE_LIST;
	io_req.mem.ptr.pl = &ic->journal[pl_index];
	io_req.mem.offset = pl_offset;
	io_req.notify.fn = fn;
	io_req.notify.context = data;
	io_req.client = ic->io;
	io_loc.bdev = ic->dev->bdev;
	io_loc.sector = ic->start + target;
	io_loc.count = n_sectors;

	r = dm_io(&io_req, 1, &io_loc, NULL);
	if (unlikely(r)) {
		WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
		fn(-1UL, data);
	}
}

static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
{
	struct rb_node **n = &ic->in_progress.rb_node;
	struct rb_node *parent;

	BUG_ON((new_range->logical_sector | new_range->n_sectors) & (unsigned)(ic->sectors_per_block - 1));

	parent = NULL;

	while (*n) {
		struct dm_integrity_range *range = container_of(*n, struct dm_integrity_range, node);

		parent = *n;
		if (new_range->logical_sector + new_range->n_sectors <= range->logical_sector) {
			n = &range->node.rb_left;
		} else if (new_range->logical_sector >= range->logical_sector + range->n_sectors) {
			n = &range->node.rb_right;
		} else {
			return false;
		}
	}

	rb_link_node(&new_range->node, parent, n);
	rb_insert_color(&new_range->node, &ic->in_progress);

	return true;
}

static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity_range *range)
{
	rb_erase(&range->node, &ic->in_progress);
	wake_up_locked(&ic->endio_wait);
}

static void remove_range(struct dm_integrity_c *ic, struct dm_integrity_range *range)
{
	unsigned long flags;

	spin_lock_irqsave(&ic->endio_wait.lock, flags);
	remove_range_unlocked(ic, range);
	spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
}

static void init_journal_node(struct journal_node *node)
{
	RB_CLEAR_NODE(&node->node);
	node->sector = (sector_t)-1;
}

static void add_journal_node(struct dm_integrity_c *ic, struct journal_node *node, sector_t sector)
{
	struct rb_node **link;
	struct rb_node *parent;

	node->sector = sector;
	BUG_ON(!RB_EMPTY_NODE(&node->node));

	link = &ic->journal_tree_root.rb_node;
	parent = NULL;

	while (*link) {
		struct journal_node *j;
		parent = *link;
		j = container_of(parent, struct journal_node, node);
		if (sector < j->sector)
			link = &j->node.rb_left;
		else
			link = &j->node.rb_right;
	}

	rb_link_node(&node->node, parent, link);
	rb_insert_color(&node->node, &ic->journal_tree_root);
}

static void remove_journal_node(struct dm_integrity_c *ic, struct journal_node *node)
{
	BUG_ON(RB_EMPTY_NODE(&node->node));
	rb_erase(&node->node, &ic->journal_tree_root);
	init_journal_node(node);
}

#define NOT_FOUND	(-1U)

static unsigned find_journal_node(struct dm_integrity_c *ic, sector_t sector, sector_t *next_sector)
{
	struct rb_node *n = ic->journal_tree_root.rb_node;
	unsigned found = NOT_FOUND;
	*next_sector = (sector_t)-1;
	while (n) {
		struct journal_node *j = container_of(n, struct journal_node, node);
		if (sector == j->sector) {
			found = j - ic->journal_tree;
		}
		if (sector < j->sector) {
			*next_sector = j->sector;
			n = j->node.rb_left;
		} else {
			n = j->node.rb_right;
		}
	}

	return found;
}
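
/*
 * A sector may have several nodes in the tree (one per journal entry
 * that touches it). add_journal_node() inserts duplicates to the right
 * and the loop above keeps descending right on a match, so the returned
 * index is the rightmost, i.e. most recently added, node for the sector.
 */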

static bool test_journal_node(struct dm_integrity_c *ic, unsigned pos, sector_t sector)
{
	struct journal_node *node, *next_node;
	struct rb_node *next;

	if (unlikely(pos >= ic->journal_entries))
		return false;
	node = &ic->journal_tree[pos];
	if (unlikely(RB_EMPTY_NODE(&node->node)))
		return false;
	if (unlikely(node->sector != sector))
		return false;

	next = rb_next(&node->node);
	if (unlikely(!next))
		return true;

	next_node = container_of(next, struct journal_node, node);
	return next_node->sector != sector;
}

static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_node *node)
{
	struct rb_node *next;
	struct journal_node *next_node;
	unsigned next_section;

	BUG_ON(RB_EMPTY_NODE(&node->node));

	next = rb_next(&node->node);
	if (unlikely(!next))
		return false;

	next_node = container_of(next, struct journal_node, node);

	if (next_node->sector != node->sector)
		return false;

	next_section = (unsigned)(next_node - ic->journal_tree) / ic->journal_section_entries;
	if (next_section >= ic->committed_section &&
	    next_section < ic->committed_section + ic->n_committed_sections)
		return true;
	if (next_section + ic->journal_sections < ic->committed_section + ic->n_committed_sections)
		return true;

	return false;
}

#define TAG_READ	0
#define TAG_WRITE	1
#define TAG_CMP		2

static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, sector_t *metadata_block,
			       unsigned *metadata_offset, unsigned total_size, int op)
{
	do {
		unsigned char *data, *dp;
		struct dm_buffer *b;
		unsigned to_copy;
		int r;

		r = dm_integrity_failed(ic);
		if (unlikely(r))
			return r;

		data = dm_bufio_read(ic->bufio, *metadata_block, &b);
		if (unlikely(IS_ERR(data)))
			return PTR_ERR(data);

		to_copy = min((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - *metadata_offset, total_size);
		dp = data + *metadata_offset;
		if (op == TAG_READ) {
			memcpy(tag, dp, to_copy);
		} else if (op == TAG_WRITE) {
			memcpy(dp, tag, to_copy);
			dm_bufio_mark_buffer_dirty(b);
		} else {
			/* e.g.: op == TAG_CMP */
			if (unlikely(memcmp(dp, tag, to_copy))) {
				unsigned i;

				for (i = 0; i < to_copy; i++) {
					if (dp[i] != tag[i])
						break;
					total_size--;
				}
				dm_bufio_release(b);
				return total_size;
			}
		}
		dm_bufio_release(b);

		tag += to_copy;
		*metadata_offset += to_copy;
		if (unlikely(*metadata_offset == 1U << SECTOR_SHIFT << ic->log2_buffer_sectors)) {
			(*metadata_block)++;
			*metadata_offset = 0;
		}
		total_size -= to_copy;
	} while (unlikely(total_size));

	return 0;
}
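
/*
 * Return-value convention of dm_integrity_rw_tag(): 0 on success, a
 * negative errno on failure, and for TAG_CMP a positive count of the tag
 * bytes remaining from the first mismatching byte to the end of the
 * requested range; the caller uses it to compute the failing sector.
 */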

static void dm_integrity_flush_buffers(struct dm_integrity_c *ic)
{
	int r;
	r = dm_bufio_write_dirty_buffers(ic->bufio);
	if (unlikely(r))
		dm_integrity_io_error(ic, "writing tags", r);
}

static void sleep_on_endio_wait(struct dm_integrity_c *ic)
{
	DECLARE_WAITQUEUE(wait, current);
	__add_wait_queue(&ic->endio_wait, &wait);
	__set_current_state(TASK_UNINTERRUPTIBLE);
	spin_unlock_irq(&ic->endio_wait.lock);
	io_schedule();
	spin_lock_irq(&ic->endio_wait.lock);
	__remove_wait_queue(&ic->endio_wait, &wait);
}
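
/*
 * Open-coded wait: the caller already holds endio_wait.lock, which is
 * also the waitqueue lock, so the lockless __add_wait_queue() and
 * __remove_wait_queue() variants are used and the lock is dropped only
 * around io_schedule().
 */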

static void autocommit_fn(unsigned long data)
{
	struct dm_integrity_c *ic = (struct dm_integrity_c *)data;

	if (likely(!dm_integrity_failed(ic)))
		queue_work(ic->commit_wq, &ic->commit_work);
}

static void schedule_autocommit(struct dm_integrity_c *ic)
{
	if (!timer_pending(&ic->autocommit_timer))
		mod_timer(&ic->autocommit_timer, jiffies + ic->autocommit_jiffies);
}

static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
{
	struct bio *bio;
	spin_lock_irq(&ic->endio_wait.lock);
	bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
	bio_list_add(&ic->flush_bio_list, bio);
	spin_unlock_irq(&ic->endio_wait.lock);
	queue_work(ic->commit_wq, &ic->commit_work);
}

static void do_endio(struct dm_integrity_c *ic, struct bio *bio)
{
	int r = dm_integrity_failed(ic);
	if (unlikely(r) && !bio->bi_error)
		bio->bi_error = r;
	bio_endio(bio);
}

static void do_endio_flush(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
{
	struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));

	if (unlikely(dio->fua) && likely(!bio->bi_error) && likely(!dm_integrity_failed(ic)))
		submit_flush_bio(ic, dio);
	else
		do_endio(ic, bio);
}

static void dec_in_flight(struct dm_integrity_io *dio)
{
	if (atomic_dec_and_test(&dio->in_flight)) {
		struct dm_integrity_c *ic = dio->ic;
		struct bio *bio;

		remove_range(ic, &dio->range);

		if (unlikely(dio->write))
			schedule_autocommit(ic);

		bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));

		if (unlikely(dio->bi_error) && !bio->bi_error)
			bio->bi_error = dio->bi_error;
		if (likely(!bio->bi_error) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) {
			dio->range.logical_sector += dio->range.n_sectors;
			bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT);
			INIT_WORK(&dio->work, integrity_bio_wait);
			queue_work(ic->wait_wq, &dio->work);
			return;
		}
		do_endio_flush(ic, dio);
	}
}

static void integrity_end_io(struct bio *bio)
{
	struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));

	bio->bi_iter = dio->orig_bi_iter;
	bio->bi_bdev = dio->orig_bi_bdev;
	if (dio->orig_bi_integrity) {
		bio->bi_integrity = dio->orig_bi_integrity;
		bio->bi_opf |= REQ_INTEGRITY;
	}
	bio->bi_end_io = dio->orig_bi_end_io;

	if (dio->completion)
		complete(dio->completion);

	dec_in_flight(dio);
}

static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector,
				      const char *data, char *result)
{
	__u64 sector_le = cpu_to_le64(sector);
	SHASH_DESC_ON_STACK(req, ic->internal_hash);
	int r;
	unsigned digest_size;

	req->tfm = ic->internal_hash;
	req->flags = 0;

	r = crypto_shash_init(req);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_init", r);
		goto failed;
	}

	r = crypto_shash_update(req, (const __u8 *)&sector_le, sizeof sector_le);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_update", r);
		goto failed;
	}

	r = crypto_shash_update(req, data, ic->sectors_per_block << SECTOR_SHIFT);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_update", r);
		goto failed;
	}

	r = crypto_shash_final(req, result);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_final", r);
		goto failed;
	}

	digest_size = crypto_shash_digestsize(ic->internal_hash);
	if (unlikely(digest_size < ic->tag_size))
		memset(result + digest_size, 0, ic->tag_size - digest_size);

	return;

failed:
	/* This shouldn't happen; the hash functions have no reason to fail. */
	get_random_bytes(result, ic->tag_size);
}

static void integrity_metadata(struct work_struct *w)
{
	struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
	struct dm_integrity_c *ic = dio->ic;

	int r;

	if (ic->internal_hash) {
		struct bvec_iter iter;
		struct bio_vec bv;
		unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
		struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
		char *checksums;
		unsigned extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
		char checksums_onstack[ic->tag_size + extra_space];
		unsigned sectors_to_process = dio->range.n_sectors;
		sector_t sector = dio->range.logical_sector;

		if (unlikely(ic->mode == 'R'))
			goto skip_io;

		checksums = kmalloc((PAGE_SIZE >> SECTOR_SHIFT >> ic->sb->log2_sectors_per_block) * ic->tag_size + extra_space,
				    GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
		if (!checksums)
			checksums = checksums_onstack;

		__bio_for_each_segment(bv, bio, iter, dio->orig_bi_iter) {
			unsigned pos;
			char *mem, *checksums_ptr;

again:
			mem = (char *)kmap_atomic(bv.bv_page) + bv.bv_offset;
			pos = 0;
			checksums_ptr = checksums;
			do {
				integrity_sector_checksum(ic, sector, mem + pos, checksums_ptr);
				checksums_ptr += ic->tag_size;
				sectors_to_process -= ic->sectors_per_block;
				pos += ic->sectors_per_block << SECTOR_SHIFT;
				sector += ic->sectors_per_block;
			} while (pos < bv.bv_len && sectors_to_process && checksums != checksums_onstack);
			kunmap_atomic(mem);

			r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
						checksums_ptr - checksums, !dio->write ? TAG_CMP : TAG_WRITE);
			if (unlikely(r)) {
				if (r > 0) {
					DMERR("Checksum failed at sector 0x%llx",
					      (unsigned long long)(sector - ((r + ic->tag_size - 1) / ic->tag_size)));
					r = -EILSEQ;
				}
				if (likely(checksums != checksums_onstack))
					kfree(checksums);
				goto error;
			}

			if (!sectors_to_process)
				break;

			if (unlikely(pos < bv.bv_len)) {
				bv.bv_offset += pos;
				bv.bv_len -= pos;
				goto again;
			}
		}

		if (likely(checksums != checksums_onstack))
			kfree(checksums);
	} else {
		struct bio_integrity_payload *bip = dio->orig_bi_integrity;

		if (bip) {
			struct bio_vec biv;
			struct bvec_iter iter;
			unsigned data_to_process = dio->range.n_sectors;
			sector_to_block(ic, data_to_process);
			data_to_process *= ic->tag_size;

			bip_for_each_vec(biv, bip, iter) {
				unsigned char *tag;
				unsigned this_len;

				BUG_ON(PageHighMem(biv.bv_page));
				tag = lowmem_page_address(biv.bv_page) + biv.bv_offset;
				this_len = min(biv.bv_len, data_to_process);
				r = dm_integrity_rw_tag(ic, tag, &dio->metadata_block, &dio->metadata_offset,
							this_len, !dio->write ? TAG_READ : TAG_WRITE);
				if (unlikely(r))
					goto error;
				data_to_process -= this_len;
				if (!data_to_process)
					break;
			}
		}
	}
skip_io:
	dec_in_flight(dio);
	return;
error:
	dio->bi_error = r;
	dec_in_flight(dio);
}

static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_integrity_c *ic = ti->private;
	struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
	struct bio_integrity_payload *bip;

	sector_t area, offset;

	dio->ic = ic;
	dio->bi_error = 0;

	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
		submit_flush_bio(ic, dio);
		return DM_MAPIO_SUBMITTED;
	}

	dio->range.logical_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
	dio->write = bio_op(bio) == REQ_OP_WRITE;
	dio->fua = dio->write && bio->bi_opf & REQ_FUA;
	if (unlikely(dio->fua)) {
		/*
		 * Don't pass down the FUA flag because we have to flush
		 * the disk cache anyway.
		 */
		bio->bi_opf &= ~REQ_FUA;
	}
	if (unlikely(dio->range.logical_sector + bio_sectors(bio) > ic->provided_data_sectors)) {
		DMERR("Too big sector number: 0x%llx + 0x%x > 0x%llx",
		      (unsigned long long)dio->range.logical_sector, bio_sectors(bio),
		      (unsigned long long)ic->provided_data_sectors);
		return -EIO;
	}
	if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned)(ic->sectors_per_block - 1))) {
		DMERR("Bio not aligned on %u sectors: 0x%llx, 0x%x",
		      ic->sectors_per_block,
		      (unsigned long long)dio->range.logical_sector, bio_sectors(bio));
		return -EIO;
	}

	if (ic->sectors_per_block > 1) {
		struct bvec_iter iter;
		struct bio_vec bv;
		bio_for_each_segment(bv, bio, iter) {
			if (unlikely((bv.bv_offset | bv.bv_len) & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) {
				DMERR("Bio vector (%u,%u) is not aligned on %u-sector boundary",
				      bv.bv_offset, bv.bv_len, ic->sectors_per_block);
				return -EIO;
			}
		}
	}

	bip = bio_integrity(bio);
	if (!ic->internal_hash) {
		if (bip) {
			unsigned wanted_tag_size = bio_sectors(bio) >> ic->sb->log2_sectors_per_block;
			if (ic->log2_tag_size >= 0)
				wanted_tag_size <<= ic->log2_tag_size;
			else
				wanted_tag_size *= ic->tag_size;
			if (unlikely(wanted_tag_size != bip->bip_iter.bi_size)) {
				DMERR("Invalid integrity data size %u, expected %u", bip->bip_iter.bi_size, wanted_tag_size);
				return -EIO;
			}
		}
	} else {
		if (unlikely(bip != NULL)) {
			DMERR("Unexpected integrity data when using internal hash");
			return -EIO;
		}
	}

	if (unlikely(ic->mode == 'R') && unlikely(dio->write))
		return -EIO;

	get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
	dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
	bio->bi_iter.bi_sector = get_data_sector(ic, area, offset);

	dm_integrity_map_continue(dio, true);
	return DM_MAPIO_SUBMITTED;
}

static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
				 unsigned journal_section, unsigned journal_entry)
{
	struct dm_integrity_c *ic = dio->ic;
	sector_t logical_sector;
	unsigned n_sectors;

	logical_sector = dio->range.logical_sector;
	n_sectors = dio->range.n_sectors;
	do {
		struct bio_vec bv = bio_iovec(bio);
		char *mem;

		if (unlikely(bv.bv_len >> SECTOR_SHIFT > n_sectors))
			bv.bv_len = n_sectors << SECTOR_SHIFT;
		n_sectors -= bv.bv_len >> SECTOR_SHIFT;
		bio_advance_iter(bio, &bio->bi_iter, bv.bv_len);
retry_kmap:
		mem = kmap_atomic(bv.bv_page);
		if (likely(dio->write))
			flush_dcache_page(bv.bv_page);

		do {
			struct journal_entry *je = access_journal_entry(ic, journal_section, journal_entry);

			if (unlikely(!dio->write)) {
				struct journal_sector *js;
				char *mem_ptr;
				unsigned s;

				if (unlikely(journal_entry_is_inprogress(je))) {
					flush_dcache_page(bv.bv_page);
					kunmap_atomic(mem);

					__io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
					goto retry_kmap;
				}
				smp_rmb();
				BUG_ON(journal_entry_get_sector(je) != logical_sector);
				js = access_journal_data(ic, journal_section, journal_entry);
				mem_ptr = mem + bv.bv_offset;
				s = 0;
				do {
					memcpy(mem_ptr, js, JOURNAL_SECTOR_DATA);
					*(commit_id_t *)(mem_ptr + JOURNAL_SECTOR_DATA) = je->last_bytes[s];
					js++;
					mem_ptr += 1 << SECTOR_SHIFT;
				} while (++s < ic->sectors_per_block);
#ifdef INTERNAL_VERIFY
				if (ic->internal_hash) {
					char checksums_onstack[max(crypto_shash_digestsize(ic->internal_hash), ic->tag_size)];

					integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
					if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
						DMERR("Checksum failed when reading from journal, at sector 0x%llx",
						      (unsigned long long)logical_sector);
					}
				}
#endif
			}

			if (!ic->internal_hash) {
				struct bio_integrity_payload *bip = bio_integrity(bio);
				unsigned tag_todo = ic->tag_size;
				char *tag_ptr = journal_entry_tag(ic, je);

				if (bip) do {
					struct bio_vec biv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
					unsigned tag_now = min(biv.bv_len, tag_todo);
					char *tag_addr;
					BUG_ON(PageHighMem(biv.bv_page));
					tag_addr = lowmem_page_address(biv.bv_page) + biv.bv_offset;
					if (likely(dio->write))
						memcpy(tag_ptr, tag_addr, tag_now);
					else
						memcpy(tag_addr, tag_ptr, tag_now);
					bvec_iter_advance(bip->bip_vec, &bip->bip_iter, tag_now);
					tag_ptr += tag_now;
					tag_todo -= tag_now;
				} while (unlikely(tag_todo)); else {
					if (likely(dio->write))
						memset(tag_ptr, 0, tag_todo);
				}
			}

			if (likely(dio->write)) {
				struct journal_sector *js;
				unsigned s;

				js = access_journal_data(ic, journal_section, journal_entry);
				memcpy(js, mem + bv.bv_offset, ic->sectors_per_block << SECTOR_SHIFT);

				s = 0;
				do {
					je->last_bytes[s] = js[s].commit_id;
				} while (++s < ic->sectors_per_block);

				if (ic->internal_hash) {
					unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
					if (unlikely(digest_size > ic->tag_size)) {
						char checksums_onstack[digest_size];
						integrity_sector_checksum(ic, logical_sector, (char *)js, checksums_onstack);
						memcpy(journal_entry_tag(ic, je), checksums_onstack, ic->tag_size);
					} else
						integrity_sector_checksum(ic, logical_sector, (char *)js, journal_entry_tag(ic, je));
				}

				journal_entry_set_sector(je, logical_sector);
			}
			logical_sector += ic->sectors_per_block;

			journal_entry++;
			if (unlikely(journal_entry == ic->journal_section_entries)) {
				journal_entry = 0;
				journal_section++;
				wraparound_section(ic, &journal_section);
			}

			bv.bv_offset += ic->sectors_per_block << SECTOR_SHIFT;
		} while (bv.bv_len -= ic->sectors_per_block << SECTOR_SHIFT);

		if (unlikely(!dio->write))
			flush_dcache_page(bv.bv_page);
		kunmap_atomic(mem);
	} while (n_sectors);

	if (likely(dio->write)) {
		smp_mb();
		if (unlikely(waitqueue_active(&ic->copy_to_journal_wait)))
			wake_up(&ic->copy_to_journal_wait);
		if (ACCESS_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) {
			queue_work(ic->commit_wq, &ic->commit_work);
		} else {
			schedule_autocommit(ic);
		}
	} else {
		remove_range(ic, &dio->range);
	}

	if (unlikely(bio->bi_iter.bi_size)) {
		sector_t area, offset;

		dio->range.logical_sector = logical_sector;
		get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
		dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
		return true;
	}

	return false;
}

static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map)
{
	struct dm_integrity_c *ic = dio->ic;
	struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
	unsigned journal_section, journal_entry;
	unsigned journal_read_pos;
	struct completion read_comp;
	bool need_sync_io = ic->internal_hash && !dio->write;

	if (need_sync_io && from_map) {
		INIT_WORK(&dio->work, integrity_bio_wait);
		queue_work(ic->metadata_wq, &dio->work);
		return;
	}

lock_retry:
	spin_lock_irq(&ic->endio_wait.lock);
retry:
	if (unlikely(dm_integrity_failed(ic))) {
		spin_unlock_irq(&ic->endio_wait.lock);
		do_endio(ic, bio);
		return;
	}
	dio->range.n_sectors = bio_sectors(bio);
	journal_read_pos = NOT_FOUND;
	if (likely(ic->mode == 'J')) {
		if (dio->write) {
			unsigned next_entry, i, pos;
			unsigned ws, we;

			dio->range.n_sectors = min(dio->range.n_sectors, ic->free_sectors);
			if (unlikely(!dio->range.n_sectors))
				goto sleep;
			ic->free_sectors -= dio->range.n_sectors;
			journal_section = ic->free_section;
			journal_entry = ic->free_section_entry;

			next_entry = ic->free_section_entry + dio->range.n_sectors;
			ic->free_section_entry = next_entry % ic->journal_section_entries;
			ic->free_section += next_entry / ic->journal_section_entries;
			ic->n_uncommitted_sections += next_entry / ic->journal_section_entries;
			wraparound_section(ic, &ic->free_section);

			pos = journal_section * ic->journal_section_entries + journal_entry;
			ws = journal_section;
			we = journal_entry;
			i = 0;
			do {
				struct journal_entry *je;

				add_journal_node(ic, &ic->journal_tree[pos], dio->range.logical_sector + i);
				pos++;
				if (unlikely(pos >= ic->journal_entries))
					pos = 0;

				je = access_journal_entry(ic, ws, we);
				BUG_ON(!journal_entry_is_unused(je));
				journal_entry_set_inprogress(je);
				we++;
				if (unlikely(we == ic->journal_section_entries)) {
					we = 0;
					ws++;
					wraparound_section(ic, &ws);
				}
			} while ((i += ic->sectors_per_block) < dio->range.n_sectors);

			spin_unlock_irq(&ic->endio_wait.lock);
			goto journal_read_write;
		} else {
			sector_t next_sector;
			journal_read_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
			if (likely(journal_read_pos == NOT_FOUND)) {
				if (unlikely(dio->range.n_sectors > next_sector - dio->range.logical_sector))
					dio->range.n_sectors = next_sector - dio->range.logical_sector;
			} else {
				unsigned i;
				unsigned jp = journal_read_pos + 1;
				for (i = ic->sectors_per_block; i < dio->range.n_sectors; i += ic->sectors_per_block, jp++) {
					if (!test_journal_node(ic, jp, dio->range.logical_sector + i))
						break;
				}
				dio->range.n_sectors = i;
			}
		}
	}
	if (unlikely(!add_new_range(ic, &dio->range))) {
		/*
		 * We must not sleep in the request routine because it could
		 * stall bios on current->bio_list.
		 * So, we offload the bio to a workqueue if we have to sleep.
		 */
sleep:
		if (from_map) {
			spin_unlock_irq(&ic->endio_wait.lock);
			INIT_WORK(&dio->work, integrity_bio_wait);
			queue_work(ic->wait_wq, &dio->work);
			return;
		} else {
			sleep_on_endio_wait(ic);
			goto retry;
		}
	}
	spin_unlock_irq(&ic->endio_wait.lock);

	if (unlikely(journal_read_pos != NOT_FOUND)) {
		journal_section = journal_read_pos / ic->journal_section_entries;
		journal_entry = journal_read_pos % ic->journal_section_entries;
		goto journal_read_write;
	}

	dio->in_flight = (atomic_t)ATOMIC_INIT(2);

	if (need_sync_io) {
		read_comp = COMPLETION_INITIALIZER_ONSTACK(read_comp);
		dio->completion = &read_comp;
	} else
		dio->completion = NULL;

	dio->orig_bi_iter = bio->bi_iter;

	dio->orig_bi_bdev = bio->bi_bdev;
	bio->bi_bdev = ic->dev->bdev;

	dio->orig_bi_integrity = bio_integrity(bio);
	bio->bi_integrity = NULL;
	bio->bi_opf &= ~REQ_INTEGRITY;

	dio->orig_bi_end_io = bio->bi_end_io;
	bio->bi_end_io = integrity_end_io;

	bio->bi_iter.bi_size = dio->range.n_sectors << SECTOR_SHIFT;
	bio->bi_iter.bi_sector += ic->start;
	generic_make_request(bio);

	if (need_sync_io) {
		wait_for_completion_io(&read_comp);
		integrity_metadata(&dio->work);
	} else {
		INIT_WORK(&dio->work, integrity_metadata);
		queue_work(ic->metadata_wq, &dio->work);
	}

	return;

journal_read_write:
	if (unlikely(__journal_read_write(dio, bio, journal_section, journal_entry)))
		goto lock_retry;

	do_endio_flush(ic, dio);
}

static void integrity_bio_wait(struct work_struct *w)
{
	struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);

	dm_integrity_map_continue(dio, false);
}

static void pad_uncommitted(struct dm_integrity_c *ic)
{
	if (ic->free_section_entry) {
		ic->free_sectors -= ic->journal_section_entries - ic->free_section_entry;
		ic->free_section_entry = 0;
		ic->free_section++;
		wraparound_section(ic, &ic->free_section);
		ic->n_uncommitted_sections++;
	}
}

static void integrity_commit(struct work_struct *w)
{
	struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, commit_work);
	unsigned commit_start, commit_sections;
	unsigned i, j, n;
	struct bio *flushes;

	del_timer(&ic->autocommit_timer);

	spin_lock_irq(&ic->endio_wait.lock);
	flushes = bio_list_get(&ic->flush_bio_list);
	if (unlikely(ic->mode != 'J')) {
		spin_unlock_irq(&ic->endio_wait.lock);
		dm_integrity_flush_buffers(ic);
		goto release_flush_bios;
	}

	pad_uncommitted(ic);
	commit_start = ic->uncommitted_section;
	commit_sections = ic->n_uncommitted_sections;
	spin_unlock_irq(&ic->endio_wait.lock);

	if (!commit_sections)
		goto release_flush_bios;

	i = commit_start;
	for (n = 0; n < commit_sections; n++) {
		for (j = 0; j < ic->journal_section_entries; j++) {
			struct journal_entry *je;
			je = access_journal_entry(ic, i, j);
			io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
		}
		for (j = 0; j < ic->journal_section_sectors; j++) {
			struct journal_sector *js;
			js = access_journal(ic, i, j);
			js->commit_id = dm_integrity_commit_id(ic, i, j, ic->commit_seq);
		}
		i++;
		if (unlikely(i >= ic->journal_sections))
			ic->commit_seq = next_commit_seq(ic->commit_seq);
		wraparound_section(ic, &i);
	}
	smp_rmb();

	write_journal(ic, commit_start, commit_sections);

	spin_lock_irq(&ic->endio_wait.lock);
	ic->uncommitted_section += commit_sections;
	wraparound_section(ic, &ic->uncommitted_section);
	ic->n_uncommitted_sections -= commit_sections;
	ic->n_committed_sections += commit_sections;
	spin_unlock_irq(&ic->endio_wait.lock);

	if (ACCESS_ONCE(ic->free_sectors) <= ic->free_sectors_threshold)
		queue_work(ic->writer_wq, &ic->writer_work);

release_flush_bios:
	while (flushes) {
		struct bio *next = flushes->bi_next;
		flushes->bi_next = NULL;
		do_endio(ic, flushes);
		flushes = next;
	}
}
1793
1794static void complete_copy_from_journal(unsigned long error, void *context)
1795{
1796 struct journal_io *io = context;
1797 struct journal_completion *comp = io->comp;
1798 struct dm_integrity_c *ic = comp->ic;
1799 remove_range(ic, &io->range);
1800 mempool_free(io, ic->journal_io_mempool);
1801 if (unlikely(error != 0))
1802 dm_integrity_io_error(ic, "copying from journal", -EIO);
1803 complete_journal_op(comp);
1804}
1805
9d609f85
MP
1806static void restore_last_bytes(struct dm_integrity_c *ic, struct journal_sector *js,
1807 struct journal_entry *je)
1808{
1809 unsigned s = 0;
1810 do {
1811 js->commit_id = je->last_bytes[s];
1812 js++;
1813 } while (++s < ic->sectors_per_block);
1814}
1815
7eada909
MP
1816static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
1817 unsigned write_sections, bool from_replay)
1818{
1819 unsigned i, j, n;
1820 struct journal_completion comp;
1821
1822 comp.ic = ic;
1823 comp.in_flight = (atomic_t)ATOMIC_INIT(1);
1824 comp.comp = COMPLETION_INITIALIZER_ONSTACK(comp.comp);
1825
1826 i = write_start;
1827 for (n = 0; n < write_sections; n++, i++, wraparound_section(ic, &i)) {
1828#ifndef INTERNAL_VERIFY
1829 if (unlikely(from_replay))
1830#endif
1831 rw_section_mac(ic, i, false);
1832 for (j = 0; j < ic->journal_section_entries; j++) {
1833 struct journal_entry *je = access_journal_entry(ic, i, j);
1834 sector_t sec, area, offset;
1835 unsigned k, l, next_loop;
1836 sector_t metadata_block;
1837 unsigned metadata_offset;
1838 struct journal_io *io;
1839
1840 if (journal_entry_is_unused(je))
1841 continue;
1842 BUG_ON(unlikely(journal_entry_is_inprogress(je)) && !from_replay);
1843 sec = journal_entry_get_sector(je);
1844 if (unlikely(from_replay)) {
1845 if (unlikely(sec & (unsigned)(ic->sectors_per_block - 1))) {
1846 dm_integrity_io_error(ic, "invalid sector in journal", -EIO);
1847 sec &= ~(sector_t)(ic->sectors_per_block - 1);
1848 }
1849 }
1850 get_area_and_offset(ic, sec, &area, &offset);
1851 restore_last_bytes(ic, access_journal_data(ic, i, j), je);
1852 for (k = j + 1; k < ic->journal_section_entries; k++) {
1853 struct journal_entry *je2 = access_journal_entry(ic, i, k);
1854 sector_t sec2, area2, offset2;
1855 if (journal_entry_is_unused(je2))
1856 break;
1857 BUG_ON(unlikely(journal_entry_is_inprogress(je2)) && !from_replay);
1858 sec2 = journal_entry_get_sector(je2);
1859 get_area_and_offset(ic, sec2, &area2, &offset2);
1860 if (area2 != area || offset2 != offset + ((k - j) << ic->sb->log2_sectors_per_block))
1861 break;
1862 restore_last_bytes(ic, access_journal_data(ic, i, k), je2);
1863 }
1864 next_loop = k - 1;
1865
1866 io = mempool_alloc(ic->journal_io_mempool, GFP_NOIO);
1867 io->comp = &comp;
1868 io->range.logical_sector = sec;
1869 io->range.n_sectors = (k - j) << ic->sb->log2_sectors_per_block;
1870
1871 spin_lock_irq(&ic->endio_wait.lock);
1872 while (unlikely(!add_new_range(ic, &io->range)))
1873 sleep_on_endio_wait(ic);
1874
1875 if (likely(!from_replay)) {
1876 struct journal_node *section_node = &ic->journal_tree[i * ic->journal_section_entries];
1877
1878 /* don't write if there is newer committed sector */
1879 while (j < k && find_newer_committed_node(ic, &section_node[j])) {
1880 struct journal_entry *je2 = access_journal_entry(ic, i, j);
1881
1882 journal_entry_set_unused(je2);
1883 remove_journal_node(ic, &section_node[j]);
1884 j++;
1885 sec += ic->sectors_per_block;
1886 offset += ic->sectors_per_block;
1887 }
1888 while (j < k && find_newer_committed_node(ic, &section_node[k - 1])) {
1889 struct journal_entry *je2 = access_journal_entry(ic, i, k - 1);
1890
1891 journal_entry_set_unused(je2);
1892 remove_journal_node(ic, &section_node[k - 1]);
1893 k--;
1894 }
1895 if (j == k) {
1896 remove_range_unlocked(ic, &io->range);
1897 spin_unlock_irq(&ic->endio_wait.lock);
1898 mempool_free(io, ic->journal_io_mempool);
1899 goto skip_io;
1900 }
1901 for (l = j; l < k; l++) {
1902 remove_journal_node(ic, &section_node[l]);
1903 }
1904 }
1905 spin_unlock_irq(&ic->endio_wait.lock);
1906
1907 metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
1908 for (l = j; l < k; l++) {
1909 int r;
1910 struct journal_entry *je2 = access_journal_entry(ic, i, l);
1911
1912 if (
1913#ifndef INTERNAL_VERIFY
1914 unlikely(from_replay) &&
1915#endif
1916 ic->internal_hash) {
1917 char test_tag[max(crypto_shash_digestsize(ic->internal_hash), ic->tag_size)];
1918
1919 integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block),
1920 (char *)access_journal_data(ic, i, l), test_tag);
1921 if (unlikely(memcmp(test_tag, journal_entry_tag(ic, je2), ic->tag_size)))
1922 dm_integrity_io_error(ic, "tag mismatch when replaying journal", -EILSEQ);
1923 }
1924
1925 journal_entry_set_unused(je2);
1926 r = dm_integrity_rw_tag(ic, journal_entry_tag(ic, je2), &metadata_block, &metadata_offset,
1927 ic->tag_size, TAG_WRITE);
1928 if (unlikely(r)) {
1929 dm_integrity_io_error(ic, "writing tags", r);
1930 }
1931 }
1932
1933 atomic_inc(&comp.in_flight);
1934 copy_from_journal(ic, i, j << ic->sb->log2_sectors_per_block,
1935 (k - j) << ic->sb->log2_sectors_per_block,
1936 get_data_sector(ic, area, offset),
1937 complete_copy_from_journal, io);
1938skip_io:
1939 j = next_loop;
1940 }
1941 }
1942
1943 dm_bufio_write_dirty_buffers_async(ic->bufio);
1944
1945 complete_journal_op(&comp);
1946 wait_for_completion_io(&comp.comp);
1947
1948 dm_integrity_flush_buffers(ic);
1949}
1950
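/*
 * Journal writer work: flush all committed journal sections to their
 * final location and return their space to the journal, waking up any
 * writers that were blocked on a completely full journal.
 */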
1951static void integrity_writer(struct work_struct *w)
1952{
1953 struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, writer_work);
1954 unsigned write_start, write_sections;
1955
1956 unsigned prev_free_sectors;
1957
1958 /* the following test is not needed, but it tests the replay code */
1959 if (ACCESS_ONCE(ic->suspending))
1960 return;
1961
1962 spin_lock_irq(&ic->endio_wait.lock);
1963 write_start = ic->committed_section;
1964 write_sections = ic->n_committed_sections;
1965 spin_unlock_irq(&ic->endio_wait.lock);
1966
1967 if (!write_sections)
1968 return;
1969
1970 do_journal_write(ic, write_start, write_sections, false);
1971
1972 spin_lock_irq(&ic->endio_wait.lock);
1973
1974 ic->committed_section += write_sections;
1975 wraparound_section(ic, &ic->committed_section);
1976 ic->n_committed_sections -= write_sections;
1977
1978 prev_free_sectors = ic->free_sectors;
1979 ic->free_sectors += write_sections * ic->journal_section_entries;
1980 if (unlikely(!prev_free_sectors))
1981 wake_up_locked(&ic->endio_wait);
1982
1983 spin_unlock_irq(&ic->endio_wait.lock);
1984}
1985
1986static void init_journal(struct dm_integrity_c *ic, unsigned start_section,
1987 unsigned n_sections, unsigned char commit_seq)
1988{
1989 unsigned i, j, n;
1990
1991 if (!n_sections)
1992 return;
1993
1994 for (n = 0; n < n_sections; n++) {
1995 i = start_section + n;
1996 wraparound_section(ic, &i);
1997 for (j = 0; j < ic->journal_section_sectors; j++) {
1998 struct journal_sector *js = access_journal(ic, i, j);
1999 memset(&js->entries, 0, JOURNAL_SECTOR_DATA);
2000 js->commit_id = dm_integrity_commit_id(ic, i, j, commit_seq);
2001 }
2002 for (j = 0; j < ic->journal_section_entries; j++) {
2003 struct journal_entry *je = access_journal_entry(ic, i, j);
2004 journal_entry_set_unused(je);
2005 }
2006 }
2007
2008 write_journal(ic, start_section, n_sections);
2009}
2010
2011static int find_commit_seq(struct dm_integrity_c *ic, unsigned i, unsigned j, commit_id_t id)
2012{
2013 unsigned char k;
2014 for (k = 0; k < N_COMMIT_IDS; k++) {
2015 if (dm_integrity_commit_id(ic, i, j, k) == id)
2016 return k;
2017 }
2018 dm_integrity_io_error(ic, "journal commit id", -EIO);
2019 return -EIO;
2020}
2021
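/*
 * Reconstruct the journal state after an unclean shutdown.  Every
 * journal sector carries a commit_id derived from its position and one
 * of N_COMMIT_IDS per-device seeds; the sequence number advances each
 * time writing wraps around the journal.  Finding the first unused
 * sequence number identifies the most recently written sections, and
 * replay proceeds from there, stopping at the first sector whose
 * commit_id does not match the expected sequence (the signature of a
 * crash in mid-write).
 */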
2022static void replay_journal(struct dm_integrity_c *ic)
2023{
2024 unsigned i, j;
2025 bool used_commit_ids[N_COMMIT_IDS];
2026 unsigned max_commit_id_sections[N_COMMIT_IDS];
2027 unsigned write_start, write_sections;
2028 unsigned continue_section;
2029 bool journal_empty;
2030 unsigned char unused, last_used, want_commit_seq;
2031
2032 if (ic->mode == 'R')
2033 return;
2034
2035 if (ic->journal_uptodate)
2036 return;
2037
2038 last_used = 0;
2039 write_start = 0;
2040
2041 if (!ic->just_formatted) {
2042 DEBUG_print("reading journal\n");
2043 rw_journal(ic, REQ_OP_READ, 0, 0, ic->journal_sections, NULL);
2044 if (ic->journal_io)
2045 DEBUG_bytes(lowmem_page_address(ic->journal_io[0].page), 64, "read journal");
2046 if (ic->journal_io) {
2047 struct journal_completion crypt_comp;
2048 crypt_comp.ic = ic;
2049 crypt_comp.comp = COMPLETION_INITIALIZER_ONSTACK(crypt_comp.comp);
2050 crypt_comp.in_flight = (atomic_t)ATOMIC_INIT(0);
2051 encrypt_journal(ic, false, 0, ic->journal_sections, &crypt_comp);
2052 wait_for_completion(&crypt_comp.comp);
2053 }
2054 DEBUG_bytes(lowmem_page_address(ic->journal[0].page), 64, "decrypted journal");
2055 }
2056
2057 if (dm_integrity_failed(ic))
2058 goto clear_journal;
2059
2060 journal_empty = true;
2061 memset(used_commit_ids, 0, sizeof used_commit_ids);
2062 memset(max_commit_id_sections, 0, sizeof max_commit_id_sections);
2063 for (i = 0; i < ic->journal_sections; i++) {
2064 for (j = 0; j < ic->journal_section_sectors; j++) {
2065 int k;
2066 struct journal_sector *js = access_journal(ic, i, j);
2067 k = find_commit_seq(ic, i, j, js->commit_id);
2068 if (k < 0)
2069 goto clear_journal;
2070 used_commit_ids[k] = true;
2071 max_commit_id_sections[k] = i;
2072 }
2073 if (journal_empty) {
2074 for (j = 0; j < ic->journal_section_entries; j++) {
2075 struct journal_entry *je = access_journal_entry(ic, i, j);
2076 if (!journal_entry_is_unused(je)) {
2077 journal_empty = false;
2078 break;
2079 }
2080 }
2081 }
2082 }
2083
2084 if (!used_commit_ids[N_COMMIT_IDS - 1]) {
2085 unused = N_COMMIT_IDS - 1;
2086 while (unused && !used_commit_ids[unused - 1])
2087 unused--;
2088 } else {
2089 for (unused = 0; unused < N_COMMIT_IDS; unused++)
2090 if (!used_commit_ids[unused])
2091 break;
2092 if (unused == N_COMMIT_IDS) {
2093 dm_integrity_io_error(ic, "journal commit ids", -EIO);
2094 goto clear_journal;
2095 }
2096 }
2097 DEBUG_print("first unused commit seq %d [%d,%d,%d,%d]\n",
2098 unused, used_commit_ids[0], used_commit_ids[1],
2099 used_commit_ids[2], used_commit_ids[3]);
2100
2101 last_used = prev_commit_seq(unused);
2102 want_commit_seq = prev_commit_seq(last_used);
2103
2104 if (!used_commit_ids[want_commit_seq] && used_commit_ids[prev_commit_seq(want_commit_seq)])
2105 journal_empty = true;
2106
2107 write_start = max_commit_id_sections[last_used] + 1;
2108 if (unlikely(write_start >= ic->journal_sections))
2109 want_commit_seq = next_commit_seq(want_commit_seq);
2110 wraparound_section(ic, &write_start);
2111
2112 i = write_start;
2113 for (write_sections = 0; write_sections < ic->journal_sections; write_sections++) {
2114 for (j = 0; j < ic->journal_section_sectors; j++) {
2115 struct journal_sector *js = access_journal(ic, i, j);
2116
2117 if (js->commit_id != dm_integrity_commit_id(ic, i, j, want_commit_seq)) {
2118 /*
2119 * This could be caused by crash during writing.
2120 * We won't replay the inconsistent part of the
2121 * journal.
2122 */
2123 DEBUG_print("commit id mismatch at position (%u, %u): %d != %d\n",
2124 i, j, find_commit_seq(ic, i, j, js->commit_id), want_commit_seq);
2125 goto brk;
2126 }
2127 }
2128 i++;
2129 if (unlikely(i >= ic->journal_sections))
2130 want_commit_seq = next_commit_seq(want_commit_seq);
2131 wraparound_section(ic, &i);
2132 }
2133brk:
2134
2135 if (!journal_empty) {
2136 DEBUG_print("replaying %u sections, starting at %u, commit seq %d\n",
2137 write_sections, write_start, want_commit_seq);
2138 do_journal_write(ic, write_start, write_sections, true);
2139 }
2140
2141 if (write_sections == ic->journal_sections && (ic->mode == 'J' || journal_empty)) {
2142 continue_section = write_start;
2143 ic->commit_seq = want_commit_seq;
2144 DEBUG_print("continuing from section %u, commit seq %d\n", write_start, ic->commit_seq);
2145 } else {
2146 unsigned s;
2147 unsigned char erase_seq;
2148clear_journal:
2149 DEBUG_print("clearing journal\n");
2150
2151 erase_seq = prev_commit_seq(prev_commit_seq(last_used));
2152 s = write_start;
2153 init_journal(ic, s, 1, erase_seq);
2154 s++;
2155 wraparound_section(ic, &s);
2156 if (ic->journal_sections >= 2) {
2157 init_journal(ic, s, ic->journal_sections - 2, erase_seq);
2158 s += ic->journal_sections - 2;
2159 wraparound_section(ic, &s);
2160 init_journal(ic, s, 1, erase_seq);
2161 }
2162
2163 continue_section = 0;
2164 ic->commit_seq = next_commit_seq(erase_seq);
2165 }
2166
2167 ic->committed_section = continue_section;
2168 ic->n_committed_sections = 0;
2169
2170 ic->uncommitted_section = continue_section;
2171 ic->n_uncommitted_sections = 0;
2172
2173 ic->free_section = continue_section;
2174 ic->free_section_entry = 0;
2175 ic->free_sectors = ic->journal_entries;
2176
2177 ic->journal_tree_root = RB_ROOT;
2178 for (i = 0; i < ic->journal_entries; i++)
2179 init_journal_node(&ic->journal_tree[i]);
2180}
2181
2182static void dm_integrity_postsuspend(struct dm_target *ti)
2183{
2184 struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
2185
2186 del_timer_sync(&ic->autocommit_timer);
2187
2188 ic->suspending = true;
2189
2190 queue_work(ic->commit_wq, &ic->commit_work);
2191 drain_workqueue(ic->commit_wq);
2192
2193 if (ic->mode == 'J') {
2194 drain_workqueue(ic->writer_wq);
2195 dm_integrity_flush_buffers(ic);
2196 }
2197
2198 ic->suspending = false;
2199
2200 BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
2201
2202 ic->journal_uptodate = true;
2203}
2204
2205static void dm_integrity_resume(struct dm_target *ti)
2206{
2207 struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
2208
2209 replay_journal(ic);
2210}
2211
2212static void dm_integrity_status(struct dm_target *ti, status_type_t type,
2213 unsigned status_flags, char *result, unsigned maxlen)
2214{
2215 struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
2216 unsigned arg_count;
2217 size_t sz = 0;
2218
2219 switch (type) {
2220 case STATUSTYPE_INFO:
2221 result[0] = '\0';
2222 break;
2223
2224 case STATUSTYPE_TABLE: {
2225 __u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100;
2226 watermark_percentage += ic->journal_entries / 2;
2227 do_div(watermark_percentage, ic->journal_entries);
2228 arg_count = 5;
2229 arg_count += ic->sectors_per_block != 1;
2230 arg_count += !!ic->internal_hash_alg.alg_string;
2231 arg_count += !!ic->journal_crypt_alg.alg_string;
2232 arg_count += !!ic->journal_mac_alg.alg_string;
2233 DMEMIT("%s %llu %u %c %u", ic->dev->name, (unsigned long long)ic->start,
2234 ic->tag_size, ic->mode, arg_count);
2235 DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS);
2236 DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
2237 DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors);
2238 DMEMIT(" journal_watermark:%u", (unsigned)watermark_percentage);
2239 DMEMIT(" commit_time:%u", ic->autocommit_msec);
2240 if (ic->sectors_per_block != 1)
2241 DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT);
2242
2243#define EMIT_ALG(a, n) \
2244 do { \
2245 if (ic->a.alg_string) { \
2246 DMEMIT(" %s:%s", n, ic->a.alg_string); \
2247 if (ic->a.key_string) \
2248 DMEMIT(":%s", ic->a.key_string);\
2249 } \
2250 } while (0)
2251 EMIT_ALG(internal_hash_alg, "internal_hash");
2252 EMIT_ALG(journal_crypt_alg, "journal_crypt");
2253 EMIT_ALG(journal_mac_alg, "journal_mac");
2254 break;
2255 }
2256 }
2257}
2258
2259static int dm_integrity_iterate_devices(struct dm_target *ti,
2260 iterate_devices_callout_fn fn, void *data)
2261{
2262 struct dm_integrity_c *ic = ti->private;
2263
2264 return fn(ti, ic->dev, ic->start + ic->initial_sectors + ic->metadata_run, ti->len, data);
2265}
2266
2267static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *limits)
2268{
2269 struct dm_integrity_c *ic = ti->private;
2270
2271 if (ic->sectors_per_block > 1) {
2272 limits->logical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
2273 limits->physical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
2274 blk_limits_io_min(limits, ic->sectors_per_block << SECTOR_SHIFT);
2275 }
2276}
2277
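/*
 * Journal layout arithmetic.  As a worked example, with tag_size = 4
 * (e.g. crc32c), sectors_per_block = 1 and no journal MAC:
 * journal_entry_size = roundup(16 + 4, 8) = 24 bytes,
 * journal_entries_per_sector = 504 / 24 = 21,
 * journal_section_entries = 21 * 8 = 168 and
 * journal_section_sectors = 168 + 8 = 176 sectors per section.
 */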
2278static void calculate_journal_section_size(struct dm_integrity_c *ic)
2279{
2280 unsigned sector_space = JOURNAL_SECTOR_DATA;
2281
2282 ic->journal_sections = le32_to_cpu(ic->sb->journal_sections);
2283 ic->journal_entry_size = roundup(offsetof(struct journal_entry, last_bytes[ic->sectors_per_block]) + ic->tag_size,
2284 JOURNAL_ENTRY_ROUNDUP);
2285
2286 if (ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC))
2287 sector_space -= JOURNAL_MAC_PER_SECTOR;
2288 ic->journal_entries_per_sector = sector_space / ic->journal_entry_size;
2289 ic->journal_section_entries = ic->journal_entries_per_sector * JOURNAL_BLOCK_SECTORS;
2290 ic->journal_section_sectors = (ic->journal_section_entries << ic->sb->log2_sectors_per_block) + JOURNAL_BLOCK_SECTORS;
2291 ic->journal_entries = ic->journal_section_entries * ic->journal_sections;
2292}
2293
2294static int calculate_device_limits(struct dm_integrity_c *ic)
2295{
2296 __u64 initial_sectors;
2297 sector_t last_sector, last_area, last_offset;
2298
2299 calculate_journal_section_size(ic);
2300 initial_sectors = SB_SECTORS + (__u64)ic->journal_section_sectors * ic->journal_sections;
2301 if (initial_sectors + METADATA_PADDING_SECTORS >= ic->device_sectors || initial_sectors > UINT_MAX)
2302 return -EINVAL;
2303 ic->initial_sectors = initial_sectors;
2304
2305 ic->metadata_run = roundup((__u64)ic->tag_size << (ic->sb->log2_interleave_sectors - ic->sb->log2_sectors_per_block),
2306 (__u64)(1 << SECTOR_SHIFT << METADATA_PADDING_SECTORS)) >> SECTOR_SHIFT;
2307 if (!(ic->metadata_run & (ic->metadata_run - 1)))
2308 ic->log2_metadata_run = __ffs(ic->metadata_run);
2309 else
2310 ic->log2_metadata_run = -1;
2311
2312 get_area_and_offset(ic, ic->provided_data_sectors - 1, &last_area, &last_offset);
2313 last_sector = get_data_sector(ic, last_area, last_offset);
2314
2315 if (ic->start + last_sector < last_sector || ic->start + last_sector >= ic->device_sectors)
2316 return -EINVAL;
2317
2318 return 0;
2319}
2320
2321static int initialize_superblock(struct dm_integrity_c *ic, unsigned journal_sectors, unsigned interleave_sectors)
2322{
2323 unsigned journal_sections;
2324 int test_bit;
2325
2326 memset(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT);
2327 memcpy(ic->sb->magic, SB_MAGIC, 8);
2328 ic->sb->version = SB_VERSION;
2329 ic->sb->integrity_tag_size = cpu_to_le16(ic->tag_size);
2330 ic->sb->log2_sectors_per_block = __ffs(ic->sectors_per_block);
2331 if (ic->journal_mac_alg.alg_string)
2332 ic->sb->flags |= cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC);
2333
2334 calculate_journal_section_size(ic);
2335 journal_sections = journal_sectors / ic->journal_section_sectors;
2336 if (!journal_sections)
2337 journal_sections = 1;
2338 ic->sb->journal_sections = cpu_to_le32(journal_sections);
2339
2340 if (!interleave_sectors)
2341 interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
2342 ic->sb->log2_interleave_sectors = __fls(interleave_sectors);
2343 ic->sb->log2_interleave_sectors = max((__u8)MIN_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
2344 ic->sb->log2_interleave_sectors = min((__u8)MAX_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
2345
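	/*
	 * Find the largest usable data area: try to set each bit of
	 * provided_data_sectors from the highest downwards and keep it
	 * only if the data and its metadata still fit on the device
	 * according to calculate_device_limits().
	 */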
2346 ic->provided_data_sectors = 0;
2347 for (test_bit = fls64(ic->device_sectors) - 1; test_bit >= 3; test_bit--) {
2348 __u64 prev_data_sectors = ic->provided_data_sectors;
2349
2350 ic->provided_data_sectors |= (sector_t)1 << test_bit;
2351 if (calculate_device_limits(ic))
2352 ic->provided_data_sectors = prev_data_sectors;
2353 }
2354
2355 if (!ic->provided_data_sectors)
2356 return -EINVAL;
2357
2358 ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors);
2359
2360 return 0;
2361}
2362
2363static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic)
2364{
2365 struct gendisk *disk = dm_disk(dm_table_get_md(ti->table));
2366 struct blk_integrity bi;
2367
2368 memset(&bi, 0, sizeof(bi));
2369 bi.profile = &dm_integrity_profile;
2370 bi.tuple_size = ic->tag_size;
2371 bi.tag_size = bi.tuple_size;
2372 bi.interval_exp = ilog2(ic->sectors_per_block << SECTOR_SHIFT);
2373
2374 blk_integrity_register(disk, &bi);
2375 blk_queue_max_integrity_segments(disk->queue, UINT_MAX);
2376}
2377
2378/* FIXME: use new kvmalloc */
2379static void *dm_integrity_kvmalloc(size_t size, gfp_t gfp)
2380{
2381 void *ptr = NULL;
2382
2383 if (size <= PAGE_SIZE)
2384 ptr = kmalloc(size, GFP_KERNEL | gfp);
2385 if (!ptr && size <= KMALLOC_MAX_SIZE)
2386 ptr = kmalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY | gfp);
2387 if (!ptr)
2388 ptr = __vmalloc(size, GFP_KERNEL | gfp, PAGE_KERNEL);
2389
2390 return ptr;
2391}
2392
2393static void dm_integrity_free_page_list(struct dm_integrity_c *ic, struct page_list *pl)
2394{
2395 unsigned i;
2396
2397 if (!pl)
2398 return;
2399 for (i = 0; i < ic->journal_pages; i++)
2400 if (pl[i].page)
2401 __free_page(pl[i].page);
2402 kvfree(pl);
2403}
2404
2405static struct page_list *dm_integrity_alloc_page_list(struct dm_integrity_c *ic)
2406{
2407 size_t page_list_desc_size = ic->journal_pages * sizeof(struct page_list);
2408 struct page_list *pl;
2409 unsigned i;
2410
2411 pl = dm_integrity_kvmalloc(page_list_desc_size, __GFP_ZERO);
2412 if (!pl)
2413 return NULL;
2414
2415 for (i = 0; i < ic->journal_pages; i++) {
2416 pl[i].page = alloc_page(GFP_KERNEL);
2417 if (!pl[i].page) {
2418 dm_integrity_free_page_list(ic, pl);
2419 return NULL;
2420 }
2421 if (i)
2422 pl[i - 1].next = &pl[i];
2423 }
2424
2425 return pl;
2426}
2427
2428static void dm_integrity_free_journal_scatterlist(struct dm_integrity_c *ic, struct scatterlist **sl)
2429{
2430 unsigned i;
2431 for (i = 0; i < ic->journal_sections; i++)
2432 kvfree(sl[i]);
2433 kfree(sl);
2434}
2435
2436static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic, struct page_list *pl)
2437{
2438 struct scatterlist **sl;
2439 unsigned i;
2440
2441 sl = dm_integrity_kvmalloc(ic->journal_sections * sizeof(struct scatterlist *), __GFP_ZERO);
2442 if (!sl)
2443 return NULL;
2444
2445 for (i = 0; i < ic->journal_sections; i++) {
2446 struct scatterlist *s;
2447 unsigned start_index, start_offset;
2448 unsigned end_index, end_offset;
2449 unsigned n_pages;
2450 unsigned idx;
2451
2452 page_list_location(ic, i, 0, &start_index, &start_offset);
2453 page_list_location(ic, i, ic->journal_section_sectors - 1, &end_index, &end_offset);
2454
2455 n_pages = (end_index - start_index + 1);
2456
2457 s = dm_integrity_kvmalloc(n_pages * sizeof(struct scatterlist), 0);
2458 if (!s) {
2459 dm_integrity_free_journal_scatterlist(ic, sl);
2460 return NULL;
2461 }
2462
2463 sg_init_table(s, n_pages);
2464 for (idx = start_index; idx <= end_index; idx++) {
2465 char *va = lowmem_page_address(pl[idx].page);
2466 unsigned start = 0, end = PAGE_SIZE;
2467 if (idx == start_index)
2468 start = start_offset;
2469 if (idx == end_index)
2470 end = end_offset + (1 << SECTOR_SHIFT);
2471 sg_set_buf(&s[idx - start_index], va + start, end - start);
2472 }
2473
2474 sl[i] = s;
2475 }
2476
2477 return sl;
2478}
2479
2480static void free_alg(struct alg_spec *a)
2481{
2482 kzfree(a->alg_string);
2483 kzfree(a->key);
2484 memset(a, 0, sizeof *a);
2485}
2486
2487static int get_alg_and_key(const char *arg, struct alg_spec *a, char **error, char *error_inval)
2488{
2489 char *k;
2490
2491 free_alg(a);
2492
2493 a->alg_string = kstrdup(strchr(arg, ':') + 1, GFP_KERNEL);
2494 if (!a->alg_string)
2495 goto nomem;
2496
2497 k = strchr(a->alg_string, ':');
2498 if (k) {
2501 *k = 0;
2502 a->key_string = k + 1;
2503 if (strlen(a->key_string) & 1)
2504 goto inval;
2505
2506 a->key_size = strlen(a->key_string) / 2;
2507 a->key = kmalloc(a->key_size, GFP_KERNEL);
2508 if (!a->key)
2509 goto nomem;
2510 /* decode the hex-encoded key; hex2bin() also validates the digits */
2511 if (hex2bin(a->key, a->key_string, a->key_size))
2512 goto inval;
2520 }
2521
2522 return 0;
2523inval:
2524 *error = error_inval;
2525 return -EINVAL;
2526nomem:
2527 *error = "Out of memory for an argument";
2528 return -ENOMEM;
2529}
2530
2531static int get_mac(struct crypto_shash **hash, struct alg_spec *a, char **error,
2532 char *error_alg, char *error_key)
2533{
2534 int r;
2535
2536 if (a->alg_string) {
2537 *hash = crypto_alloc_shash(a->alg_string, 0, CRYPTO_ALG_ASYNC);
2538 if (IS_ERR(*hash)) {
2539 *error = error_alg;
2540 r = PTR_ERR(*hash);
2541 *hash = NULL;
2542 return r;
2543 }
2544
2545 if (a->key) {
2546 r = crypto_shash_setkey(*hash, a->key, a->key_size);
2547 if (r) {
2548 *error = error_key;
2549 return r;
2550 }
2551 }
2552 }
2553
2554 return 0;
2555}
2556
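/*
 * Allocate the in-memory journal and set up its optional encryption.
 * A cipher with block size 1 (a stream cipher) is run once over a
 * zeroed buffer and the resulting keystream pages (journal_xor) are
 * XORed with the journal contents on I/O; for a block cipher, one
 * skcipher request is preallocated per section, with the section IV
 * derived by encrypting the section number.
 */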
2557static int create_journal(struct dm_integrity_c *ic, char **error)
2558{
2559 int r = 0;
2560 unsigned i;
2561 __u64 journal_pages, journal_desc_size, journal_tree_size;
2562 unsigned char *crypt_data = NULL;
2563
2564 ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL);
2565 ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL);
2566 ic->commit_ids[2] = cpu_to_le64(0x3333333333333333ULL);
2567 ic->commit_ids[3] = cpu_to_le64(0x4444444444444444ULL);
2568
2569 journal_pages = roundup((__u64)ic->journal_sections * ic->journal_section_sectors,
2570 PAGE_SIZE >> SECTOR_SHIFT) >> (PAGE_SHIFT - SECTOR_SHIFT);
2571 journal_desc_size = journal_pages * sizeof(struct page_list);
2572 if (journal_pages >= totalram_pages - totalhigh_pages || journal_desc_size > ULONG_MAX) {
2573 *error = "Journal doesn't fit into memory";
2574 r = -ENOMEM;
2575 goto bad;
2576 }
2577 ic->journal_pages = journal_pages;
2578
2579 ic->journal = dm_integrity_alloc_page_list(ic);
2580 if (!ic->journal) {
2581 *error = "Could not allocate memory for journal";
2582 r = -ENOMEM;
2583 goto bad;
2584 }
2585 if (ic->journal_crypt_alg.alg_string) {
2586 unsigned ivsize, blocksize;
2587 struct journal_completion comp;
2588
2589 comp.ic = ic;
2590 ic->journal_crypt = crypto_alloc_skcipher(ic->journal_crypt_alg.alg_string, 0, 0);
2591 if (IS_ERR(ic->journal_crypt)) {
2592 *error = "Invalid journal cipher";
2593 r = PTR_ERR(ic->journal_crypt);
2594 ic->journal_crypt = NULL;
2595 goto bad;
2596 }
2597 ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
2598 blocksize = crypto_skcipher_blocksize(ic->journal_crypt);
2599
2600 if (ic->journal_crypt_alg.key) {
2601 r = crypto_skcipher_setkey(ic->journal_crypt, ic->journal_crypt_alg.key,
2602 ic->journal_crypt_alg.key_size);
2603 if (r) {
2604 *error = "Error setting encryption key";
2605 goto bad;
2606 }
2607 }
2608 DEBUG_print("cipher %s, block size %u iv size %u\n",
2609 ic->journal_crypt_alg.alg_string, blocksize, ivsize);
2610
2611 ic->journal_io = dm_integrity_alloc_page_list(ic);
2612 if (!ic->journal_io) {
2613 *error = "Could not allocate memory for journal io";
2614 r = -ENOMEM;
2615 goto bad;
2616 }
2617
2618 if (blocksize == 1) {
2619 struct scatterlist *sg;
2620 SKCIPHER_REQUEST_ON_STACK(req, ic->journal_crypt);
2621 unsigned char iv[ivsize];
2622 skcipher_request_set_tfm(req, ic->journal_crypt);
2623
2624 ic->journal_xor = dm_integrity_alloc_page_list(ic);
2625 if (!ic->journal_xor) {
2626 *error = "Could not allocate memory for journal xor";
2627 r = -ENOMEM;
2628 goto bad;
2629 }
2630
2631 sg = dm_integrity_kvmalloc((ic->journal_pages + 1) * sizeof(struct scatterlist), 0);
2632 if (!sg) {
2633 *error = "Unable to allocate sg list";
2634 r = -ENOMEM;
2635 goto bad;
2636 }
2637 sg_init_table(sg, ic->journal_pages + 1);
2638 for (i = 0; i < ic->journal_pages; i++) {
2639 char *va = lowmem_page_address(ic->journal_xor[i].page);
2640 clear_page(va);
2641 sg_set_buf(&sg[i], va, PAGE_SIZE);
2642 }
2643 sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids);
2644 memset(iv, 0x00, ivsize);
2645
2646 skcipher_request_set_crypt(req, sg, sg, PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, iv);
2647 comp.comp = COMPLETION_INITIALIZER_ONSTACK(comp.comp);
2648 comp.in_flight = (atomic_t)ATOMIC_INIT(1);
2649 if (do_crypt(true, req, &comp))
2650 wait_for_completion(&comp.comp);
2651 kvfree(sg);
2652 r = dm_integrity_failed(ic);
2653 if (r) {
2654 *error = "Unable to encrypt journal";
2655 goto bad;
2656 }
2657 DEBUG_bytes(lowmem_page_address(ic->journal_xor[0].page), 64, "xor data");
2658
2659 crypto_free_skcipher(ic->journal_crypt);
2660 ic->journal_crypt = NULL;
2661 } else {
2662 SKCIPHER_REQUEST_ON_STACK(req, ic->journal_crypt);
2663 unsigned char iv[ivsize];
2664 unsigned crypt_len = roundup(ivsize, blocksize);
2665
2666 crypt_data = kmalloc(crypt_len, GFP_KERNEL);
2667 if (!crypt_data) {
2668 *error = "Unable to allocate crypt data";
2669 r = -ENOMEM;
2670 goto bad;
2671 }
2672
2673 skcipher_request_set_tfm(req, ic->journal_crypt);
2674
2675 ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal);
2676 if (!ic->journal_scatterlist) {
2677 *error = "Unable to allocate sg list";
2678 r = -ENOMEM;
2679 goto bad;
2680 }
2681 ic->journal_io_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal_io);
2682 if (!ic->journal_io_scatterlist) {
2683 *error = "Unable to allocate sg list";
2684 r = -ENOMEM;
2685 goto bad;
2686 }
2687 ic->sk_requests = dm_integrity_kvmalloc(ic->journal_sections * sizeof(struct skcipher_request *), __GFP_ZERO);
2688 if (!ic->sk_requests) {
2689 *error = "Unable to allocate sk requests";
2690 r = -ENOMEM;
2691 goto bad;
2692 }
2693 for (i = 0; i < ic->journal_sections; i++) {
2694 struct scatterlist sg;
2695 struct skcipher_request *section_req;
2696 __u32 section_le = cpu_to_le32(i);
2697
2698 memset(iv, 0x00, ivsize);
2699 memset(crypt_data, 0x00, crypt_len);
2700 memcpy(crypt_data, &section_le, min((size_t)crypt_len, sizeof(section_le)));
2701
2702 sg_init_one(&sg, crypt_data, crypt_len);
2703 skcipher_request_set_crypt(req, &sg, &sg, crypt_len, iv);
2704 comp.comp = COMPLETION_INITIALIZER_ONSTACK(comp.comp);
2705 comp.in_flight = (atomic_t)ATOMIC_INIT(1);
2706 if (do_crypt(true, req, &comp))
2707 wait_for_completion(&comp.comp);
2708
2709 r = dm_integrity_failed(ic);
2710 if (r) {
2711 *error = "Unable to generate iv";
2712 goto bad;
2713 }
2714
2715 section_req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
2716 if (!section_req) {
2717 *error = "Unable to allocate crypt request";
2718 r = -ENOMEM;
2719 goto bad;
2720 }
2721 section_req->iv = kmalloc(ivsize * 2, GFP_KERNEL);
2722 if (!section_req->iv) {
2723 skcipher_request_free(section_req);
2724 *error = "Unable to allocate iv";
2725 r = -ENOMEM;
2726 goto bad;
2727 }
2728 memcpy(section_req->iv + ivsize, crypt_data, ivsize);
2729 section_req->cryptlen = (size_t)ic->journal_section_sectors << SECTOR_SHIFT;
2730 ic->sk_requests[i] = section_req;
2731 DEBUG_bytes(crypt_data, ivsize, "iv(%u)", i);
2732 }
2733 }
2734 }
2735
2736 for (i = 0; i < N_COMMIT_IDS; i++) {
2737 unsigned j;
2738retest_commit_id:
2739 for (j = 0; j < i; j++) {
2740 if (ic->commit_ids[j] == ic->commit_ids[i]) {
2741 ic->commit_ids[i] = cpu_to_le64(le64_to_cpu(ic->commit_ids[i]) + 1);
2742 goto retest_commit_id;
2743 }
2744 }
2745 DEBUG_print("commit id %u: %016llx\n", i, ic->commit_ids[i]);
2746 }
2747
2748 journal_tree_size = (__u64)ic->journal_entries * sizeof(struct journal_node);
2749 if (journal_tree_size > ULONG_MAX) {
2750 *error = "Journal doesn't fit into memory";
2751 r = -ENOMEM;
2752 goto bad;
2753 }
2754 ic->journal_tree = dm_integrity_kvmalloc(journal_tree_size, 0);
2755 if (!ic->journal_tree) {
2756 *error = "Could not allocate memory for journal tree";
2757 r = -ENOMEM;
2758 }
2759bad:
2760 kfree(crypt_data);
2761 return r;
2762}
2763
2764 /*
2765 * Construct an integrity mapping
2766 *
2767 * Arguments:
2768 * device
2769 * offset from the start of the device
2770 * tag size
2771 * D - direct writes, J - journal writes, R - recovery mode
2772 * number of optional arguments
2773 * optional arguments:
2774 * journal_sectors
2775 * interleave_sectors
2776 * buffer_sectors
2777 * journal_watermark
2778 * commit_time
2779 * internal_hash
2780 * journal_crypt
2781 * journal_mac
2782 * block_size
2783 */
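/*
 * Example of a corresponding dmsetup table line (hypothetical device
 * and sizes; "-" makes the tag size default to the internal hash
 * digest size, 4 bytes for crc32c):
 *
 *	0 1953792 integrity /dev/sdb 0 - J 2 internal_hash:crc32c block_size:4096
 */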
2784static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
2785{
2786 struct dm_integrity_c *ic;
2787 char dummy;
2788 int r;
2789 unsigned extra_args;
2790 struct dm_arg_set as;
2791 static struct dm_arg _args[] = {
2792 {0, 9, "Invalid number of feature args"},
2793 };
2794 unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
2795 bool should_write_sb;
7eada909
MP
2796 __u64 threshold;
2797 unsigned long long start;
2798
2799#define DIRECT_ARGUMENTS 4
2800
2801 if (argc <= DIRECT_ARGUMENTS) {
2802 ti->error = "Invalid argument count";
2803 return -EINVAL;
2804 }
2805
2806 ic = kzalloc(sizeof(struct dm_integrity_c), GFP_KERNEL);
2807 if (!ic) {
2808 ti->error = "Cannot allocate integrity context";
2809 return -ENOMEM;
2810 }
2811 ti->private = ic;
2812 ti->per_io_data_size = sizeof(struct dm_integrity_io);
2813
2814 ic->in_progress = RB_ROOT;
2815 init_waitqueue_head(&ic->endio_wait);
2816 bio_list_init(&ic->flush_bio_list);
2817 init_waitqueue_head(&ic->copy_to_journal_wait);
2818 init_completion(&ic->crypto_backoff);
2819
2820 r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ic->dev);
2821 if (r) {
2822 ti->error = "Device lookup failed";
2823 goto bad;
2824 }
2825
2826 if (sscanf(argv[1], "%llu%c", &start, &dummy) != 1 || start != (sector_t)start) {
2827 ti->error = "Invalid starting offset";
2828 r = -EINVAL;
2829 goto bad;
2830 }
2831 ic->start = start;
2832
2833 if (strcmp(argv[2], "-")) {
2834 if (sscanf(argv[2], "%u%c", &ic->tag_size, &dummy) != 1 || !ic->tag_size) {
2835 ti->error = "Invalid tag size";
2836 r = -EINVAL;
2837 goto bad;
2838 }
2839 }
2840
2841 if (!strcmp(argv[3], "J") || !strcmp(argv[3], "D") || !strcmp(argv[3], "R"))
2842 ic->mode = argv[3][0];
2843 else {
2844 ti->error = "Invalid mode (expecting J, D, R)";
2845 r = -EINVAL;
2846 goto bad;
2847 }
2848
2849 ic->device_sectors = i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT;
2850 journal_sectors = min((sector_t)DEFAULT_MAX_JOURNAL_SECTORS,
2851 ic->device_sectors >> DEFAULT_JOURNAL_SIZE_FACTOR);
2852 interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
2853 buffer_sectors = DEFAULT_BUFFER_SECTORS;
2854 journal_watermark = DEFAULT_JOURNAL_WATERMARK;
2855 sync_msec = DEFAULT_SYNC_MSEC;
2856 ic->sectors_per_block = 1;
2857
2858 as.argc = argc - DIRECT_ARGUMENTS;
2859 as.argv = argv + DIRECT_ARGUMENTS;
2860 r = dm_read_arg_group(_args, &as, &extra_args, &ti->error);
2861 if (r)
2862 goto bad;
2863
2864 while (extra_args--) {
2865 const char *opt_string;
2866 unsigned val;
2867 opt_string = dm_shift_arg(&as);
2868 if (!opt_string) {
2869 r = -EINVAL;
2870 ti->error = "Not enough feature arguments";
2871 goto bad;
2872 }
2873 if (sscanf(opt_string, "journal_sectors:%u%c", &val, &dummy) == 1)
2874 journal_sectors = val;
2875 else if (sscanf(opt_string, "interleave_sectors:%u%c", &val, &dummy) == 1)
2876 interleave_sectors = val;
2877 else if (sscanf(opt_string, "buffer_sectors:%u%c", &val, &dummy) == 1)
2878 buffer_sectors = val;
2879 else if (sscanf(opt_string, "journal_watermark:%u%c", &val, &dummy) == 1 && val <= 100)
2880 journal_watermark = val;
2881 else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1)
2882 sync_msec = val;
2883 else if (sscanf(opt_string, "block_size:%u%c", &val, &dummy) == 1) {
2884 if (val < 1 << SECTOR_SHIFT ||
2885 val > MAX_SECTORS_PER_BLOCK << SECTOR_SHIFT ||
2886 (val & (val - 1))) {
2887 r = -EINVAL;
2888 ti->error = "Invalid block_size argument";
2889 goto bad;
2890 }
2891 ic->sectors_per_block = val >> SECTOR_SHIFT;
2892 } else if (!memcmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
2893 r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error,
2894 "Invalid internal_hash argument");
2895 if (r)
2896 goto bad;
2897 } else if (!memcmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
2898 r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error,
2899 "Invalid journal_crypt argument");
2900 if (r)
2901 goto bad;
2902 } else if (!memcmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
2903 r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error,
2904 "Invalid journal_mac argument");
2905 if (r)
2906 goto bad;
2907 } else {
2908 r = -EINVAL;
2909 ti->error = "Invalid argument";
2910 goto bad;
2911 }
2912 }
2913
2914 r = get_mac(&ic->internal_hash, &ic->internal_hash_alg, &ti->error,
2915 "Invalid internal hash", "Error setting internal hash key");
2916 if (r)
2917 goto bad;
2918
2919 r = get_mac(&ic->journal_mac, &ic->journal_mac_alg, &ti->error,
2920 "Invalid journal mac", "Error setting journal mac key");
2921 if (r)
2922 goto bad;
2923
2924 if (!ic->tag_size) {
2925 if (!ic->internal_hash) {
2926 ti->error = "Unknown tag size";
2927 r = -EINVAL;
2928 goto bad;
2929 }
2930 ic->tag_size = crypto_shash_digestsize(ic->internal_hash);
2931 }
2932 if (ic->tag_size > MAX_TAG_SIZE) {
2933 ti->error = "Too big tag size";
2934 r = -EINVAL;
2935 goto bad;
2936 }
2937 if (!(ic->tag_size & (ic->tag_size - 1)))
2938 ic->log2_tag_size = __ffs(ic->tag_size);
2939 else
2940 ic->log2_tag_size = -1;
2941
2942 ic->autocommit_jiffies = msecs_to_jiffies(sync_msec);
2943 ic->autocommit_msec = sync_msec;
2944 setup_timer(&ic->autocommit_timer, autocommit_fn, (unsigned long)ic);
2945
2946 ic->io = dm_io_client_create();
2947 if (IS_ERR(ic->io)) {
2948 r = PTR_ERR(ic->io);
2949 ic->io = NULL;
2950 ti->error = "Cannot allocate dm io";
2951 goto bad;
2952 }
2953
2954 ic->journal_io_mempool = mempool_create_slab_pool(JOURNAL_IO_MEMPOOL, journal_io_cache);
2955 if (!ic->journal_io_mempool) {
2956 r = -ENOMEM;
2957 ti->error = "Cannot allocate mempool";
2958 goto bad;
2959 }
2960
2961 ic->metadata_wq = alloc_workqueue("dm-integrity-metadata",
2962 WQ_MEM_RECLAIM, METADATA_WORKQUEUE_MAX_ACTIVE);
2963 if (!ic->metadata_wq) {
2964 ti->error = "Cannot allocate workqueue";
2965 r = -ENOMEM;
2966 goto bad;
2967 }
2968
2969 /*
2970 * If this workqueue were percpu, it would cause bio reordering
2971 * and reduced performance.
2972 */
2973 ic->wait_wq = alloc_workqueue("dm-integrity-wait", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
2974 if (!ic->wait_wq) {
2975 ti->error = "Cannot allocate workqueue";
2976 r = -ENOMEM;
2977 goto bad;
2978 }
2979
2980 ic->commit_wq = alloc_workqueue("dm-integrity-commit", WQ_MEM_RECLAIM, 1);
2981 if (!ic->commit_wq) {
2982 ti->error = "Cannot allocate workqueue";
2983 r = -ENOMEM;
2984 goto bad;
2985 }
2986 INIT_WORK(&ic->commit_work, integrity_commit);
2987
2988 if (ic->mode == 'J') {
2989 ic->writer_wq = alloc_workqueue("dm-integrity-writer", WQ_MEM_RECLAIM, 1);
2990 if (!ic->writer_wq) {
2991 ti->error = "Cannot allocate workqueue";
2992 r = -ENOMEM;
2993 goto bad;
2994 }
2995 INIT_WORK(&ic->writer_work, integrity_writer);
2996 }
2997
2998 ic->sb = alloc_pages_exact(SB_SECTORS << SECTOR_SHIFT, GFP_KERNEL);
2999 if (!ic->sb) {
3000 r = -ENOMEM;
3001 ti->error = "Cannot allocate superblock area";
3002 goto bad;
3003 }
3004
3005 r = sync_rw_sb(ic, REQ_OP_READ, 0);
3006 if (r) {
3007 ti->error = "Error reading superblock";
3008 goto bad;
3009 }
3010 should_write_sb = false;
3011 if (memcmp(ic->sb->magic, SB_MAGIC, 8)) {
3012 if (ic->mode != 'R') {
3013 if (memchr_inv(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT)) {
3014 r = -EINVAL;
3015 ti->error = "The device is not initialized";
3016 goto bad;
3017 }
3018 }
3019
3020 r = initialize_superblock(ic, journal_sectors, interleave_sectors);
3021 if (r) {
3022 ti->error = "Could not initialize superblock";
3023 goto bad;
3024 }
3025 if (ic->mode != 'R')
3026 should_write_sb = true;
3027 }
3028
3029 if (ic->sb->version != SB_VERSION) {
3030 r = -EINVAL;
3031 ti->error = "Unknown version";
3032 goto bad;
3033 }
3034 if (le16_to_cpu(ic->sb->integrity_tag_size) != ic->tag_size) {
3035 r = -EINVAL;
3036 ti->error = "Tag size doesn't match the information in superblock";
3037 goto bad;
3038 }
3039 if (ic->sb->log2_sectors_per_block != __ffs(ic->sectors_per_block)) {
3040 r = -EINVAL;
3041 ti->error = "Block size doesn't match the information in superblock";
3042 goto bad;
3043 }
3044 /* make sure that ti->max_io_len doesn't overflow */
3045 if (ic->sb->log2_interleave_sectors < MIN_LOG2_INTERLEAVE_SECTORS ||
3046 ic->sb->log2_interleave_sectors > MAX_LOG2_INTERLEAVE_SECTORS) {
3047 r = -EINVAL;
3048 ti->error = "Invalid interleave_sectors in the superblock";
3049 goto bad;
3050 }
3051 ic->provided_data_sectors = le64_to_cpu(ic->sb->provided_data_sectors);
3052 if (ic->provided_data_sectors != le64_to_cpu(ic->sb->provided_data_sectors)) {
3053 /* test for overflow */
3054 r = -EINVAL;
3055 ti->error = "The superblock has 64-bit device size, but the kernel was compiled with 32-bit sectors";
3056 goto bad;
3057 }
3058 if (!!(ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC)) != !!ic->journal_mac_alg.alg_string) {
3059 r = -EINVAL;
3060 ti->error = "Journal mac mismatch";
3061 goto bad;
3062 }
3063 r = calculate_device_limits(ic);
3064 if (r) {
3065 ti->error = "The device is too small";
3066 goto bad;
3067 }
3068
3069 if (!buffer_sectors)
3070 buffer_sectors = 1;
3071 ic->log2_buffer_sectors = min3((int)__fls(buffer_sectors), (int)__ffs(ic->metadata_run), 31 - SECTOR_SHIFT);
3072
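	/*
	 * Convert the journal watermark percentage to an entry count:
	 * threshold = journal_entries * (100 - watermark) / 100, rounded
	 * to the nearest entry (e.g. 800 entries with the default 50%
	 * watermark give a threshold of 400 free entries).
	 */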
3073 threshold = (__u64)ic->journal_entries * (100 - journal_watermark);
3074 threshold += 50;
3075 do_div(threshold, 100);
3076 ic->free_sectors_threshold = threshold;
3077
3078 DEBUG_print("initialized:\n");
3079 DEBUG_print(" integrity_tag_size %u\n", le16_to_cpu(ic->sb->integrity_tag_size));
3080 DEBUG_print(" journal_entry_size %u\n", ic->journal_entry_size);
3081 DEBUG_print(" journal_entries_per_sector %u\n", ic->journal_entries_per_sector);
3082 DEBUG_print(" journal_section_entries %u\n", ic->journal_section_entries);
3083 DEBUG_print(" journal_section_sectors %u\n", ic->journal_section_sectors);
3084 DEBUG_print(" journal_sections %u\n", (unsigned)le32_to_cpu(ic->sb->journal_sections));
3085 DEBUG_print(" journal_entries %u\n", ic->journal_entries);
3086 DEBUG_print(" log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors);
3087 DEBUG_print(" device_sectors 0x%llx\n", (unsigned long long)ic->device_sectors);
3088 DEBUG_print(" initial_sectors 0x%x\n", ic->initial_sectors);
3089 DEBUG_print(" metadata_run 0x%x\n", ic->metadata_run);
3090 DEBUG_print(" log2_metadata_run %d\n", ic->log2_metadata_run);
3091 DEBUG_print(" provided_data_sectors 0x%llx (%llu)\n", (unsigned long long)ic->provided_data_sectors,
3092 (unsigned long long)ic->provided_data_sectors);
3093 DEBUG_print(" log2_buffer_sectors %u\n", ic->log2_buffer_sectors);
3094
3095 ic->bufio = dm_bufio_client_create(ic->dev->bdev, 1U << (SECTOR_SHIFT + ic->log2_buffer_sectors),
3096 1, 0, NULL, NULL);
3097 if (IS_ERR(ic->bufio)) {
3098 r = PTR_ERR(ic->bufio);
3099 ti->error = "Cannot initialize dm-bufio";
3100 ic->bufio = NULL;
3101 goto bad;
3102 }
3103 dm_bufio_set_sector_offset(ic->bufio, ic->start + ic->initial_sectors);
3104
3105 if (ic->mode != 'R') {
3106 r = create_journal(ic, &ti->error);
3107 if (r)
3108 goto bad;
3109 }
3110
3111 if (should_write_sb) {
3114 init_journal(ic, 0, ic->journal_sections, 0);
3115 r = dm_integrity_failed(ic);
3116 if (unlikely(r)) {
3117 ti->error = "Error initializing journal";
3118 goto bad;
3119 }
3120 r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
3121 if (r) {
3122 ti->error = "Error initializing superblock";
3123 goto bad;
3124 }
3125 ic->just_formatted = true;
3126 }
3127
3128 r = dm_set_target_max_io_len(ti, 1U << ic->sb->log2_interleave_sectors);
3129 if (r)
3130 goto bad;
3131
3132 if (!ic->internal_hash)
3133 dm_integrity_set(ti, ic);
3134
3135 ti->num_flush_bios = 1;
3136 ti->flush_supported = true;
3137
3138 return 0;
3139bad:
3140 dm_integrity_dtr(ti);
3141 return r;
3142}
3143
3144static void dm_integrity_dtr(struct dm_target *ti)
3145{
3146 struct dm_integrity_c *ic = ti->private;
3147
3148 BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
3149
3150 if (ic->metadata_wq)
3151 destroy_workqueue(ic->metadata_wq);
3152 if (ic->wait_wq)
3153 destroy_workqueue(ic->wait_wq);
3154 if (ic->commit_wq)
3155 destroy_workqueue(ic->commit_wq);
3156 if (ic->writer_wq)
3157 destroy_workqueue(ic->writer_wq);
3158 if (ic->bufio)
3159 dm_bufio_client_destroy(ic->bufio);
3160 mempool_destroy(ic->journal_io_mempool);
3161 if (ic->io)
3162 dm_io_client_destroy(ic->io);
3163 if (ic->dev)
3164 dm_put_device(ti, ic->dev);
3165 dm_integrity_free_page_list(ic, ic->journal);
3166 dm_integrity_free_page_list(ic, ic->journal_io);
3167 dm_integrity_free_page_list(ic, ic->journal_xor);
3168 if (ic->journal_scatterlist)
3169 dm_integrity_free_journal_scatterlist(ic, ic->journal_scatterlist);
3170 if (ic->journal_io_scatterlist)
3171 dm_integrity_free_journal_scatterlist(ic, ic->journal_io_scatterlist);
3172 if (ic->sk_requests) {
3173 unsigned i;
3174
3175 for (i = 0; i < ic->journal_sections; i++) {
3176 struct skcipher_request *req = ic->sk_requests[i];
3177 if (req) {
3178 kzfree(req->iv);
3179 skcipher_request_free(req);
3180 }
3181 }
3182 kvfree(ic->sk_requests);
3183 }
3184 kvfree(ic->journal_tree);
3185 if (ic->sb)
3186 free_pages_exact(ic->sb, SB_SECTORS << SECTOR_SHIFT);
3187
3188 if (ic->internal_hash)
3189 crypto_free_shash(ic->internal_hash);
3190 free_alg(&ic->internal_hash_alg);
3191
3192 if (ic->journal_crypt)
3193 crypto_free_skcipher(ic->journal_crypt);
3194 free_alg(&ic->journal_crypt_alg);
3195
3196 if (ic->journal_mac)
3197 crypto_free_shash(ic->journal_mac);
3198 free_alg(&ic->journal_mac_alg);
3199
3200 kfree(ic);
3201}
3202
3203static struct target_type integrity_target = {
3204 .name = "integrity",
3205 .version = {1, 0, 0},
3206 .module = THIS_MODULE,
3207 .features = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
3208 .ctr = dm_integrity_ctr,
3209 .dtr = dm_integrity_dtr,
3210 .map = dm_integrity_map,
3211 .postsuspend = dm_integrity_postsuspend,
3212 .resume = dm_integrity_resume,
3213 .status = dm_integrity_status,
3214 .iterate_devices = dm_integrity_iterate_devices,
3215 .io_hints = dm_integrity_io_hints,
3216};
3217
3218int __init dm_integrity_init(void)
3219{
3220 int r;
3221
3222 journal_io_cache = kmem_cache_create("integrity_journal_io",
3223 sizeof(struct journal_io), 0, 0, NULL);
3224 if (!journal_io_cache) {
3225 DMERR("can't allocate journal io cache");
3226 return -ENOMEM;
3227 }
3228
3229 r = dm_register_target(&integrity_target);
3230
3231 if (r < 0)
3232 DMERR("register failed %d", r);
3233
3234 return r;
3235}
3236
3237void dm_integrity_exit(void)
3238{
3239 dm_unregister_target(&integrity_target);
3240 kmem_cache_destroy(journal_io_cache);
3241}
3242
3243module_init(dm_integrity_init);
3244module_exit(dm_integrity_exit);
3245
3246MODULE_AUTHOR("Milan Broz");
3247MODULE_AUTHOR("Mikulas Patocka");
3248MODULE_DESCRIPTION(DM_NAME " target for integrity tags extension");
3249MODULE_LICENSE("GPL");