/*
 * Copyright (C) 2016-2017 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2016-2017 Milan Broz
 * Copyright (C) 2016-2017 Mikulas Patocka
 *
 * This file is released under the GPL.
 */

#include <linux/module.h>
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/vmalloc.h>
#include <linux/sort.h>
#include <linux/rbtree.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/async_tx.h>
#include "dm-bufio.h"

#define DM_MSG_PREFIX "integrity"

#define DEFAULT_INTERLEAVE_SECTORS	32768
#define DEFAULT_JOURNAL_SIZE_FACTOR	7
#define DEFAULT_BUFFER_SECTORS		128
#define DEFAULT_JOURNAL_WATERMARK	50
#define DEFAULT_SYNC_MSEC		10000
#define DEFAULT_MAX_JOURNAL_SECTORS	131072
#define MIN_INTERLEAVE_SECTORS		3
#define MAX_INTERLEAVE_SECTORS		31
#define METADATA_WORKQUEUE_MAX_ACTIVE	16

/*
 * Warning - DEBUG_PRINT prints security-sensitive data to the log,
 * so it should not be enabled in the official kernel
 */
//#define DEBUG_PRINT
//#define INTERNAL_VERIFY

/*
 * On disk structures
 */

#define SB_MAGIC	"integrt"
#define SB_VERSION	1
#define SB_SECTORS	8

struct superblock {
	__u8 magic[8];
	__u8 version;
	__u8 log2_interleave_sectors;
	__u16 integrity_tag_size;
	__u32 journal_sections;
	__u64 provided_data_sectors;	/* userspace uses this value */
	__u32 flags;
};

#define SB_FLAG_HAVE_JOURNAL_MAC	0x1

#define JOURNAL_ENTRY_ROUNDUP		8

typedef __u64 commit_id_t;
#define JOURNAL_MAC_PER_SECTOR		8

struct journal_entry {
	union {
		struct {
			__u32 sector_lo;
			__u32 sector_hi;
		} s;
		__u64 sector;
	} u;
	commit_id_t last_bytes;
	__u8 tag[0];
};

#if BITS_PER_LONG == 64
#define journal_entry_set_sector(je, x)	do { smp_wmb(); ACCESS_ONCE((je)->u.sector) = cpu_to_le64(x); } while (0)
#define journal_entry_get_sector(je)	le64_to_cpu((je)->u.sector)
#elif defined(CONFIG_LBDAF)
#define journal_entry_set_sector(je, x)	do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); ACCESS_ONCE((je)->u.s.sector_hi) = cpu_to_le32((x) >> 32); } while (0)
#define journal_entry_get_sector(je)	le64_to_cpu((je)->u.sector)
#else
#define journal_entry_set_sector(je, x)	do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); ACCESS_ONCE((je)->u.s.sector_hi) = cpu_to_le32(0); } while (0)
#define journal_entry_get_sector(je)	le32_to_cpu((je)->u.s.sector_lo)
#endif
#define journal_entry_is_unused(je)	((je)->u.s.sector_hi == cpu_to_le32(-1))
#define journal_entry_set_unused(je)	do { ((je)->u.s.sector_hi = cpu_to_le32(-1)); } while (0)
#define journal_entry_is_inprogress(je)	((je)->u.s.sector_hi == cpu_to_le32(-2))
#define journal_entry_set_inprogress(je) do { ((je)->u.s.sector_hi = cpu_to_le32(-2)); } while (0)
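
/*
 * Note on the macros above: the high 32 bits of the sector number double
 * as a state field. 0xffffffff marks an unused entry and 0xfffffffe an
 * entry whose data is still being copied into the journal; real sector
 * numbers never reach these values, so the encodings cannot collide with
 * a valid entry.
 */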

#define JOURNAL_BLOCK_SECTORS		8
#define JOURNAL_SECTOR_DATA		((1 << SECTOR_SHIFT) - sizeof(commit_id_t))
#define JOURNAL_MAC_SIZE		(JOURNAL_MAC_PER_SECTOR * JOURNAL_BLOCK_SECTORS)

struct journal_sector {
	__u8 entries[JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR];
	__u8 mac[JOURNAL_MAC_PER_SECTOR];
	commit_id_t commit_id;
};

#define MAX_TAG_SIZE			(JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR - offsetof(struct journal_entry, tag))
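
/*
 * Worked example with 512-byte sectors: JOURNAL_SECTOR_DATA is
 * 512 - 8 = 504 bytes, of which 8 bytes hold this sector's slice of the
 * section MAC, leaving 496 bytes for entries. The fixed part of a
 * journal_entry (sector plus last_bytes) is 16 bytes, so MAX_TAG_SIZE
 * works out to 504 - 8 - 16 = 480 bytes.
 */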

#define METADATA_PADDING_SECTORS	8

#define N_COMMIT_IDS			4

static unsigned char prev_commit_seq(unsigned char seq)
{
	return (seq + N_COMMIT_IDS - 1) % N_COMMIT_IDS;
}

static unsigned char next_commit_seq(unsigned char seq)
{
	return (seq + 1) % N_COMMIT_IDS;
}

/*
 * In-memory structures
 */

struct journal_node {
	struct rb_node node;
	sector_t sector;
};

struct alg_spec {
	char *alg_string;
	char *key_string;
	__u8 *key;
	unsigned key_size;
};

struct dm_integrity_c {
	struct dm_dev *dev;
	unsigned tag_size;
	__s8 log2_tag_size;
	sector_t start;
	mempool_t *journal_io_mempool;
	struct dm_io_client *io;
	struct dm_bufio_client *bufio;
	struct workqueue_struct *metadata_wq;
	struct superblock *sb;
	unsigned journal_pages;
	struct page_list *journal;
	struct page_list *journal_io;
	struct page_list *journal_xor;

	struct crypto_skcipher *journal_crypt;
	struct scatterlist **journal_scatterlist;
	struct scatterlist **journal_io_scatterlist;
	struct skcipher_request **sk_requests;

	struct crypto_shash *journal_mac;

	struct journal_node *journal_tree;
	struct rb_root journal_tree_root;

	sector_t provided_data_sectors;

	unsigned short journal_entry_size;
	unsigned char journal_entries_per_sector;
	unsigned char journal_section_entries;
	unsigned char journal_section_sectors;
	unsigned journal_sections;
	unsigned journal_entries;
	sector_t device_sectors;
	unsigned initial_sectors;
	unsigned metadata_run;
	__s8 log2_metadata_run;
	__u8 log2_buffer_sectors;

	unsigned char mode;
	bool suspending;

	int failed;

	struct crypto_shash *internal_hash;

	/* these variables are locked with endio_wait.lock */
	struct rb_root in_progress;
	wait_queue_head_t endio_wait;
	struct workqueue_struct *wait_wq;

	unsigned char commit_seq;
	commit_id_t commit_ids[N_COMMIT_IDS];

	unsigned committed_section;
	unsigned n_committed_sections;

	unsigned uncommitted_section;
	unsigned n_uncommitted_sections;

	unsigned free_section;
	unsigned char free_section_entry;
	unsigned free_sectors;

	unsigned free_sectors_threshold;

	struct workqueue_struct *commit_wq;
	struct work_struct commit_work;

	struct workqueue_struct *writer_wq;
	struct work_struct writer_work;

	struct bio_list flush_bio_list;

	unsigned long autocommit_jiffies;
	struct timer_list autocommit_timer;
	unsigned autocommit_msec;

	wait_queue_head_t copy_to_journal_wait;

	struct completion crypto_backoff;

	bool journal_uptodate;
	bool just_formatted;

	struct alg_spec internal_hash_alg;
	struct alg_spec journal_crypt_alg;
	struct alg_spec journal_mac_alg;
};

struct dm_integrity_range {
	sector_t logical_sector;
	unsigned n_sectors;
	struct rb_node node;
};

struct dm_integrity_io {
	struct work_struct work;

	struct dm_integrity_c *ic;
	bool write;
	bool fua;

	struct dm_integrity_range range;

	sector_t metadata_block;
	unsigned metadata_offset;

	atomic_t in_flight;
	int bi_error;

	struct completion *completion;

	struct block_device *orig_bi_bdev;
	bio_end_io_t *orig_bi_end_io;
	struct bio_integrity_payload *orig_bi_integrity;
	struct bvec_iter orig_bi_iter;
};

struct journal_completion {
	struct dm_integrity_c *ic;
	atomic_t in_flight;
	struct completion comp;
};

struct journal_io {
	struct dm_integrity_range range;
	struct journal_completion *comp;
};

static struct kmem_cache *journal_io_cache;

#define JOURNAL_IO_MEMPOOL	32

#ifdef DEBUG_PRINT
#define DEBUG_print(x, ...)	printk(KERN_DEBUG x, ##__VA_ARGS__)
static void __DEBUG_bytes(__u8 *bytes, size_t len, const char *msg, ...)
{
	va_list args;
	va_start(args, msg);
	vprintk(msg, args);
	va_end(args);
	if (len)
		pr_cont(":");
	while (len) {
		pr_cont(" %02x", *bytes);
		bytes++;
		len--;
	}
	pr_cont("\n");
}
#define DEBUG_bytes(bytes, len, msg, ...)	__DEBUG_bytes(bytes, len, KERN_DEBUG msg, ##__VA_ARGS__)
#else
#define DEBUG_print(x, ...)			do { } while (0)
#define DEBUG_bytes(bytes, len, msg, ...)	do { } while (0)
#endif

/*
 * DM Integrity profile; the protection is performed by the layer above (dm-crypt)
 */
static struct blk_integrity_profile dm_integrity_profile = {
	.name			= "DM-DIF-EXT-TAG",
	.generate_fn		= NULL,
	.verify_fn		= NULL,
};

static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map);
static void integrity_bio_wait(struct work_struct *w);
static void dm_integrity_dtr(struct dm_target *ti);

static void dm_integrity_io_error(struct dm_integrity_c *ic, const char *msg, int err)
{
	if (!cmpxchg(&ic->failed, 0, err))
		DMERR("Error on %s: %d", msg, err);
}

static int dm_integrity_failed(struct dm_integrity_c *ic)
{
	return ACCESS_ONCE(ic->failed);
}

static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i,
					  unsigned j, unsigned char seq)
{
	/*
	 * Xor the number with the section and sector, so that if a piece of
	 * the journal is written in the wrong place, it is detected.
	 */
	return ic->commit_ids[seq] ^ cpu_to_le64(((__u64)i << 32) ^ j);
}
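
/*
 * Illustrative example: with commit_ids[seq] == C, section 5 sector 2
 * stores C ^ ((5ULL << 32) ^ 2). If that block later turns up at
 * section 5 sector 3, replay recomputes the id expected there,
 * C ^ ((5ULL << 32) ^ 3), which no longer matches, so the misplaced
 * write is detected.
 */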

static void get_area_and_offset(struct dm_integrity_c *ic, sector_t data_sector,
				sector_t *area, sector_t *offset)
{
	__u8 log2_interleave_sectors = ic->sb->log2_interleave_sectors;

	*area = data_sector >> log2_interleave_sectors;
	*offset = (unsigned)data_sector & ((1U << log2_interleave_sectors) - 1);
}
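
/*
 * Example: with the default interleave of 32768 sectors
 * (log2_interleave_sectors == 15), data_sector 0x12345 yields
 * area 0x2 and offset 0x2345.
 */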

static __u64 get_metadata_sector_and_offset(struct dm_integrity_c *ic, sector_t area,
					    sector_t offset, unsigned *metadata_offset)
{
	__u64 ms;
	unsigned mo;

	ms = area << ic->sb->log2_interleave_sectors;
	if (likely(ic->log2_metadata_run >= 0))
		ms += area << ic->log2_metadata_run;
	else
		ms += area * ic->metadata_run;
	ms >>= ic->log2_buffer_sectors;

	if (likely(ic->log2_tag_size >= 0)) {
		ms += offset >> (SECTOR_SHIFT + ic->log2_buffer_sectors - ic->log2_tag_size);
		mo = (offset << ic->log2_tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
	} else {
		ms += (__u64)offset * ic->tag_size >> (SECTOR_SHIFT + ic->log2_buffer_sectors);
		mo = (offset * ic->tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
	}
	*metadata_offset = mo;
	return ms;
}
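
/*
 * Note the unit change above: the returned value is a dm-bufio block
 * number (sectors shifted down by log2_buffer_sectors), not a raw
 * sector, and *metadata_offset is the byte offset of the tag inside
 * that block. A data sector's tag lives offset * tag_size bytes into
 * the area's metadata run; the power-of-two branches merely turn the
 * multiplications into shifts.
 */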

static sector_t get_data_sector(struct dm_integrity_c *ic, sector_t area, sector_t offset)
{
	sector_t result;

	result = area << ic->sb->log2_interleave_sectors;
	if (likely(ic->log2_metadata_run >= 0))
		result += (area + 1) << ic->log2_metadata_run;
	else
		result += (area + 1) * ic->metadata_run;

	result += (sector_t)ic->initial_sectors + offset;
	return result;
}
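
/*
 * The resulting on-device layout (after the superblock and journal
 * sectors counted in initial_sectors) is:
 *
 *	[metadata 0][data 0][metadata 1][data 1]...
 *
 * which is why the data of area N sits behind N data areas and N + 1
 * metadata runs.
 */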

static void wraparound_section(struct dm_integrity_c *ic, unsigned *sec_ptr)
{
	if (unlikely(*sec_ptr >= ic->journal_sections))
		*sec_ptr -= ic->journal_sections;
}

static int sync_rw_sb(struct dm_integrity_c *ic, int op, int op_flags)
{
	struct dm_io_request io_req;
	struct dm_io_region io_loc;

	io_req.bi_op = op;
	io_req.bi_op_flags = op_flags;
	io_req.mem.type = DM_IO_KMEM;
	io_req.mem.ptr.addr = ic->sb;
	io_req.notify.fn = NULL;
	io_req.client = ic->io;
	io_loc.bdev = ic->dev->bdev;
	io_loc.sector = ic->start;
	io_loc.count = SB_SECTORS;

	return dm_io(&io_req, 1, &io_loc, NULL);
}

static void access_journal_check(struct dm_integrity_c *ic, unsigned section, unsigned offset,
				 bool e, const char *function)
{
#if defined(CONFIG_DM_DEBUG) || defined(INTERNAL_VERIFY)
	unsigned limit = e ? ic->journal_section_entries : ic->journal_section_sectors;

	if (unlikely(section >= ic->journal_sections) ||
	    unlikely(offset >= limit)) {
		printk(KERN_CRIT "%s: invalid access at (%u,%u), limit (%u,%u)\n",
			function, section, offset, ic->journal_sections, limit);
		BUG();
	}
#endif
}

static void page_list_location(struct dm_integrity_c *ic, unsigned section, unsigned offset,
			       unsigned *pl_index, unsigned *pl_offset)
{
	unsigned sector;

	access_journal_check(ic, section, offset, false, "access_journal");

	sector = section * ic->journal_section_sectors + offset;

	*pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	*pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
}
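
/*
 * Example with 4K pages: eight 512-byte journal sectors fit in a page,
 * so journal sector 11 maps to pl_index 1 (11 >> 3) at byte offset
 * 3 * 512 = 1536 within that page.
 */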

static struct journal_sector *access_page_list(struct dm_integrity_c *ic, struct page_list *pl,
					       unsigned section, unsigned offset, unsigned *n_sectors)
{
	unsigned pl_index, pl_offset;
	char *va;

	page_list_location(ic, section, offset, &pl_index, &pl_offset);

	if (n_sectors)
		*n_sectors = (PAGE_SIZE - pl_offset) >> SECTOR_SHIFT;

	va = lowmem_page_address(pl[pl_index].page);

	return (struct journal_sector *)(va + pl_offset);
}

static struct journal_sector *access_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset)
{
	return access_page_list(ic, ic->journal, section, offset, NULL);
}

static struct journal_entry *access_journal_entry(struct dm_integrity_c *ic, unsigned section, unsigned n)
{
	unsigned rel_sector, offset;
	struct journal_sector *js;

	access_journal_check(ic, section, n, true, "access_journal_entry");

	rel_sector = n % JOURNAL_BLOCK_SECTORS;
	offset = n / JOURNAL_BLOCK_SECTORS;

	js = access_journal(ic, section, rel_sector);
	return (struct journal_entry *)((char *)js + offset * ic->journal_entry_size);
}
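
/*
 * Entries are striped across the section's first JOURNAL_BLOCK_SECTORS
 * sectors: entry n sits in metadata sector n % 8 at slot n / 8, so
 * consecutive entries land in consecutive sectors rather than being
 * packed into one. The data described by entry n follows that metadata
 * block at section sector 8 + n (see access_journal_data() below).
 */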

static struct journal_sector *access_journal_data(struct dm_integrity_c *ic, unsigned section, unsigned n)
{
	access_journal_check(ic, section, n, true, "access_journal_data");

	return access_journal(ic, section, n + JOURNAL_BLOCK_SECTORS);
}

static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result[JOURNAL_MAC_SIZE])
{
	SHASH_DESC_ON_STACK(desc, ic->journal_mac);
	int r;
	unsigned j, size;

	desc->tfm = ic->journal_mac;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	r = crypto_shash_init(desc);
	if (unlikely(r)) {
		dm_integrity_io_error(ic, "crypto_shash_init", r);
		goto err;
	}

	for (j = 0; j < ic->journal_section_entries; j++) {
		struct journal_entry *je = access_journal_entry(ic, section, j);
		r = crypto_shash_update(desc, (__u8 *)&je->u.sector, sizeof je->u.sector);
		if (unlikely(r)) {
			dm_integrity_io_error(ic, "crypto_shash_update", r);
			goto err;
		}
	}

	size = crypto_shash_digestsize(ic->journal_mac);

	if (likely(size <= JOURNAL_MAC_SIZE)) {
		r = crypto_shash_final(desc, result);
		if (unlikely(r)) {
			dm_integrity_io_error(ic, "crypto_shash_final", r);
			goto err;
		}
		memset(result + size, 0, JOURNAL_MAC_SIZE - size);
	} else {
		__u8 digest[size];
		r = crypto_shash_final(desc, digest);
		if (unlikely(r)) {
			dm_integrity_io_error(ic, "crypto_shash_final", r);
			goto err;
		}
		memcpy(result, digest, JOURNAL_MAC_SIZE);
	}

	return;
err:
	memset(result, 0, JOURNAL_MAC_SIZE);
}

static void rw_section_mac(struct dm_integrity_c *ic, unsigned section, bool wr)
{
	__u8 result[JOURNAL_MAC_SIZE];
	unsigned j;

	if (!ic->journal_mac)
		return;

	section_mac(ic, section, result);

	for (j = 0; j < JOURNAL_BLOCK_SECTORS; j++) {
		struct journal_sector *js = access_journal(ic, section, j);

		if (likely(wr))
			memcpy(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR);
		else {
			if (memcmp(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR))
				dm_integrity_io_error(ic, "journal mac", -EILSEQ);
		}
	}
}

static void complete_journal_op(void *context)
{
	struct journal_completion *comp = context;
	BUG_ON(!atomic_read(&comp->in_flight));
	if (likely(atomic_dec_and_test(&comp->in_flight)))
		complete(&comp->comp);
}

static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
			unsigned n_sections, struct journal_completion *comp)
{
	struct async_submit_ctl submit;
	size_t n_bytes = (size_t)(n_sections * ic->journal_section_sectors) << SECTOR_SHIFT;
	unsigned pl_index, pl_offset, section_index;
	struct page_list *source_pl, *target_pl;

	if (likely(encrypt)) {
		source_pl = ic->journal;
		target_pl = ic->journal_io;
	} else {
		source_pl = ic->journal_io;
		target_pl = ic->journal;
	}

	page_list_location(ic, section, 0, &pl_index, &pl_offset);

	atomic_add(roundup(pl_offset + n_bytes, PAGE_SIZE) >> PAGE_SHIFT, &comp->in_flight);

	init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, complete_journal_op, comp, NULL);

	section_index = pl_index;

	do {
		size_t this_step;
		struct page *src_pages[2];
		struct page *dst_page;

		while (unlikely(pl_index == section_index)) {
			unsigned dummy;
			if (likely(encrypt))
				rw_section_mac(ic, section, true);
			section++;
			n_sections--;
			if (!n_sections)
				break;
			page_list_location(ic, section, 0, &section_index, &dummy);
		}

		this_step = min(n_bytes, (size_t)PAGE_SIZE - pl_offset);
		dst_page = target_pl[pl_index].page;
		src_pages[0] = source_pl[pl_index].page;
		src_pages[1] = ic->journal_xor[pl_index].page;

		async_xor(dst_page, src_pages, pl_offset, 2, this_step, &submit);

		pl_index++;
		pl_offset = 0;
		n_bytes -= this_step;
	} while (n_bytes);

	BUG_ON(n_sections);

	async_tx_issue_pending_all();
}

static void complete_journal_encrypt(struct crypto_async_request *req, int err)
{
	struct journal_completion *comp = req->data;
	if (unlikely(err)) {
		if (likely(err == -EINPROGRESS)) {
			complete(&comp->ic->crypto_backoff);
			return;
		}
		dm_integrity_io_error(comp->ic, "asynchronous encrypt", err);
	}
	complete_journal_op(comp);
}

static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_completion *comp)
{
	int r;
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
				      complete_journal_encrypt, comp);
	if (likely(encrypt))
		r = crypto_skcipher_encrypt(req);
	else
		r = crypto_skcipher_decrypt(req);
	if (likely(!r))
		return false;
	if (likely(r == -EINPROGRESS))
		return true;
	if (likely(r == -EBUSY)) {
		wait_for_completion(&comp->ic->crypto_backoff);
		reinit_completion(&comp->ic->crypto_backoff);
		return true;
	}
	dm_integrity_io_error(comp->ic, "encrypt", r);
	return false;
}

static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
			  unsigned n_sections, struct journal_completion *comp)
{
	struct scatterlist **source_sg;
	struct scatterlist **target_sg;

	atomic_add(2, &comp->in_flight);

	if (likely(encrypt)) {
		source_sg = ic->journal_scatterlist;
		target_sg = ic->journal_io_scatterlist;
	} else {
		source_sg = ic->journal_io_scatterlist;
		target_sg = ic->journal_scatterlist;
	}

	do {
		struct skcipher_request *req;
		unsigned ivsize;
		char *iv;

		if (likely(encrypt))
			rw_section_mac(ic, section, true);

		req = ic->sk_requests[section];
		ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
		iv = req->iv;

		memcpy(iv, iv + ivsize, ivsize);

		req->src = source_sg[section];
		req->dst = target_sg[section];

		if (unlikely(do_crypt(encrypt, req, comp)))
			atomic_inc(&comp->in_flight);

		section++;
		n_sections--;
	} while (n_sections);

	atomic_dec(&comp->in_flight);
	complete_journal_op(comp);
}
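
/*
 * A subtlety in crypt_journal() above: the memcpy(iv, iv + ivsize, ivsize)
 * suggests each request's IV buffer is twice ivsize, with a pristine copy
 * of the per-section IV kept in the second half (the allocation happens in
 * the constructor, outside this excerpt). The working first half is
 * restored from it before every operation, because the cipher may have
 * overwritten it during the previous pass.
 */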

static void encrypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
			    unsigned n_sections, struct journal_completion *comp)
{
	if (ic->journal_xor)
		return xor_journal(ic, encrypt, section, n_sections, comp);
	else
		return crypt_journal(ic, encrypt, section, n_sections, comp);
}

static void complete_journal_io(unsigned long error, void *context)
{
	struct journal_completion *comp = context;
	if (unlikely(error != 0))
		dm_integrity_io_error(comp->ic, "writing journal", -EIO);
	complete_journal_op(comp);
}

static void rw_journal(struct dm_integrity_c *ic, int op, int op_flags, unsigned section,
		       unsigned n_sections, struct journal_completion *comp)
{
	struct dm_io_request io_req;
	struct dm_io_region io_loc;
	unsigned sector, n_sectors, pl_index, pl_offset;
	int r;

	if (unlikely(dm_integrity_failed(ic))) {
		if (comp)
			complete_journal_io(-1UL, comp);
		return;
	}

	sector = section * ic->journal_section_sectors;
	n_sectors = n_sections * ic->journal_section_sectors;

	pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);

	io_req.bi_op = op;
	io_req.bi_op_flags = op_flags;
	io_req.mem.type = DM_IO_PAGE_LIST;
	if (ic->journal_io)
		io_req.mem.ptr.pl = &ic->journal_io[pl_index];
	else
		io_req.mem.ptr.pl = &ic->journal[pl_index];
	io_req.mem.offset = pl_offset;
	if (likely(comp != NULL)) {
		io_req.notify.fn = complete_journal_io;
		io_req.notify.context = comp;
	} else {
		io_req.notify.fn = NULL;
	}
	io_req.client = ic->io;
	io_loc.bdev = ic->dev->bdev;
	io_loc.sector = ic->start + SB_SECTORS + sector;
	io_loc.count = n_sectors;

	r = dm_io(&io_req, 1, &io_loc, NULL);
	if (unlikely(r)) {
		dm_integrity_io_error(ic, op == REQ_OP_READ ? "reading journal" : "writing journal", r);
		if (comp) {
			WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
			complete_journal_io(-1UL, comp);
		}
	}
}

static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsigned commit_sections)
{
	struct journal_completion io_comp;
	struct journal_completion crypt_comp_1;
	struct journal_completion crypt_comp_2;
	unsigned i;

	io_comp.ic = ic;
	io_comp.comp = COMPLETION_INITIALIZER_ONSTACK(io_comp.comp);

	if (commit_start + commit_sections <= ic->journal_sections) {
		io_comp.in_flight = (atomic_t)ATOMIC_INIT(1);
		if (ic->journal_io) {
			crypt_comp_1.ic = ic;
			crypt_comp_1.comp = COMPLETION_INITIALIZER_ONSTACK(crypt_comp_1.comp);
			crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
			encrypt_journal(ic, true, commit_start, commit_sections, &crypt_comp_1);
			wait_for_completion_io(&crypt_comp_1.comp);
		} else {
			for (i = 0; i < commit_sections; i++)
				rw_section_mac(ic, commit_start + i, true);
		}
		rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, commit_sections, &io_comp);
	} else {
		unsigned to_end;
		io_comp.in_flight = (atomic_t)ATOMIC_INIT(2);
		to_end = ic->journal_sections - commit_start;
		if (ic->journal_io) {
			crypt_comp_1.ic = ic;
			crypt_comp_1.comp = COMPLETION_INITIALIZER_ONSTACK(crypt_comp_1.comp);
			crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
			encrypt_journal(ic, true, commit_start, to_end, &crypt_comp_1);
			if (try_wait_for_completion(&crypt_comp_1.comp)) {
				rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
				crypt_comp_1.comp = COMPLETION_INITIALIZER_ONSTACK(crypt_comp_1.comp);
				crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
				encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_1);
				wait_for_completion_io(&crypt_comp_1.comp);
			} else {
				crypt_comp_2.ic = ic;
				crypt_comp_2.comp = COMPLETION_INITIALIZER_ONSTACK(crypt_comp_2.comp);
				crypt_comp_2.in_flight = (atomic_t)ATOMIC_INIT(0);
				encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_2);
				wait_for_completion_io(&crypt_comp_1.comp);
				rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
				wait_for_completion_io(&crypt_comp_2.comp);
			}
		} else {
			for (i = 0; i < to_end; i++)
				rw_section_mac(ic, commit_start + i, true);
			rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
			for (i = 0; i < commit_sections - to_end; i++)
				rw_section_mac(ic, i, true);
		}
		rw_journal(ic, REQ_OP_WRITE, REQ_FUA, 0, commit_sections - to_end, &io_comp);
	}

	wait_for_completion_io(&io_comp.comp);
}

static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset,
			      unsigned n_sectors, sector_t target, io_notify_fn fn, void *data)
{
	struct dm_io_request io_req;
	struct dm_io_region io_loc;
	int r;
	unsigned sector, pl_index, pl_offset;

	if (unlikely(dm_integrity_failed(ic))) {
		fn(-1UL, data);
		return;
	}

	sector = section * ic->journal_section_sectors + JOURNAL_BLOCK_SECTORS + offset;

	pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);

	io_req.bi_op = REQ_OP_WRITE;
	io_req.bi_op_flags = 0;
	io_req.mem.type = DM_IO_PAGE_LIST;
	io_req.mem.ptr.pl = &ic->journal[pl_index];
	io_req.mem.offset = pl_offset;
	io_req.notify.fn = fn;
	io_req.notify.context = data;
	io_req.client = ic->io;
	io_loc.bdev = ic->dev->bdev;
	io_loc.sector = ic->start + target;
	io_loc.count = n_sectors;

	r = dm_io(&io_req, 1, &io_loc, NULL);
	if (unlikely(r)) {
		WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
		fn(-1UL, data);
	}
}

static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
{
	struct rb_node **n = &ic->in_progress.rb_node;
	struct rb_node *parent;

	parent = NULL;

	while (*n) {
		struct dm_integrity_range *range = container_of(*n, struct dm_integrity_range, node);

		parent = *n;
		if (new_range->logical_sector + new_range->n_sectors <= range->logical_sector) {
			n = &range->node.rb_left;
		} else if (new_range->logical_sector >= range->logical_sector + range->n_sectors) {
			n = &range->node.rb_right;
		} else {
			return false;
		}
	}

	rb_link_node(&new_range->node, parent, n);
	rb_insert_color(&new_range->node, &ic->in_progress);

	return true;
}
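
/*
 * Because in-flight ranges never overlap, the comparison above can treat
 * a whole range as a single key: descend left when the new range ends at
 * or before an existing one, right when it starts at or after its end,
 * and report a conflict otherwise. Callers that get false back must wait
 * on endio_wait and retry.
 */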

static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity_range *range)
{
	rb_erase(&range->node, &ic->in_progress);
	wake_up_locked(&ic->endio_wait);
}

static void remove_range(struct dm_integrity_c *ic, struct dm_integrity_range *range)
{
	unsigned long flags;

	spin_lock_irqsave(&ic->endio_wait.lock, flags);
	remove_range_unlocked(ic, range);
	spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
}

static void init_journal_node(struct journal_node *node)
{
	RB_CLEAR_NODE(&node->node);
	node->sector = (sector_t)-1;
}

static void add_journal_node(struct dm_integrity_c *ic, struct journal_node *node, sector_t sector)
{
	struct rb_node **link;
	struct rb_node *parent;

	node->sector = sector;
	BUG_ON(!RB_EMPTY_NODE(&node->node));

	link = &ic->journal_tree_root.rb_node;
	parent = NULL;

	while (*link) {
		struct journal_node *j;
		parent = *link;
		j = container_of(parent, struct journal_node, node);
		if (sector < j->sector)
			link = &j->node.rb_left;
		else
			link = &j->node.rb_right;
	}

	rb_link_node(&node->node, parent, link);
	rb_insert_color(&node->node, &ic->journal_tree_root);
}

static void remove_journal_node(struct dm_integrity_c *ic, struct journal_node *node)
{
	BUG_ON(RB_EMPTY_NODE(&node->node));
	rb_erase(&node->node, &ic->journal_tree_root);
	init_journal_node(node);
}

#define NOT_FOUND	(-1U)

static unsigned find_journal_node(struct dm_integrity_c *ic, sector_t sector, sector_t *next_sector)
{
	struct rb_node *n = ic->journal_tree_root.rb_node;
	unsigned found = NOT_FOUND;
	*next_sector = (sector_t)-1;
	while (n) {
		struct journal_node *j = container_of(n, struct journal_node, node);
		if (sector == j->sector) {
			found = j - ic->journal_tree;
		}
		if (sector < j->sector) {
			*next_sector = j->sector;
			n = j->node.rb_left;
		} else {
			n = j->node.rb_right;
		}
	}

	return found;
}
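
/*
 * Duplicate keys are allowed in this tree (the same sector may sit in
 * several journal entries). add_journal_node() inserts equal keys to the
 * right, and the search above keeps descending right after a match, so
 * 'found' ends up indexing the most recently added entry for the sector,
 * while *next_sector reports the closest larger key.
 */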

static bool test_journal_node(struct dm_integrity_c *ic, unsigned pos, sector_t sector)
{
	struct journal_node *node, *next_node;
	struct rb_node *next;

	if (unlikely(pos >= ic->journal_entries))
		return false;
	node = &ic->journal_tree[pos];
	if (unlikely(RB_EMPTY_NODE(&node->node)))
		return false;
	if (unlikely(node->sector != sector))
		return false;

	next = rb_next(&node->node);
	if (unlikely(!next))
		return true;

	next_node = container_of(next, struct journal_node, node);
	return next_node->sector != sector;
}

static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_node *node)
{
	struct rb_node *next;
	struct journal_node *next_node;
	unsigned next_section;

	BUG_ON(RB_EMPTY_NODE(&node->node));

	next = rb_next(&node->node);
	if (unlikely(!next))
		return false;

	next_node = container_of(next, struct journal_node, node);

	if (next_node->sector != node->sector)
		return false;

	next_section = (unsigned)(next_node - ic->journal_tree) / ic->journal_section_entries;
	if (next_section >= ic->committed_section &&
	    next_section < ic->committed_section + ic->n_committed_sections)
		return true;
	if (next_section + ic->journal_sections < ic->committed_section + ic->n_committed_sections)
		return true;

	return false;
}

#define TAG_READ	0
#define TAG_WRITE	1
#define TAG_CMP		2

static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, sector_t *metadata_block,
			       unsigned *metadata_offset, unsigned total_size, int op)
{
	do {
		unsigned char *data, *dp;
		struct dm_buffer *b;
		unsigned to_copy;
		int r;

		r = dm_integrity_failed(ic);
		if (unlikely(r))
			return r;

		data = dm_bufio_read(ic->bufio, *metadata_block, &b);
		if (unlikely(IS_ERR(data)))
			return PTR_ERR(data);

		to_copy = min((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - *metadata_offset, total_size);
		dp = data + *metadata_offset;
		if (op == TAG_READ) {
			memcpy(tag, dp, to_copy);
		} else if (op == TAG_WRITE) {
			memcpy(dp, tag, to_copy);
			dm_bufio_mark_buffer_dirty(b);
		} else {
			/* e.g.: op == TAG_CMP */
			if (unlikely(memcmp(dp, tag, to_copy))) {
				unsigned i;

				for (i = 0; i < to_copy; i++) {
					if (dp[i] != tag[i])
						break;
					total_size--;
				}
				dm_bufio_release(b);
				return total_size;
			}
		}
		dm_bufio_release(b);

		tag += to_copy;
		*metadata_offset += to_copy;
		if (unlikely(*metadata_offset == 1U << SECTOR_SHIFT << ic->log2_buffer_sectors)) {
			(*metadata_block)++;
			*metadata_offset = 0;
		}
		total_size -= to_copy;
	} while (unlikely(total_size));

	return 0;
}
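
/*
 * Return-value convention: 0 means success (a full match for TAG_CMP),
 * a negative value is an error, and for TAG_CMP a positive value is the
 * number of tag bytes from the first mismatching byte to the end of the
 * requested region. integrity_metadata() uses that count to compute the
 * sector at which the checksum failed.
 */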

static void dm_integrity_flush_buffers(struct dm_integrity_c *ic)
{
	int r;
	r = dm_bufio_write_dirty_buffers(ic->bufio);
	if (unlikely(r))
		dm_integrity_io_error(ic, "writing tags", r);
}

static void sleep_on_endio_wait(struct dm_integrity_c *ic)
{
	DECLARE_WAITQUEUE(wait, current);
	__add_wait_queue(&ic->endio_wait, &wait);
	__set_current_state(TASK_UNINTERRUPTIBLE);
	spin_unlock_irq(&ic->endio_wait.lock);
	io_schedule();
	spin_lock_irq(&ic->endio_wait.lock);
	__remove_wait_queue(&ic->endio_wait, &wait);
}

static void autocommit_fn(unsigned long data)
{
	struct dm_integrity_c *ic = (struct dm_integrity_c *)data;

	if (likely(!dm_integrity_failed(ic)))
		queue_work(ic->commit_wq, &ic->commit_work);
}

static void schedule_autocommit(struct dm_integrity_c *ic)
{
	if (!timer_pending(&ic->autocommit_timer))
		mod_timer(&ic->autocommit_timer, jiffies + ic->autocommit_jiffies);
}

static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
{
	struct bio *bio;
	spin_lock_irq(&ic->endio_wait.lock);
	bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
	bio_list_add(&ic->flush_bio_list, bio);
	spin_unlock_irq(&ic->endio_wait.lock);
	queue_work(ic->commit_wq, &ic->commit_work);
}

static void do_endio(struct dm_integrity_c *ic, struct bio *bio)
{
	int r = dm_integrity_failed(ic);
	if (unlikely(r) && !bio->bi_error)
		bio->bi_error = r;
	bio_endio(bio);
}

static void do_endio_flush(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
{
	struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));

	if (unlikely(dio->fua) && likely(!bio->bi_error) && likely(!dm_integrity_failed(ic)))
		submit_flush_bio(ic, dio);
	else
		do_endio(ic, bio);
}

static void dec_in_flight(struct dm_integrity_io *dio)
{
	if (atomic_dec_and_test(&dio->in_flight)) {
		struct dm_integrity_c *ic = dio->ic;
		struct bio *bio;

		remove_range(ic, &dio->range);

		if (unlikely(dio->write))
			schedule_autocommit(ic);

		bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));

		if (unlikely(dio->bi_error) && !bio->bi_error)
			bio->bi_error = dio->bi_error;
		if (likely(!bio->bi_error) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) {
			dio->range.logical_sector += dio->range.n_sectors;
			bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT);
			INIT_WORK(&dio->work, integrity_bio_wait);
			queue_work(ic->wait_wq, &dio->work);
			return;
		}
		do_endio_flush(ic, dio);
	}
}

static void integrity_end_io(struct bio *bio)
{
	struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));

	bio->bi_iter = dio->orig_bi_iter;
	bio->bi_bdev = dio->orig_bi_bdev;
	if (dio->orig_bi_integrity) {
		bio->bi_integrity = dio->orig_bi_integrity;
		bio->bi_opf |= REQ_INTEGRITY;
	}
	bio->bi_end_io = dio->orig_bi_end_io;

	if (dio->completion)
		complete(dio->completion);

	dec_in_flight(dio);
}

static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector,
				      const char *data, char *result)
{
	__u64 sector_le = cpu_to_le64(sector);
	SHASH_DESC_ON_STACK(req, ic->internal_hash);
	int r;
	unsigned digest_size;

	req->tfm = ic->internal_hash;
	req->flags = 0;

	r = crypto_shash_init(req);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_init", r);
		goto failed;
	}

	r = crypto_shash_update(req, (const __u8 *)&sector_le, sizeof sector_le);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_update", r);
		goto failed;
	}

	r = crypto_shash_update(req, data, 1 << SECTOR_SHIFT);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_update", r);
		goto failed;
	}

	r = crypto_shash_final(req, result);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_final", r);
		goto failed;
	}

	digest_size = crypto_shash_digestsize(ic->internal_hash);
	if (unlikely(digest_size < ic->tag_size))
		memset(result + digest_size, 0, ic->tag_size - digest_size);

	return;

failed:
	/* this shouldn't happen anyway, the hash functions have no reason to fail */
	get_random_bytes(result, ic->tag_size);
}
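
/*
 * So the stored tag is effectively H(le64(sector) || 512 bytes of data),
 * zero-padded when the digest is shorter than tag_size and truncated by
 * the callers when it is longer. Hashing the sector number in makes
 * identical data blocks produce different tags at different locations,
 * so a block copied wholesale to another sector is still detected.
 */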

static void integrity_metadata(struct work_struct *w)
{
	struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
	struct dm_integrity_c *ic = dio->ic;

	int r;

	if (ic->internal_hash) {
		struct bvec_iter iter;
		struct bio_vec bv;
		unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
		struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
		char *checksums;
		unsigned extra_space = digest_size > ic->tag_size ? digest_size - ic->tag_size : 0;
		char checksums_onstack[ic->tag_size + extra_space];
		unsigned sectors_to_process = dio->range.n_sectors;
		sector_t sector = dio->range.logical_sector;

		checksums = kmalloc((PAGE_SIZE >> SECTOR_SHIFT) * ic->tag_size + extra_space,
				    GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
		if (!checksums)
			checksums = checksums_onstack;

		__bio_for_each_segment(bv, bio, iter, dio->orig_bi_iter) {
			unsigned pos;
			char *mem, *checksums_ptr;

again:
			mem = (char *)kmap_atomic(bv.bv_page) + bv.bv_offset;
			pos = 0;
			checksums_ptr = checksums;
			do {
				integrity_sector_checksum(ic, sector, mem + pos, checksums_ptr);
				checksums_ptr += ic->tag_size;
				sectors_to_process--;
				pos += 1 << SECTOR_SHIFT;
				sector++;
			} while (pos < bv.bv_len && sectors_to_process && checksums != checksums_onstack);
			kunmap_atomic(mem);

			r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
						checksums_ptr - checksums, !dio->write ? TAG_CMP : TAG_WRITE);
			if (unlikely(r)) {
				if (r > 0) {
					DMERR("Checksum failed at sector 0x%llx",
					      (unsigned long long)(sector - ((r + ic->tag_size - 1) / ic->tag_size)));
					r = -EILSEQ;
				}
				if (likely(checksums != checksums_onstack))
					kfree(checksums);
				goto error;
			}

			if (!sectors_to_process)
				break;

			if (unlikely(pos < bv.bv_len)) {
				bv.bv_offset += pos;
				bv.bv_len -= pos;
				goto again;
			}
		}

		if (likely(checksums != checksums_onstack))
			kfree(checksums);
	} else {
		struct bio_integrity_payload *bip = dio->orig_bi_integrity;

		if (bip) {
			struct bio_vec biv;
			struct bvec_iter iter;
			unsigned data_to_process = dio->range.n_sectors * ic->tag_size;

			bip_for_each_vec(biv, bip, iter) {
				unsigned char *tag;
				unsigned this_len;

				BUG_ON(PageHighMem(biv.bv_page));
				tag = lowmem_page_address(biv.bv_page) + biv.bv_offset;
				this_len = min(biv.bv_len, data_to_process);
				r = dm_integrity_rw_tag(ic, tag, &dio->metadata_block, &dio->metadata_offset,
							this_len, !dio->write ? TAG_READ : TAG_WRITE);
				if (unlikely(r))
					goto error;
				data_to_process -= this_len;
				if (!data_to_process)
					break;
			}
		}
	}
	dec_in_flight(dio);
	return;
error:
	dio->bi_error = r;
	dec_in_flight(dio);
}

static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_integrity_c *ic = ti->private;
	struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));

	sector_t area, offset;

	dio->ic = ic;
	dio->bi_error = 0;

	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
		submit_flush_bio(ic, dio);
		return DM_MAPIO_SUBMITTED;
	}

	dio->range.logical_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
	dio->write = bio_op(bio) == REQ_OP_WRITE;
	dio->fua = dio->write && bio->bi_opf & REQ_FUA;
	if (unlikely(dio->fua)) {
		/*
		 * Don't pass down the FUA flag because we have to flush
		 * disk cache anyway.
		 */
		bio->bi_opf &= ~REQ_FUA;
	}
	if (unlikely(dio->range.logical_sector + bio_sectors(bio) > ic->provided_data_sectors)) {
		DMERR("Too big sector number: 0x%llx + 0x%x > 0x%llx",
		      (unsigned long long)dio->range.logical_sector, bio_sectors(bio),
		      (unsigned long long)ic->provided_data_sectors);
		return -EIO;
	}

	get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
	dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
	bio->bi_iter.bi_sector = get_data_sector(ic, area, offset);

	dm_integrity_map_continue(dio, true);
	return DM_MAPIO_SUBMITTED;
}
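
/*
 * Note that bi_sector is rewritten here to the interleaved data sector
 * relative to the underlying device; the target's start offset
 * (ic->start) is only added in dm_integrity_map_continue() just before
 * the bio is resubmitted.
 */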

static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
				 unsigned journal_section, unsigned journal_entry)
{
	struct dm_integrity_c *ic = dio->ic;
	sector_t logical_sector;
	unsigned n_sectors;

	logical_sector = dio->range.logical_sector;
	n_sectors = dio->range.n_sectors;
	do {
		struct bio_vec bv = bio_iovec(bio);
		char *mem;

		if (unlikely(bv.bv_len >> SECTOR_SHIFT > n_sectors))
			bv.bv_len = n_sectors << SECTOR_SHIFT;
		n_sectors -= bv.bv_len >> SECTOR_SHIFT;
		bio_advance_iter(bio, &bio->bi_iter, bv.bv_len);
retry_kmap:
		mem = kmap_atomic(bv.bv_page);
		if (likely(dio->write))
			flush_dcache_page(bv.bv_page);

		do {
			struct journal_entry *je = access_journal_entry(ic, journal_section, journal_entry);

			if (unlikely(!dio->write)) {
				struct journal_sector *js;

				if (unlikely(journal_entry_is_inprogress(je))) {
					flush_dcache_page(bv.bv_page);
					kunmap_atomic(mem);

					__io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
					goto retry_kmap;
				}
				smp_rmb();
				BUG_ON(journal_entry_get_sector(je) != logical_sector);
				js = access_journal_data(ic, journal_section, journal_entry);
				memcpy(mem + bv.bv_offset, js, JOURNAL_SECTOR_DATA);
				memcpy(mem + bv.bv_offset + JOURNAL_SECTOR_DATA, &je->last_bytes, sizeof je->last_bytes);
#ifdef INTERNAL_VERIFY
				if (ic->internal_hash) {
					char checksums_onstack[max(crypto_shash_digestsize(ic->internal_hash), ic->tag_size)];

					integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
					if (unlikely(memcmp(checksums_onstack, je->tag, ic->tag_size))) {
						DMERR("Checksum failed when reading from journal, at sector 0x%llx",
						      (unsigned long long)logical_sector);
					}
				}
#endif
			}

			if (!ic->internal_hash) {
				struct bio_integrity_payload *bip = bio_integrity(bio);
				unsigned tag_todo = ic->tag_size;
				char *tag_ptr = je->tag;

				if (bip) do {
					struct bio_vec biv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
					unsigned tag_now = min(biv.bv_len, tag_todo);
					char *tag_addr;
					BUG_ON(PageHighMem(biv.bv_page));
					tag_addr = lowmem_page_address(biv.bv_page) + biv.bv_offset;
					if (likely(dio->write))
						memcpy(tag_ptr, tag_addr, tag_now);
					else
						memcpy(tag_addr, tag_ptr, tag_now);
					bvec_iter_advance(bip->bip_vec, &bip->bip_iter, tag_now);
					tag_ptr += tag_now;
					tag_todo -= tag_now;
				} while (unlikely(tag_todo)); else {
					if (likely(dio->write))
						memset(tag_ptr, 0, tag_todo);
				}
			}

			if (likely(dio->write)) {
				struct journal_sector *js;

				js = access_journal_data(ic, journal_section, journal_entry);
				memcpy(js, mem + bv.bv_offset, 1 << SECTOR_SHIFT);
				je->last_bytes = js->commit_id;

				if (ic->internal_hash) {
					unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
					if (unlikely(digest_size > ic->tag_size)) {
						char checksums_onstack[digest_size];
						integrity_sector_checksum(ic, logical_sector, (char *)js, checksums_onstack);
						memcpy(je->tag, checksums_onstack, ic->tag_size);
					} else
						integrity_sector_checksum(ic, logical_sector, (char *)js, je->tag);
				}

				journal_entry_set_sector(je, logical_sector);
			}
			logical_sector++;

			journal_entry++;
			if (unlikely(journal_entry == ic->journal_section_entries)) {
				journal_entry = 0;
				journal_section++;
				wraparound_section(ic, &journal_section);
			}

			bv.bv_offset += 1 << SECTOR_SHIFT;
		} while (bv.bv_len -= 1 << SECTOR_SHIFT);

		if (unlikely(!dio->write))
			flush_dcache_page(bv.bv_page);
		kunmap_atomic(mem);
	} while (n_sectors);

	if (likely(dio->write)) {
		smp_mb();
		if (unlikely(waitqueue_active(&ic->copy_to_journal_wait)))
			wake_up(&ic->copy_to_journal_wait);
		if (ACCESS_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) {
			queue_work(ic->commit_wq, &ic->commit_work);
		} else {
			schedule_autocommit(ic);
		}
	} else {
		remove_range(ic, &dio->range);
	}

	if (unlikely(bio->bi_iter.bi_size)) {
		sector_t area, offset;

		dio->range.logical_sector = logical_sector;
		get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
		dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
		return true;
	}

	return false;
}
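
/*
 * The boolean result implements a retry protocol: true means the bio was
 * only partially covered by the allocated journal entries, so the caller
 * re-takes the lock and continues with a fresh range; false means it was
 * fully serviced. Also note the last_bytes trick above: every journal
 * data sector ends in a commit_id, so the last 8 bytes of the caller's
 * data are displaced into je->last_bytes and put back when the sector
 * leaves the journal.
 */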

static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map)
{
	struct dm_integrity_c *ic = dio->ic;
	struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
	unsigned journal_section, journal_entry;
	unsigned journal_read_pos;
	struct completion read_comp;
	bool need_sync_io = ic->internal_hash && !dio->write;

	if (need_sync_io && from_map) {
		INIT_WORK(&dio->work, integrity_bio_wait);
		queue_work(ic->metadata_wq, &dio->work);
		return;
	}

lock_retry:
	spin_lock_irq(&ic->endio_wait.lock);
retry:
	if (unlikely(dm_integrity_failed(ic))) {
		spin_unlock_irq(&ic->endio_wait.lock);
		do_endio(ic, bio);
		return;
	}
	dio->range.n_sectors = bio_sectors(bio);
	journal_read_pos = NOT_FOUND;
	if (likely(ic->mode == 'J')) {
		if (dio->write) {
			unsigned next_entry, i, pos;
			unsigned ws, we;

			dio->range.n_sectors = min(dio->range.n_sectors, ic->free_sectors);
			if (unlikely(!dio->range.n_sectors))
				goto sleep;
			ic->free_sectors -= dio->range.n_sectors;
			journal_section = ic->free_section;
			journal_entry = ic->free_section_entry;

			next_entry = ic->free_section_entry + dio->range.n_sectors;
			ic->free_section_entry = next_entry % ic->journal_section_entries;
			ic->free_section += next_entry / ic->journal_section_entries;
			ic->n_uncommitted_sections += next_entry / ic->journal_section_entries;
			wraparound_section(ic, &ic->free_section);

			pos = journal_section * ic->journal_section_entries + journal_entry;
			ws = journal_section;
			we = journal_entry;
			for (i = 0; i < dio->range.n_sectors; i++) {
				struct journal_entry *je;

				add_journal_node(ic, &ic->journal_tree[pos], dio->range.logical_sector + i);
				pos++;
				if (unlikely(pos >= ic->journal_entries))
					pos = 0;

				je = access_journal_entry(ic, ws, we);
				BUG_ON(!journal_entry_is_unused(je));
				journal_entry_set_inprogress(je);
				we++;
				if (unlikely(we == ic->journal_section_entries)) {
					we = 0;
					ws++;
					wraparound_section(ic, &ws);
				}
			}

			spin_unlock_irq(&ic->endio_wait.lock);
			goto journal_read_write;
		} else {
			sector_t next_sector;
			journal_read_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
			if (likely(journal_read_pos == NOT_FOUND)) {
				if (unlikely(dio->range.n_sectors > next_sector - dio->range.logical_sector))
					dio->range.n_sectors = next_sector - dio->range.logical_sector;
			} else {
				unsigned i;
				for (i = 1; i < dio->range.n_sectors; i++) {
					if (!test_journal_node(ic, journal_read_pos + i, dio->range.logical_sector + i))
						break;
				}
				dio->range.n_sectors = i;
			}
		}
	}
	if (unlikely(!add_new_range(ic, &dio->range))) {
		/*
		 * We must not sleep in the request routine because it could
		 * stall bios on current->bio_list.
		 * So, we offload the bio to a workqueue if we have to sleep.
		 */
sleep:
		if (from_map) {
			spin_unlock_irq(&ic->endio_wait.lock);
			INIT_WORK(&dio->work, integrity_bio_wait);
			queue_work(ic->wait_wq, &dio->work);
			return;
		} else {
			sleep_on_endio_wait(ic);
			goto retry;
		}
	}
	spin_unlock_irq(&ic->endio_wait.lock);

	if (unlikely(journal_read_pos != NOT_FOUND)) {
		journal_section = journal_read_pos / ic->journal_section_entries;
		journal_entry = journal_read_pos % ic->journal_section_entries;
		goto journal_read_write;
	}

	dio->in_flight = (atomic_t)ATOMIC_INIT(2);

	if (need_sync_io) {
		read_comp = COMPLETION_INITIALIZER_ONSTACK(read_comp);
		dio->completion = &read_comp;
	} else
		dio->completion = NULL;

	dio->orig_bi_iter = bio->bi_iter;

	dio->orig_bi_bdev = bio->bi_bdev;
	bio->bi_bdev = ic->dev->bdev;

	dio->orig_bi_integrity = bio_integrity(bio);
	bio->bi_integrity = NULL;
	bio->bi_opf &= ~REQ_INTEGRITY;

	dio->orig_bi_end_io = bio->bi_end_io;
	bio->bi_end_io = integrity_end_io;

	bio->bi_iter.bi_size = dio->range.n_sectors << SECTOR_SHIFT;
	bio->bi_iter.bi_sector += ic->start;
	generic_make_request(bio);

	if (need_sync_io) {
		wait_for_completion_io(&read_comp);
		integrity_metadata(&dio->work);
	} else {
		INIT_WORK(&dio->work, integrity_metadata);
		queue_work(ic->metadata_wq, &dio->work);
	}

	return;

journal_read_write:
	if (unlikely(__journal_read_write(dio, bio, journal_section, journal_entry)))
		goto lock_retry;

	do_endio_flush(ic, dio);
}

static void integrity_bio_wait(struct work_struct *w)
{
	struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);

	dm_integrity_map_continue(dio, false);
}

static void pad_uncommitted(struct dm_integrity_c *ic)
{
	if (ic->free_section_entry) {
		ic->free_sectors -= ic->journal_section_entries - ic->free_section_entry;
		ic->free_section_entry = 0;
		ic->free_section++;
		wraparound_section(ic, &ic->free_section);
		ic->n_uncommitted_sections++;
	}
}
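
/*
 * Padding gives up the unused entries of a partially filled section so
 * that commit and writeback always operate on whole sections; the
 * sectors sacrificed here are accounted back into free_sectors when the
 * writer finishes the section (see integrity_writer()).
 */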

static void integrity_commit(struct work_struct *w)
{
	struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, commit_work);
	unsigned commit_start, commit_sections;
	unsigned i, j, n;
	struct bio *flushes;

	del_timer(&ic->autocommit_timer);

	spin_lock_irq(&ic->endio_wait.lock);
	flushes = bio_list_get(&ic->flush_bio_list);
	if (unlikely(ic->mode != 'J')) {
		spin_unlock_irq(&ic->endio_wait.lock);
		dm_integrity_flush_buffers(ic);
		goto release_flush_bios;
	}

	pad_uncommitted(ic);
	commit_start = ic->uncommitted_section;
	commit_sections = ic->n_uncommitted_sections;
	spin_unlock_irq(&ic->endio_wait.lock);

	if (!commit_sections)
		goto release_flush_bios;

	i = commit_start;
	for (n = 0; n < commit_sections; n++) {
		for (j = 0; j < ic->journal_section_entries; j++) {
			struct journal_entry *je;
			je = access_journal_entry(ic, i, j);
			io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
		}
		for (j = 0; j < ic->journal_section_sectors; j++) {
			struct journal_sector *js;
			js = access_journal(ic, i, j);
			js->commit_id = dm_integrity_commit_id(ic, i, j, ic->commit_seq);
		}
		i++;
		if (unlikely(i >= ic->journal_sections))
			ic->commit_seq = next_commit_seq(ic->commit_seq);
		wraparound_section(ic, &i);
	}
	smp_rmb();

	write_journal(ic, commit_start, commit_sections);

	spin_lock_irq(&ic->endio_wait.lock);
	ic->uncommitted_section += commit_sections;
	wraparound_section(ic, &ic->uncommitted_section);
	ic->n_uncommitted_sections -= commit_sections;
	ic->n_committed_sections += commit_sections;
	spin_unlock_irq(&ic->endio_wait.lock);

	if (ACCESS_ONCE(ic->free_sectors) <= ic->free_sectors_threshold)
		queue_work(ic->writer_wq, &ic->writer_work);

release_flush_bios:
	while (flushes) {
		struct bio *next = flushes->bi_next;
		flushes->bi_next = NULL;
		do_endio(ic, flushes);
		flushes = next;
	}
}
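
/*
 * Flush and FUA semantics hinge on the ordering above: flush bios are
 * collected under the lock before the journal is committed with a FUA
 * write, so by the time they are completed at release_flush_bios,
 * everything they were ordered against is stable in the journal.
 */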

static void complete_copy_from_journal(unsigned long error, void *context)
{
	struct journal_io *io = context;
	struct journal_completion *comp = io->comp;
	struct dm_integrity_c *ic = comp->ic;
	remove_range(ic, &io->range);
	mempool_free(io, ic->journal_io_mempool);
	if (unlikely(error != 0))
		dm_integrity_io_error(ic, "copying from journal", -EIO);
	complete_journal_op(comp);
}

static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
			     unsigned write_sections, bool from_replay)
{
	unsigned i, j, n;
	struct journal_completion comp;

	comp.ic = ic;
	comp.in_flight = (atomic_t)ATOMIC_INIT(1);
	comp.comp = COMPLETION_INITIALIZER_ONSTACK(comp.comp);

	i = write_start;
	for (n = 0; n < write_sections; n++, i++, wraparound_section(ic, &i)) {
#ifndef INTERNAL_VERIFY
		if (unlikely(from_replay))
#endif
			rw_section_mac(ic, i, false);
		for (j = 0; j < ic->journal_section_entries; j++) {
			struct journal_entry *je = access_journal_entry(ic, i, j);
			sector_t sec, area, offset;
			unsigned k, l, next_loop;
			sector_t metadata_block;
			unsigned metadata_offset;
			struct journal_io *io;

			if (journal_entry_is_unused(je))
				continue;
			BUG_ON(unlikely(journal_entry_is_inprogress(je)) && !from_replay);
			sec = journal_entry_get_sector(je);
			get_area_and_offset(ic, sec, &area, &offset);
			access_journal_data(ic, i, j)->commit_id = je->last_bytes;
			for (k = j + 1; k < ic->journal_section_entries; k++) {
				struct journal_entry *je2 = access_journal_entry(ic, i, k);
				sector_t sec2, area2, offset2;
				if (journal_entry_is_unused(je2))
					break;
				BUG_ON(unlikely(journal_entry_is_inprogress(je2)) && !from_replay);
				sec2 = journal_entry_get_sector(je2);
				get_area_and_offset(ic, sec2, &area2, &offset2);
				if (area2 != area || offset2 != offset + (k - j))
					break;
				access_journal_data(ic, i, k)->commit_id = je2->last_bytes;
			}
			next_loop = k - 1;

			io = mempool_alloc(ic->journal_io_mempool, GFP_NOIO);
			io->comp = &comp;
			io->range.logical_sector = sec;
			io->range.n_sectors = k - j;

			spin_lock_irq(&ic->endio_wait.lock);
			while (unlikely(!add_new_range(ic, &io->range)))
				sleep_on_endio_wait(ic);

			if (likely(!from_replay)) {
				struct journal_node *section_node = &ic->journal_tree[i * ic->journal_section_entries];

				/* don't write if there is a newer committed sector */
1778 while (j < k && find_newer_committed_node(ic, &section_node[j])) {
1779 struct journal_entry *je2 = access_journal_entry(ic, i, j);
1780
1781 journal_entry_set_unused(je2);
1782 remove_journal_node(ic, &section_node[j]);
1783 j++;
1784 sec++;
1785 offset++;
1786 }
1787 while (j < k && find_newer_committed_node(ic, &section_node[k - 1])) {
1788 struct journal_entry *je2 = access_journal_entry(ic, i, k - 1);
1789
1790 journal_entry_set_unused(je2);
1791 remove_journal_node(ic, &section_node[k - 1]);
1792 k--;
1793 }
1794 if (j == k) {
1795 remove_range_unlocked(ic, &io->range);
1796 spin_unlock_irq(&ic->endio_wait.lock);
1797 mempool_free(io, ic->journal_io_mempool);
1798 goto skip_io;
1799 }
1800 for (l = j; l < k; l++) {
1801 remove_journal_node(ic, &section_node[l]);
1802 }
1803 }
1804 spin_unlock_irq(&ic->endio_wait.lock);
1805
1806 metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
1807 for (l = j; l < k; l++) {
1808 int r;
1809 struct journal_entry *je2 = access_journal_entry(ic, i, l);
1810
1811 if (
1812#ifndef INTERNAL_VERIFY
1813 unlikely(from_replay) &&
1814#endif
1815 ic->internal_hash) {
1816 unsigned char test_tag[ic->tag_size];
1817
1818 integrity_sector_checksum(ic, sec + (l - j),
1819 (char *)access_journal_data(ic, i, l), test_tag);
1820 if (unlikely(memcmp(test_tag, je2->tag, ic->tag_size)))
1821 dm_integrity_io_error(ic, "tag mismatch when replaying journal", -EILSEQ);
1822 }
1823
1824 journal_entry_set_unused(je2);
1825 r = dm_integrity_rw_tag(ic, je2->tag, &metadata_block, &metadata_offset,
1826 ic->tag_size, TAG_WRITE);
1827 if (unlikely(r)) {
1828 dm_integrity_io_error(ic, "reading tags", r);
1829 }
1830 }
1831
1832 atomic_inc(&comp.in_flight);
1833 copy_from_journal(ic, i, j, k - j, get_data_sector(ic, area, offset),
1834 complete_copy_from_journal, io);
1835skip_io:
1836 j = next_loop;
1837 }
1838 }
1839
1840 dm_bufio_write_dirty_buffers_async(ic->bufio);
1841
1842 complete_journal_op(&comp);
1843 wait_for_completion_io(&comp.comp);
1844
1845 dm_integrity_flush_buffers(ic);
1846}
1847
1848static void integrity_writer(struct work_struct *w)
1849{
1850 struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, writer_work);
1851 unsigned write_start, write_sections;
1852
1853 unsigned prev_free_sectors;
1854
1855 /* this check is not strictly needed, but bailing out while suspending leaves committed sections in the journal, exercising the replay code on resume */
1856 if (ACCESS_ONCE(ic->suspending))
1857 return;
1858
1859 spin_lock_irq(&ic->endio_wait.lock);
1860 write_start = ic->committed_section;
1861 write_sections = ic->n_committed_sections;
1862 spin_unlock_irq(&ic->endio_wait.lock);
1863
1864 if (!write_sections)
1865 return;
1866
1867 do_journal_write(ic, write_start, write_sections, false);
1868
1869 spin_lock_irq(&ic->endio_wait.lock);
1870
1871 ic->committed_section += write_sections;
1872 wraparound_section(ic, &ic->committed_section);
1873 ic->n_committed_sections -= write_sections;
1874
1875 prev_free_sectors = ic->free_sectors;
1876 ic->free_sectors += write_sections * ic->journal_section_entries;
1877 if (unlikely(!prev_free_sectors))
1878 wake_up_locked(&ic->endio_wait);
1879
1880 spin_unlock_irq(&ic->endio_wait.lock);
1881}
1882
1883static void init_journal(struct dm_integrity_c *ic, unsigned start_section,
1884 unsigned n_sections, unsigned char commit_seq)
1885{
1886 unsigned i, j, n;
1887
1888 if (!n_sections)
1889 return;
1890
1891 for (n = 0; n < n_sections; n++) {
1892 i = start_section + n;
1893 wraparound_section(ic, &i);
1894 for (j = 0; j < ic->journal_section_sectors; j++) {
1895 struct journal_sector *js = access_journal(ic, i, j);
1896 memset(&js->entries, 0, JOURNAL_SECTOR_DATA);
1897 js->commit_id = dm_integrity_commit_id(ic, i, j, commit_seq);
1898 }
1899 for (j = 0; j < ic->journal_section_entries; j++) {
1900 struct journal_entry *je = access_journal_entry(ic, i, j);
1901 journal_entry_set_unused(je);
1902 }
1903 }
1904
1905 write_journal(ic, start_section, n_sections);
1906}
1907
1908static int find_commit_seq(struct dm_integrity_c *ic, unsigned i, unsigned j, commit_id_t id)
1909{
1910 unsigned char k;
1911 for (k = 0; k < N_COMMIT_IDS; k++) {
1912 if (dm_integrity_commit_id(ic, i, j, k) == id)
1913 return k;
1914 }
1915 dm_integrity_io_error(ic, "journal commit id", -EIO);
1916 return -EIO;
1917}
1918
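/*
 * Journal replay, done on resume: read back (and, when journal encryption
 * is configured, decrypt) the whole journal, use the per-sector commit ids
 * to find the most recently committed run of sections, and rewrite that run
 * to its final location via do_journal_write(..., from_replay == true).
 * If the journal cannot be continued, it is reinitialized with a fresh
 * commit seq instead.
 */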
1919static void replay_journal(struct dm_integrity_c *ic)
1920{
1921 unsigned i, j;
1922 bool used_commit_ids[N_COMMIT_IDS];
1923 unsigned max_commit_id_sections[N_COMMIT_IDS];
1924 unsigned write_start, write_sections;
1925 unsigned continue_section;
1926 bool journal_empty;
1927 unsigned char unused, last_used, want_commit_seq;
1928
1929 if (ic->journal_uptodate)
1930 return;
1931
1932 last_used = 0;
1933 write_start = 0;
1934
1935 if (!ic->just_formatted) {
1936 DEBUG_print("reading journal\n");
1937 rw_journal(ic, REQ_OP_READ, 0, 0, ic->journal_sections, NULL);
1938 if (ic->journal_io)
1939 DEBUG_bytes(lowmem_page_address(ic->journal_io[0].page), 64, "read journal");
1940 if (ic->journal_io) {
1941 struct journal_completion crypt_comp;
1942 crypt_comp.ic = ic;
1943 crypt_comp.comp = COMPLETION_INITIALIZER_ONSTACK(crypt_comp.comp);
1944 crypt_comp.in_flight = (atomic_t)ATOMIC_INIT(0);
1945 encrypt_journal(ic, false, 0, ic->journal_sections, &crypt_comp);
1946 wait_for_completion(&crypt_comp.comp);
1947 }
1948 DEBUG_bytes(lowmem_page_address(ic->journal[0].page), 64, "decrypted journal");
1949 }
1950
1951 if (dm_integrity_failed(ic))
1952 goto clear_journal;
1953
1954 journal_empty = true;
1955 memset(used_commit_ids, 0, sizeof used_commit_ids);
1956 memset(max_commit_id_sections, 0, sizeof max_commit_id_sections);
1957 for (i = 0; i < ic->journal_sections; i++) {
1958 for (j = 0; j < ic->journal_section_sectors; j++) {
1959 int k;
1960 struct journal_sector *js = access_journal(ic, i, j);
1961 k = find_commit_seq(ic, i, j, js->commit_id);
1962 if (k < 0)
1963 goto clear_journal;
1964 used_commit_ids[k] = true;
1965 max_commit_id_sections[k] = i;
1966 }
1967 if (journal_empty) {
1968 for (j = 0; j < ic->journal_section_entries; j++) {
1969 struct journal_entry *je = access_journal_entry(ic, i, j);
1970 if (!journal_entry_is_unused(je)) {
1971 journal_empty = false;
1972 break;
1973 }
1974 }
1975 }
1976 }
1977
1978 if (!used_commit_ids[N_COMMIT_IDS - 1]) {
1979 unused = N_COMMIT_IDS - 1;
1980 while (unused && !used_commit_ids[unused - 1])
1981 unused--;
1982 } else {
1983 for (unused = 0; unused < N_COMMIT_IDS; unused++)
1984 if (!used_commit_ids[unused])
1985 break;
1986 if (unused == N_COMMIT_IDS) {
1987 dm_integrity_io_error(ic, "journal commit ids", -EIO);
1988 goto clear_journal;
1989 }
1990 }
1991 DEBUG_print("first unused commit seq %d [%d,%d,%d,%d]\n",
1992 unused, used_commit_ids[0], used_commit_ids[1],
1993 used_commit_ids[2], used_commit_ids[3]);
1994
1995 last_used = prev_commit_seq(unused);
1996 want_commit_seq = prev_commit_seq(last_used);
1997
1998 if (!used_commit_ids[want_commit_seq] && used_commit_ids[prev_commit_seq(want_commit_seq)])
1999 journal_empty = true;
2000
2001 write_start = max_commit_id_sections[last_used] + 1;
2002 if (unlikely(write_start >= ic->journal_sections))
2003 want_commit_seq = next_commit_seq(want_commit_seq);
2004 wraparound_section(ic, &write_start);
2005
2006 i = write_start;
2007 for (write_sections = 0; write_sections < ic->journal_sections; write_sections++) {
2008 for (j = 0; j < ic->journal_section_sectors; j++) {
2009 struct journal_sector *js = access_journal(ic, i, j);
2010
2011 if (js->commit_id != dm_integrity_commit_id(ic, i, j, want_commit_seq)) {
2012 /*
2013 * This could be caused by crash during writing.
2014 * We won't replay the inconsistent part of the
2015 * journal.
2016 */
2017 DEBUG_print("commit id mismatch at position (%u, %u): %d != %d\n",
2018 i, j, find_commit_seq(ic, i, j, js->commit_id), want_commit_seq);
2019 goto brk;
2020 }
2021 }
2022 i++;
2023 if (unlikely(i >= ic->journal_sections))
2024 want_commit_seq = next_commit_seq(want_commit_seq);
2025 wraparound_section(ic, &i);
2026 }
2027brk:
2028
2029 if (!journal_empty) {
2030 DEBUG_print("replaying %u sections, starting at %u, commit seq %d\n",
2031 write_sections, write_start, want_commit_seq);
2032 do_journal_write(ic, write_start, write_sections, true);
2033 }
2034
2035 if (write_sections == ic->journal_sections && (ic->mode == 'J' || journal_empty)) {
2036 continue_section = write_start;
2037 ic->commit_seq = want_commit_seq;
2038 DEBUG_print("continuing from section %u, commit seq %d\n", write_start, ic->commit_seq);
2039 } else {
2040 unsigned s;
2041 unsigned char erase_seq;
2042clear_journal:
2043 DEBUG_print("clearing journal\n");
2044
2045 erase_seq = prev_commit_seq(prev_commit_seq(last_used));
2046 s = write_start;
2047 init_journal(ic, s, 1, erase_seq);
2048 s++;
2049 wraparound_section(ic, &s);
2050 if (ic->journal_sections >= 2) {
2051 init_journal(ic, s, ic->journal_sections - 2, erase_seq);
2052 s += ic->journal_sections - 2;
2053 wraparound_section(ic, &s);
2054 init_journal(ic, s, 1, erase_seq);
2055 }
2056
2057 continue_section = 0;
2058 ic->commit_seq = next_commit_seq(erase_seq);
2059 }
2060
2061 ic->committed_section = continue_section;
2062 ic->n_committed_sections = 0;
2063
2064 ic->uncommitted_section = continue_section;
2065 ic->n_uncommitted_sections = 0;
2066
2067 ic->free_section = continue_section;
2068 ic->free_section_entry = 0;
2069 ic->free_sectors = ic->journal_entries;
2070
2071 ic->journal_tree_root = RB_ROOT;
2072 for (i = 0; i < ic->journal_entries; i++)
2073 init_journal_node(&ic->journal_tree[i]);
2074}
2075
2076static void dm_integrity_postsuspend(struct dm_target *ti)
2077{
2078 struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
2079
2080 del_timer_sync(&ic->autocommit_timer);
2081
2082 ic->suspending = true;
2083
2084 queue_work(ic->commit_wq, &ic->commit_work);
2085 drain_workqueue(ic->commit_wq);
2086
2087 if (ic->mode == 'J') {
2088 drain_workqueue(ic->writer_wq);
2089 dm_integrity_flush_buffers(ic);
2090 }
2091
2092 ic->suspending = false;
2093
2094 BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
2095
2096 ic->journal_uptodate = true;
2097}
2098
2099static void dm_integrity_resume(struct dm_target *ti)
2100{
2101 struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
2102
2103 replay_journal(ic);
2104}
2105
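/*
 * For illustration only (device and sizes are hypothetical), the
 * STATUSTYPE_TABLE branch below would emit something like:
 *
 *   /dev/sdb 0 32 J 5 journal-sectors:8192 interleave-sectors:32768
 *   buffer-sectors:128 journal-watermark:50 commit-time:10000
 *
 * with internal-hash/journal-crypt/journal-mac fields appended only when
 * the corresponding algorithms are configured.
 */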
2106static void dm_integrity_status(struct dm_target *ti, status_type_t type,
2107 unsigned status_flags, char *result, unsigned maxlen)
2108{
2109 struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
2110 unsigned arg_count;
2111 size_t sz = 0;
2112
2113 switch (type) {
2114 case STATUSTYPE_INFO:
2115 result[0] = '\0';
2116 break;
2117
2118 case STATUSTYPE_TABLE: {
2119 __u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100;
2120 watermark_percentage += ic->journal_entries / 2;
2121 do_div(watermark_percentage, ic->journal_entries);
2122 arg_count = 5;
2123 arg_count += !!ic->internal_hash_alg.alg_string;
2124 arg_count += !!ic->journal_crypt_alg.alg_string;
2125 arg_count += !!ic->journal_mac_alg.alg_string;
2126 DMEMIT("%s %llu %u %c %u", ic->dev->name, (unsigned long long)ic->start,
2127 ic->tag_size, ic->mode, arg_count);
2128 DMEMIT(" journal-sectors:%u", ic->initial_sectors - SB_SECTORS);
2129 DMEMIT(" interleave-sectors:%u", 1U << ic->sb->log2_interleave_sectors);
2130 DMEMIT(" buffer-sectors:%u", 1U << ic->log2_buffer_sectors);
2131 DMEMIT(" journal-watermark:%u", (unsigned)watermark_percentage);
2132 DMEMIT(" commit-time:%u", ic->autocommit_msec);
2133
2134#define EMIT_ALG(a, n) \
2135 do { \
2136 if (ic->a.alg_string) { \
2137 DMEMIT(" %s:%s", n, ic->a.alg_string); \
2138 if (ic->a.key_string) \
2139 DMEMIT(":%s", ic->a.key_string);\
2140 } \
2141 } while (0)
2142 EMIT_ALG(internal_hash_alg, "internal-hash");
2143 EMIT_ALG(journal_crypt_alg, "journal-crypt");
2144 EMIT_ALG(journal_mac_alg, "journal-mac");
2145 break;
2146 }
2147 }
2148}
2149
2150static int dm_integrity_iterate_devices(struct dm_target *ti,
2151 iterate_devices_callout_fn fn, void *data)
2152{
2153 struct dm_integrity_c *ic = ti->private;
2154
2155 return fn(ti, ic->dev, ic->start + ic->initial_sectors + ic->metadata_run, ti->len, data);
2156}
2157
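/*
 * A worked size example (illustrative, assuming 32-byte tags):
 * journal_entry_size = roundup(offsetof(struct journal_entry, tag) + 32,
 * JOURNAL_ENTRY_ROUNDUP) = roundup(16 + 32, 8) = 48 bytes. A journal sector
 * carries JOURNAL_SECTOR_DATA = 504 bytes of entry space (496 with a journal
 * MAC), so journal_entries_per_sector = 10 in either case,
 * journal_section_entries = 10 * 8 = 80 and
 * journal_section_sectors = 80 + 8 = 88.
 */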
2158static void calculate_journal_section_size(struct dm_integrity_c *ic)
2159{
2160 unsigned sector_space = JOURNAL_SECTOR_DATA;
2161
2162 ic->journal_sections = le32_to_cpu(ic->sb->journal_sections);
2163 ic->journal_entry_size = roundup(offsetof(struct journal_entry, tag) + ic->tag_size,
2164 JOURNAL_ENTRY_ROUNDUP);
2165
2166 if (ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC))
2167 sector_space -= JOURNAL_MAC_PER_SECTOR;
2168 ic->journal_entries_per_sector = sector_space / ic->journal_entry_size;
2169 ic->journal_section_entries = ic->journal_entries_per_sector * JOURNAL_BLOCK_SECTORS;
2170 ic->journal_section_sectors = ic->journal_section_entries + JOURNAL_BLOCK_SECTORS;
2171 ic->journal_entries = ic->journal_section_entries * ic->journal_sections;
2172}
2173
2174static int calculate_device_limits(struct dm_integrity_c *ic)
2175{
2176 __u64 initial_sectors;
2177 sector_t last_sector, last_area, last_offset;
2178
2179 calculate_journal_section_size(ic);
2180 initial_sectors = SB_SECTORS + (__u64)ic->journal_section_sectors * ic->journal_sections;
2181 if (initial_sectors + METADATA_PADDING_SECTORS >= ic->device_sectors || initial_sectors > UINT_MAX)
2182 return -EINVAL;
2183 ic->initial_sectors = initial_sectors;
2184
2185 ic->metadata_run = roundup((__u64)ic->tag_size << ic->sb->log2_interleave_sectors,
2186 (__u64)(1 << SECTOR_SHIFT << METADATA_PADDING_SECTORS)) >> SECTOR_SHIFT;
2187 if (!(ic->metadata_run & (ic->metadata_run - 1)))
2188 ic->log2_metadata_run = __ffs(ic->metadata_run);
2189 else
2190 ic->log2_metadata_run = -1;
2191
2192 get_area_and_offset(ic, ic->provided_data_sectors - 1, &last_area, &last_offset);
2193 last_sector = get_data_sector(ic, last_area, last_offset);
2194
2195 if (ic->start + last_sector < last_sector || ic->start + last_sector >= ic->device_sectors)
2196 return -EINVAL;
2197
2198 return 0;
2199}
2200
2201static int initialize_superblock(struct dm_integrity_c *ic, unsigned journal_sectors, unsigned interleave_sectors)
2202{
2203 unsigned journal_sections;
2204 int test_bit;
2205
2206 memcpy(ic->sb->magic, SB_MAGIC, 8);
2207 ic->sb->version = SB_VERSION;
2208 ic->sb->integrity_tag_size = cpu_to_le16(ic->tag_size);
2209 if (ic->journal_mac_alg.alg_string)
2210 ic->sb->flags |= cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC);
2211
2212 calculate_journal_section_size(ic);
2213 journal_sections = journal_sectors / ic->journal_section_sectors;
2214 if (!journal_sections)
2215 journal_sections = 1;
2216 ic->sb->journal_sections = cpu_to_le32(journal_sections);
2217
2218 ic->sb->log2_interleave_sectors = __fls(interleave_sectors);
2219 ic->sb->log2_interleave_sectors = max((__u8)MIN_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
2220 ic->sb->log2_interleave_sectors = min((__u8)MAX_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
2221
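/*
 * Greedily construct the largest provided_data_sectors that still fits:
 * try each bit from the top down and keep it only if
 * calculate_device_limits() accepts the resulting layout.
 */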
2222 ic->provided_data_sectors = 0;
2223 for (test_bit = fls64(ic->device_sectors) - 1; test_bit >= 3; test_bit--) {
2224 __u64 prev_data_sectors = ic->provided_data_sectors;
2225
2226 ic->provided_data_sectors |= (sector_t)1 << test_bit;
2227 if (calculate_device_limits(ic))
2228 ic->provided_data_sectors = prev_data_sectors;
2229 }
2230
2231 if (!ic->provided_data_sectors)
2232 return -EINVAL;
2233
2234 ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors);
2235
2236 return 0;
2237}
2238
2239static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic)
2240{
2241 struct gendisk *disk = dm_disk(dm_table_get_md(ti->table));
2242 struct blk_integrity bi;
2243
2244 memset(&bi, 0, sizeof(bi));
2245 bi.profile = &dm_integrity_profile;
2246 bi.tuple_size = ic->tag_size * (queue_logical_block_size(disk->queue) >> SECTOR_SHIFT);
2247 bi.tag_size = ic->tag_size;
2248
2249 blk_integrity_register(disk, &bi);
2250 blk_queue_max_integrity_segments(disk->queue, UINT_MAX);
2251}
2252
2253/* FIXME: use new kvmalloc */
2254static void *dm_integrity_kvmalloc(size_t size, gfp_t gfp)
2255{
2256 void *ptr = NULL;
2257
2258 if (size <= PAGE_SIZE)
2259 ptr = kmalloc(size, GFP_KERNEL | gfp);
2260 if (!ptr && size <= KMALLOC_MAX_SIZE)
2261 ptr = kmalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY | gfp);
2262 if (!ptr)
2263 ptr = __vmalloc(size, GFP_KERNEL | gfp, PAGE_KERNEL);
2264
2265 return ptr;
2266}
2267
2268static void dm_integrity_free_page_list(struct dm_integrity_c *ic, struct page_list *pl)
2269{
2270 unsigned i;
2271
2272 if (!pl)
2273 return;
2274 for (i = 0; i < ic->journal_pages; i++)
2275 if (pl[i].page)
2276 __free_page(pl[i].page);
2277 kvfree(pl);
2278}
2279
2280static struct page_list *dm_integrity_alloc_page_list(struct dm_integrity_c *ic)
2281{
2282 size_t page_list_desc_size = ic->journal_pages * sizeof(struct page_list);
2283 struct page_list *pl;
2284 unsigned i;
2285
2286 pl = dm_integrity_kvmalloc(page_list_desc_size, __GFP_ZERO);
2287 if (!pl)
2288 return NULL;
2289
2290 for (i = 0; i < ic->journal_pages; i++) {
2291 pl[i].page = alloc_page(GFP_KERNEL);
2292 if (!pl[i].page) {
2293 dm_integrity_free_page_list(ic, pl);
2294 return NULL;
2295 }
2296 if (i)
2297 pl[i - 1].next = &pl[i];
2298 }
2299
2300 return pl;
2301}
2302
2303static void dm_integrity_free_journal_scatterlist(struct dm_integrity_c *ic, struct scatterlist **sl)
2304{
2305 unsigned i;
2306 for (i = 0; i < ic->journal_sections; i++)
2307 kvfree(sl[i]);
2308 kvfree(sl);
2309}
2310
2311static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic, struct page_list *pl)
2312{
2313 struct scatterlist **sl;
2314 unsigned i;
2315
2316 sl = dm_integrity_kvmalloc(ic->journal_sections * sizeof(struct scatterlist *), __GFP_ZERO);
2317 if (!sl)
2318 return NULL;
2319
2320 for (i = 0; i < ic->journal_sections; i++) {
2321 struct scatterlist *s;
2322 unsigned start_index, start_offset;
2323 unsigned end_index, end_offset;
2324 unsigned n_pages;
2325 unsigned idx;
2326
2327 page_list_location(ic, i, 0, &start_index, &start_offset);
2328 page_list_location(ic, i, ic->journal_section_sectors - 1, &end_index, &end_offset);
2329
2330 n_pages = (end_index - start_index + 1);
2331
2332 s = dm_integrity_kvmalloc(n_pages * sizeof(struct scatterlist), 0);
2333 if (!s) {
2334 dm_integrity_free_journal_scatterlist(ic, sl);
2335 return NULL;
2336 }
2337
2338 sg_init_table(s, n_pages);
2339 for (idx = start_index; idx <= end_index; idx++) {
2340 char *va = lowmem_page_address(pl[idx].page);
2341 unsigned start = 0, end = PAGE_SIZE;
2342 if (idx == start_index)
2343 start = start_offset;
2344 if (idx == end_index)
2345 end = end_offset + (1 << SECTOR_SHIFT);
2346 sg_set_buf(&s[idx - start_index], va + start, end - start);
2347 }
2348
2349 sl[i] = s;
2350 }
2351
2352 return sl;
2353}
2354
2355static void free_alg(struct alg_spec *a)
2356{
2357 kzfree(a->alg_string);
2358 kzfree(a->key);
2359 memset(a, 0, sizeof *a);
2360}
2361
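/*
 * Parse one "option:algorithm[:key]" feature argument; the optional key is
 * hex-encoded. A hypothetical example argument (algorithm and key shown are
 * illustrative only): "journal-mac:hmac(sha256):6b6579".
 */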
2362static int get_alg_and_key(const char *arg, struct alg_spec *a, char **error, char *error_inval)
2363{
2364 char *k;
2365
2366 free_alg(a);
2367
2368 a->alg_string = kstrdup(strchr(arg, ':') + 1, GFP_KERNEL);
2369 if (!a->alg_string)
2370 goto nomem;
2371
2372 k = strchr(a->alg_string, ':');
2373 if (k) {
2374 unsigned i;
2375
2376 *k = 0;
2377 a->key_string = k + 1;
2378 if (strlen(a->key_string) & 1)
2379 goto inval;
2380
2381 a->key_size = strlen(a->key_string) / 2;
2382 a->key = kmalloc(a->key_size, GFP_KERNEL);
2383 if (!a->key)
2384 goto nomem;
2385 for (i = 0; i < a->key_size; i++) {
2386 char digit[3];
2387 digit[0] = a->key_string[i * 2];
2388 digit[1] = a->key_string[i * 2 + 1];
2389 digit[2] = 0;
2390 if (strspn(digit, "0123456789abcdefABCDEF") != 2)
2391 goto inval;
2392 if (kstrtou8(digit, 16, &a->key[i]))
2393 goto inval;
2394 }
2395 }
2396
2397 return 0;
2398inval:
2399 *error = error_inval;
2400 return -EINVAL;
2401nomem:
2402 *error = "Out of memory for an argument";
2403 return -ENOMEM;
2404}
2405
2406static int get_mac(struct crypto_shash **hash, struct alg_spec *a, char **error,
2407 char *error_alg, char *error_key)
2408{
2409 int r;
2410
2411 if (a->alg_string) {
2412 *hash = crypto_alloc_shash(a->alg_string, 0, CRYPTO_ALG_ASYNC);
2413 if (IS_ERR(*hash)) {
2414 *error = error_alg;
2415 r = PTR_ERR(*hash);
2416 *hash = NULL;
2417 return r;
2418 }
2419
2420 if (a->key) {
2421 r = crypto_shash_setkey(*hash, a->key, a->key_size);
2422 if (r) {
2423 *error = error_key;
2424 return r;
2425 }
2426 }
2427 }
2428
2429 return 0;
2430}
2431
2432static int create_journal(struct dm_integrity_c *ic, char **error)
2433{
2434 int r = 0;
2435 unsigned i;
2436 __u64 journal_pages, journal_desc_size, journal_tree_size;
2437
2438 journal_pages = roundup((__u64)ic->journal_sections * ic->journal_section_sectors,
2439 PAGE_SIZE >> SECTOR_SHIFT) >> (PAGE_SHIFT - SECTOR_SHIFT);
2440 journal_desc_size = journal_pages * sizeof(struct page_list);
2441 if (journal_pages >= totalram_pages - totalhigh_pages || journal_desc_size > ULONG_MAX) {
2442 *error = "Journal doesn't fit into memory";
2443 r = -ENOMEM;
2444 goto bad;
2445 }
2446 ic->journal_pages = journal_pages;
2447
2448 ic->journal = dm_integrity_alloc_page_list(ic);
2449 if (!ic->journal) {
2450 *error = "Could not allocate memory for journal";
2451 r = -ENOMEM;
2452 goto bad;
2453 }
2454 if (ic->journal_crypt_alg.alg_string) {
2455 unsigned ivsize, blocksize;
2456 struct journal_completion comp;
2457
2458 comp.ic = ic;
2459 ic->journal_crypt = crypto_alloc_skcipher(ic->journal_crypt_alg.alg_string, 0, 0);
2460 if (IS_ERR(ic->journal_crypt)) {
2461 *error = "Invalid journal cipher";
2462 r = PTR_ERR(ic->journal_crypt);
2463 ic->journal_crypt = NULL;
2464 goto bad;
2465 }
2466 ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
2467 blocksize = crypto_skcipher_blocksize(ic->journal_crypt);
2468
2469 if (ic->journal_crypt_alg.key) {
2470 r = crypto_skcipher_setkey(ic->journal_crypt, ic->journal_crypt_alg.key,
2471 ic->journal_crypt_alg.key_size);
2472 if (r) {
2473 *error = "Error setting encryption key";
2474 goto bad;
2475 }
2476 }
2477 DEBUG_print("cipher %s, block size %u iv size %u\n",
2478 ic->journal_crypt_alg.alg_string, blocksize, ivsize);
2479
2480 ic->journal_io = dm_integrity_alloc_page_list(ic);
2481 if (!ic->journal_io) {
2482 *error = "Could not allocate memory for journal io";
2483 r = -ENOMEM;
2484 goto bad;
2485 }
2486
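/*
 * Two cases: a cipher with block size 1 (a stream cipher) is run once here
 * to produce a keystream pad (journal_xor) and is then freed; otherwise one
 * skcipher request is kept per journal section, with its IV derived by
 * encrypting the section number.
 */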
2487 if (blocksize == 1) {
2488 struct scatterlist *sg;
2489 SKCIPHER_REQUEST_ON_STACK(req, ic->journal_crypt);
2490 unsigned char iv[ivsize];
2491 skcipher_request_set_tfm(req, ic->journal_crypt);
2492
2493 ic->journal_xor = dm_integrity_alloc_page_list(ic);
2494 if (!ic->journal_xor) {
2495 *error = "Could not allocate memory for journal xor";
2496 r = -ENOMEM;
2497 goto bad;
2498 }
2499
2500 sg = dm_integrity_kvmalloc((ic->journal_pages + 1) * sizeof(struct scatterlist), 0);
2501 if (!sg) {
2502 *error = "Unable to allocate sg list";
2503 r = -ENOMEM;
2504 goto bad;
2505 }
2506 sg_init_table(sg, ic->journal_pages + 1);
2507 for (i = 0; i < ic->journal_pages; i++) {
2508 char *va = lowmem_page_address(ic->journal_xor[i].page);
2509 clear_page(va);
2510 sg_set_buf(&sg[i], va, PAGE_SIZE);
2511 }
2512 sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids);
2513 memset(iv, 0x00, ivsize);
2514
2515 skcipher_request_set_crypt(req, sg, sg, PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, iv);
2516 comp.comp = COMPLETION_INITIALIZER_ONSTACK(comp.comp);
2517 comp.in_flight = (atomic_t)ATOMIC_INIT(1);
2518 if (do_crypt(true, req, &comp))
2519 wait_for_completion(&comp.comp);
2520 kvfree(sg);
2521 r = dm_integrity_failed(ic);
2522 if (r) {
2523 *error = "Unable to encrypt journal";
2524 goto bad;
2525 }
2526 DEBUG_bytes(lowmem_page_address(ic->journal_xor[0].page), 64, "xor data");
2527
2528 crypto_free_skcipher(ic->journal_crypt);
2529 ic->journal_crypt = NULL;
2530 } else {
2531 SKCIPHER_REQUEST_ON_STACK(req, ic->journal_crypt);
2532 unsigned char iv[ivsize];
2533 unsigned crypt_len = roundup(ivsize, blocksize);
2534 unsigned char crypt_data[crypt_len];
2535
2536 skcipher_request_set_tfm(req, ic->journal_crypt);
2537
2538 ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal);
2539 if (!ic->journal_scatterlist) {
2540 *error = "Unable to allocate sg list";
2541 r = -ENOMEM;
2542 goto bad;
2543 }
2544 ic->journal_io_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal_io);
2545 if (!ic->journal_io_scatterlist) {
2546 *error = "Unable to allocate sg list";
2547 r = -ENOMEM;
2548 goto bad;
2549 }
2550 ic->sk_requests = dm_integrity_kvmalloc(ic->journal_sections * sizeof(struct skcipher_request *), __GFP_ZERO);
2551 if (!ic->sk_requests) {
2552 *error = "Unable to allocate sk requests";
2553 r = -ENOMEM;
2554 goto bad;
2555 }
2556 for (i = 0; i < ic->journal_sections; i++) {
2557 struct scatterlist sg;
2558 struct skcipher_request *section_req;
2559 __u32 section_le = cpu_to_le32(i);
2560
2561 memset(iv, 0x00, ivsize);
2562 memset(crypt_data, 0x00, crypt_len);
2563 memcpy(crypt_data, &section_le, min((size_t)crypt_len, sizeof(section_le)));
2564
2565 sg_init_one(&sg, crypt_data, crypt_len);
2566 skcipher_request_set_crypt(req, &sg, &sg, crypt_len, iv);
2567 comp.comp = COMPLETION_INITIALIZER_ONSTACK(comp.comp);
2568 comp.in_flight = (atomic_t)ATOMIC_INIT(1);
2569 if (do_crypt(true, req, &comp))
2570 wait_for_completion(&comp.comp);
2571
2572 r = dm_integrity_failed(ic);
2573 if (r) {
2574 *error = "Unable to generate iv";
2575 goto bad;
2576 }
2577
2578 section_req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
2579 if (!section_req) {
2580 *error = "Unable to allocate crypt request";
2581 r = -ENOMEM;
2582 goto bad;
2583 }
2584 section_req->iv = kmalloc(ivsize * 2, GFP_KERNEL);
2585 if (!section_req->iv) {
2586 skcipher_request_free(section_req);
2587 *error = "Unable to allocate iv";
2588 r = -ENOMEM;
2589 goto bad;
2590 }
2591 memcpy(section_req->iv + ivsize, crypt_data, ivsize);
2592 section_req->cryptlen = (size_t)ic->journal_section_sectors << SECTOR_SHIFT;
2593 ic->sk_requests[i] = section_req;
2594 DEBUG_bytes(crypt_data, ivsize, "iv(%u)", i);
2595 }
2596 }
2597 }
2598
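/*
 * The commit ids must be pairwise distinct, or replay could not tell apart
 * sections written in different passes over the journal; bump duplicates
 * until all N_COMMIT_IDS values differ.
 */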
2599 for (i = 0; i < N_COMMIT_IDS; i++) {
2600 unsigned j;
2601retest_commit_id:
2602 for (j = 0; j < i; j++) {
2603 if (ic->commit_ids[j] == ic->commit_ids[i]) {
2604 ic->commit_ids[i] = cpu_to_le64(le64_to_cpu(ic->commit_ids[i]) + 1);
2605 goto retest_commit_id;
2606 }
2607 }
2608 DEBUG_print("commit id %u: %016llx\n", i, ic->commit_ids[i]);
2609 }
2610
2611 journal_tree_size = (__u64)ic->journal_entries * sizeof(struct journal_node);
2612 if (journal_tree_size > ULONG_MAX) {
2613 *error = "Journal doesn't fit into memory";
2614 r = -ENOMEM;
2615 goto bad;
2616 }
2617 ic->journal_tree = dm_integrity_kvmalloc(journal_tree_size, 0);
2618 if (!ic->journal_tree) {
2619 *error = "Could not allocate memory for journal tree";
2620 r = -ENOMEM;
2621 }
2622bad:
2623 return r;
2624}
2625
2626/*
2627 * Construct an integrity mapping: <dev_path> <offset> <tag_size> <mode> <#opt_args> [<opt_arg>...]
2628 *
2629 * Arguments:
2630 * device
2631 * offset from the start of the device
2632 * tag size
2633 * D - direct writes, J - journal writes
2634 * number of optional arguments
2635 * optional arguments:
2636 * journal-sectors
2637 * interleave-sectors
2638 * buffer-sectors
2639 * journal-watermark
2640 * commit-time
2641 * internal-hash
2642 * journal-crypt
2643 * journal-mac
2644 */
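/*
 * For illustration only (device path, offset and sizes are hypothetical),
 * a minimal table line for this target, loadable with "dmsetup create":
 *
 *   0 1638400 integrity /dev/sdb 0 32 J 2 journal-sectors:8192 commit-time:5000
 */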
2645static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
2646{
2647 struct dm_integrity_c *ic;
2648 char dummy;
2649 int r;
2650 unsigned i;
2651 unsigned extra_args;
2652 struct dm_arg_set as;
2653 static struct dm_arg _args[] = {
2654 {0, 7, "Invalid number of feature args"},
2655 };
2656 unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
2657 bool should_write_sb;
2658 __u64 threshold;
2659 unsigned long long start;
2660
2661#define DIRECT_ARGUMENTS 4
2662
2663 if (argc <= DIRECT_ARGUMENTS) {
2664 ti->error = "Invalid argument count";
2665 return -EINVAL;
2666 }
2667
2668 ic = kzalloc(sizeof(struct dm_integrity_c), GFP_KERNEL);
2669 if (!ic) {
2670 ti->error = "Cannot allocate integrity context";
2671 return -ENOMEM;
2672 }
2673 ti->private = ic;
2674 ti->per_io_data_size = sizeof(struct dm_integrity_io);
2675
2676 ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL);
2677 ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL);
2678 ic->commit_ids[2] = cpu_to_le64(0x3333333333333333ULL);
2679 ic->commit_ids[3] = cpu_to_le64(0x4444444444444444ULL);
2680
2681 ic->in_progress = RB_ROOT;
2682 init_waitqueue_head(&ic->endio_wait);
2683 bio_list_init(&ic->flush_bio_list);
2684 init_waitqueue_head(&ic->copy_to_journal_wait);
2685 init_completion(&ic->crypto_backoff);
2686
2687 r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ic->dev);
2688 if (r) {
2689 ti->error = "Device lookup failed";
2690 goto bad;
2691 }
2692
2693 if (sscanf(argv[1], "%llu%c", &start, &dummy) != 1 || start != (sector_t)start) {
2694 ti->error = "Invalid starting offset";
2695 r = -EINVAL;
2696 goto bad;
2697 }
2698 ic->start = start;
2699
2700 if (strcmp(argv[2], "-")) {
2701 if (sscanf(argv[2], "%u%c", &ic->tag_size, &dummy) != 1 || !ic->tag_size) {
2702 ti->error = "Invalid tag size";
2703 r = -EINVAL;
2704 goto bad;
2705 }
2706 }
2707
2708 if (!strcmp(argv[3], "J") || !strcmp(argv[3], "D"))
2709 ic->mode = argv[3][0];
2710 else {
2711 ti->error = "Invalid mode (expecting J or D)";
2712 r = -EINVAL;
2713 goto bad;
2714 }
2715
2716 ic->device_sectors = i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT;
2717 journal_sectors = min((sector_t)DEFAULT_MAX_JOURNAL_SECTORS,
2718 ic->device_sectors >> DEFAULT_JOURNAL_SIZE_FACTOR);
2719 interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
2720 buffer_sectors = DEFAULT_BUFFER_SECTORS;
2721 journal_watermark = DEFAULT_JOURNAL_WATERMARK;
2722 sync_msec = DEFAULT_SYNC_MSEC;
2723
2724 as.argc = argc - DIRECT_ARGUMENTS;
2725 as.argv = argv + DIRECT_ARGUMENTS;
2726 r = dm_read_arg_group(_args, &as, &extra_args, &ti->error);
2727 if (r)
2728 goto bad;
2729
2730 while (extra_args--) {
2731 const char *opt_string;
2732 unsigned val;
2733 opt_string = dm_shift_arg(&as);
2734 if (!opt_string) {
2735 r = -EINVAL;
2736 ti->error = "Not enough feature arguments";
2737 goto bad;
2738 }
2739 if (sscanf(opt_string, "journal-sectors:%u%c", &val, &dummy) == 1)
2740 journal_sectors = val;
2741 else if (sscanf(opt_string, "interleave-sectors:%u%c", &val, &dummy) == 1)
2742 interleave_sectors = val;
2743 else if (sscanf(opt_string, "buffer-sectors:%u%c", &val, &dummy) == 1)
2744 buffer_sectors = val;
2745 else if (sscanf(opt_string, "journal-watermark:%u%c", &val, &dummy) == 1 && val <= 100)
2746 journal_watermark = val;
2747 else if (sscanf(opt_string, "commit-time:%u%c", &val, &dummy) == 1)
2748 sync_msec = val;
2749 else if (!memcmp(opt_string, "internal-hash:", strlen("internal-hash:"))) {
2750 r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error,
2751 "Invalid internal-hash argument");
2752 if (r)
2753 goto bad;
2754 } else if (!memcmp(opt_string, "journal-crypt:", strlen("journal-crypt:"))) {
2755 r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error,
2756 "Invalid journal-crypt argument");
2757 if (r)
2758 goto bad;
2759 } else if (!memcmp(opt_string, "journal-mac:", strlen("journal-mac:"))) {
2760 r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error,
2761 "Invalid journal-mac argument");
2762 if (r)
2763 goto bad;
2764 } else {
2765 r = -EINVAL;
2766 ti->error = "Invalid argument";
2767 goto bad;
2768 }
2769 }
2770
2771 r = get_mac(&ic->internal_hash, &ic->internal_hash_alg, &ti->error,
2772 "Invalid internal hash", "Error setting internal hash key");
2773 if (r)
2774 goto bad;
2775
2776 r = get_mac(&ic->journal_mac, &ic->journal_mac_alg, &ti->error,
2777 "Invalid journal mac", "Error setting journal mac key");
2778 if (r)
2779 goto bad;
2780
2781 if (!ic->tag_size) {
2782 if (!ic->internal_hash) {
2783 ti->error = "Unknown tag size";
2784 r = -EINVAL;
2785 goto bad;
2786 }
2787 ic->tag_size = crypto_shash_digestsize(ic->internal_hash);
2788 }
2789 if (ic->tag_size > MAX_TAG_SIZE) {
2790 ti->error = "Too big tag size";
2791 r = -EINVAL;
2792 goto bad;
2793 }
2794 if (!(ic->tag_size & (ic->tag_size - 1)))
2795 ic->log2_tag_size = __ffs(ic->tag_size);
2796 else
2797 ic->log2_tag_size = -1;
2798
2799 ic->autocommit_jiffies = msecs_to_jiffies(sync_msec);
2800 ic->autocommit_msec = sync_msec;
2801 setup_timer(&ic->autocommit_timer, autocommit_fn, (unsigned long)ic);
2802
2803 ic->io = dm_io_client_create();
2804 if (IS_ERR(ic->io)) {
2805 r = PTR_ERR(ic->io);
2806 ic->io = NULL;
2807 ti->error = "Cannot allocate dm io";
2808 goto bad;
2809 }
2810
2811 ic->journal_io_mempool = mempool_create_slab_pool(JOURNAL_IO_MEMPOOL, journal_io_cache);
2812 if (!ic->journal_io_mempool) {
2813 r = -ENOMEM;
2814 ti->error = "Cannot allocate mempool";
2815 goto bad;
2816 }
2817
2818 ic->metadata_wq = alloc_workqueue("dm-integrity-metadata",
2819 WQ_MEM_RECLAIM, METADATA_WORKQUEUE_MAX_ACTIVE);
2820 if (!ic->metadata_wq) {
2821 ti->error = "Cannot allocate workqueue";
2822 r = -ENOMEM;
2823 goto bad;
2824 }
2825
2826 /*
2827 * If this workqueue were percpu, it would cause bio reordering
2828 * and reduced performance.
2829 */
2830 ic->wait_wq = alloc_workqueue("dm-integrity-wait", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
2831 if (!ic->wait_wq) {
2832 ti->error = "Cannot allocate workqueue";
2833 r = -ENOMEM;
2834 goto bad;
2835 }
2836
2837 ic->commit_wq = alloc_workqueue("dm-integrity-commit", WQ_MEM_RECLAIM, 1);
2838 if (!ic->commit_wq) {
2839 ti->error = "Cannot allocate workqueue";
2840 r = -ENOMEM;
2841 goto bad;
2842 }
2843 INIT_WORK(&ic->commit_work, integrity_commit);
2844
2845 if (ic->mode == 'J') {
2846 ic->writer_wq = alloc_workqueue("dm-integrity-writer", WQ_MEM_RECLAIM, 1);
2847 if (!ic->writer_wq) {
2848 ti->error = "Cannot allocate workqueue";
2849 r = -ENOMEM;
2850 goto bad;
2851 }
2852 INIT_WORK(&ic->writer_work, integrity_writer);
2853 }
2854
2855 ic->sb = alloc_pages_exact(SB_SECTORS << SECTOR_SHIFT, GFP_KERNEL);
2856 if (!ic->sb) {
2857 r = -ENOMEM;
2858 ti->error = "Cannot allocate superblock area";
2859 goto bad;
2860 }
2861
2862 r = sync_rw_sb(ic, REQ_OP_READ, 0);
2863 if (r) {
2864 ti->error = "Error reading superblock";
2865 goto bad;
2866 }
2867 if (!memcmp(ic->sb->magic, SB_MAGIC, 8)) {
2868 should_write_sb = false;
2869 } else {
2870 for (i = 0; i < 512; i += 8) {
2871 if (*(__u64 *)((__u8 *)ic->sb + i)) {
2872 r = -EINVAL;
2873 ti->error = "The device is not initialized";
2874 goto bad;
2875 }
2876 }
2877
2878 r = initialize_superblock(ic, journal_sectors, interleave_sectors);
2879 if (r) {
2880 ti->error = "Could not initialize superblock";
2881 goto bad;
2882 }
2883 should_write_sb = true;
2884 }
2885
2886 if (ic->sb->version != SB_VERSION) {
2887 r = -EINVAL;
2888 ti->error = "Unknown version";
2889 goto bad;
2890 }
2891 if (le16_to_cpu(ic->sb->integrity_tag_size) != ic->tag_size) {
2892 r = -EINVAL;
2893 ti->error = "Invalid tag size";
2894 goto bad;
2895 }
2896 /* make sure that ti->max_io_len doesn't overflow */
2897 if (ic->sb->log2_interleave_sectors < MIN_INTERLEAVE_SECTORS ||
2898 ic->sb->log2_interleave_sectors > MAX_INTERLEAVE_SECTORS) {
2899 r = -EINVAL;
2900 ti->error = "Invalid interleave_sectors in the superblock";
2901 goto bad;
2902 }
2903 ic->provided_data_sectors = le64_to_cpu(ic->sb->provided_data_sectors);
2904 if (ic->provided_data_sectors != le64_to_cpu(ic->sb->provided_data_sectors)) {
2905 /* test for overflow */
2906 r = -EINVAL;
2907 ti->error = "The superblock has 64-bit device size, but the kernel was compiled with 32-bit sectors";
2908 goto bad;
2909 }
2910 if (!!(ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC)) != !!ic->journal_mac_alg.alg_string) {
2911 r = -EINVAL;
2912 ti->error = "Journal mac mismatch";
2913 goto bad;
2914 }
2915 r = calculate_device_limits(ic);
2916 if (r) {
2917 ti->error = "The device is too small";
2918 goto bad;
2919 }
2920
2921 if (!buffer_sectors)
2922 buffer_sectors = 1;
2923 ic->log2_buffer_sectors = min3((int)__fls(buffer_sectors), (int)__ffs(ic->metadata_run), 31 - SECTOR_SHIFT);
2924
2925 threshold = (__u64)ic->journal_entries * (100 - journal_watermark);
2926 threshold += 50;
2927 do_div(threshold, 100);
2928 ic->free_sectors_threshold = threshold;
2929
2930 DEBUG_print("initialized:\n");
2931 DEBUG_print(" integrity_tag_size %u\n", le16_to_cpu(ic->sb->integrity_tag_size));
2932 DEBUG_print(" journal_entry_size %u\n", ic->journal_entry_size);
2933 DEBUG_print(" journal_entries_per_sector %u\n", ic->journal_entries_per_sector);
2934 DEBUG_print(" journal_section_entries %u\n", ic->journal_section_entries);
2935 DEBUG_print(" journal_section_sectors %u\n", ic->journal_section_sectors);
2936 DEBUG_print(" journal_sections %u\n", (unsigned)le32_to_cpu(ic->sb->journal_sections));
2937 DEBUG_print(" journal_entries %u\n", ic->journal_entries);
2938 DEBUG_print(" log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors);
2939 DEBUG_print(" device_sectors 0x%llx\n", (unsigned long long)ic->device_sectors);
2940 DEBUG_print(" initial_sectors 0x%x\n", ic->initial_sectors);
2941 DEBUG_print(" metadata_run 0x%x\n", ic->metadata_run);
2942 DEBUG_print(" log2_metadata_run %d\n", ic->log2_metadata_run);
2943 DEBUG_print(" provided_data_sectors 0x%llx (%llu)\n", (unsigned long long)ic->provided_data_sectors,
2944 (unsigned long long)ic->provided_data_sectors);
2945 DEBUG_print(" log2_buffer_sectors %u\n", ic->log2_buffer_sectors);
2946
2947 ic->bufio = dm_bufio_client_create(ic->dev->bdev, 1U << (SECTOR_SHIFT + ic->log2_buffer_sectors),
2948 1, 0, NULL, NULL);
2949 if (IS_ERR(ic->bufio)) {
2950 r = PTR_ERR(ic->bufio);
2951 ti->error = "Cannot initialize dm-bufio";
2952 ic->bufio = NULL;
2953 goto bad;
2954 }
2955 dm_bufio_set_sector_offset(ic->bufio, ic->start + ic->initial_sectors);
2956
2957 r = create_journal(ic, &ti->error);
2958 if (r)
2959 goto bad;
2960
2961 if (should_write_sb) {
2964 init_journal(ic, 0, ic->journal_sections, 0);
2965 r = dm_integrity_failed(ic);
2966 if (unlikely(r)) {
2967 ti->error = "Error initializing journal";
2968 goto bad;
2969 }
2970 r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
2971 if (r) {
2972 ti->error = "Error initializing superblock";
2973 goto bad;
2974 }
2975 ic->just_formatted = true;
2976 }
2977
2978 r = dm_set_target_max_io_len(ti, 1U << ic->sb->log2_interleave_sectors);
2979 if (r)
2980 goto bad;
2981
2982 if (!ic->internal_hash)
2983 dm_integrity_set(ti, ic);
2984
2985 ti->num_flush_bios = 1;
2986 ti->flush_supported = true;
2987
2988 return 0;
2989bad:
2990 dm_integrity_dtr(ti);
2991 return r;
2992}
2993
2994static void dm_integrity_dtr(struct dm_target *ti)
2995{
2996 struct dm_integrity_c *ic = ti->private;
2997
2998 BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
2999
3000 if (ic->metadata_wq)
3001 destroy_workqueue(ic->metadata_wq);
3002 if (ic->wait_wq)
3003 destroy_workqueue(ic->wait_wq);
3004 if (ic->commit_wq)
3005 destroy_workqueue(ic->commit_wq);
3006 if (ic->writer_wq)
3007 destroy_workqueue(ic->writer_wq);
3008 if (ic->bufio)
3009 dm_bufio_client_destroy(ic->bufio);
3010 mempool_destroy(ic->journal_io_mempool);
3011 if (ic->io)
3012 dm_io_client_destroy(ic->io);
3013 if (ic->dev)
3014 dm_put_device(ti, ic->dev);
3015 dm_integrity_free_page_list(ic, ic->journal);
3016 dm_integrity_free_page_list(ic, ic->journal_io);
3017 dm_integrity_free_page_list(ic, ic->journal_xor);
3018 if (ic->journal_scatterlist)
3019 dm_integrity_free_journal_scatterlist(ic, ic->journal_scatterlist);
3020 if (ic->journal_io_scatterlist)
3021 dm_integrity_free_journal_scatterlist(ic, ic->journal_io_scatterlist);
3022 if (ic->sk_requests) {
3023 unsigned i;
3024
3025 for (i = 0; i < ic->journal_sections; i++) {
3026 struct skcipher_request *req = ic->sk_requests[i];
3027 if (req) {
3028 kzfree(req->iv);
3029 skcipher_request_free(req);
3030 }
3031 }
3032 kvfree(ic->sk_requests);
3033 }
3034 kvfree(ic->journal_tree);
3035 if (ic->sb)
3036 free_pages_exact(ic->sb, SB_SECTORS << SECTOR_SHIFT);
3037
3038 if (ic->internal_hash)
3039 crypto_free_shash(ic->internal_hash);
3040 free_alg(&ic->internal_hash_alg);
3041
3042 if (ic->journal_crypt)
3043 crypto_free_skcipher(ic->journal_crypt);
3044 free_alg(&ic->journal_crypt_alg);
3045
3046 if (ic->journal_mac)
3047 crypto_free_shash(ic->journal_mac);
3048 free_alg(&ic->journal_mac_alg);
3049
3050 kfree(ic);
3051}
3052
3053static struct target_type integrity_target = {
3054 .name = "integrity",
3055 .version = {1, 0, 0},
3056 .module = THIS_MODULE,
3057 .features = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
3058 .ctr = dm_integrity_ctr,
3059 .dtr = dm_integrity_dtr,
3060 .map = dm_integrity_map,
3061 .postsuspend = dm_integrity_postsuspend,
3062 .resume = dm_integrity_resume,
3063 .status = dm_integrity_status,
3064 .iterate_devices = dm_integrity_iterate_devices,
3065};
3066
3067int __init dm_integrity_init(void)
3068{
3069 int r;
3070
3071 journal_io_cache = kmem_cache_create("integrity_journal_io",
3072 sizeof(struct journal_io), 0, 0, NULL);
3073 if (!journal_io_cache) {
3074 DMERR("can't allocate journal io cache");
3075 return -ENOMEM;
3076 }
3077
3078 r = dm_register_target(&integrity_target);
3079
3080 if (r < 0)
3081 DMERR("register failed %d", r);
3082
3083 return r;
3084}
3085
3086void dm_integrity_exit(void)
3087{
3088 dm_unregister_target(&integrity_target);
3089 kmem_cache_destroy(journal_io_cache);
3090}
3091
3092module_init(dm_integrity_init);
3093module_exit(dm_integrity_exit);
3094
3095MODULE_AUTHOR("Milan Broz");
3096MODULE_AUTHOR("Mikulas Patocka");
3097MODULE_DESCRIPTION(DM_NAME " target for integrity tags extension");
3098MODULE_LICENSE("GPL");