/*
 * Copyright (C) 2012 Red Hat, Inc.
 *
 * Author: Mikulas Patocka <mpatocka@redhat.com>
 *
 * Based on Chromium dm-verity driver (C) 2011 The Chromium OS Authors
 *
 * This file is released under the GPLv2.
 *
 * In the file "/sys/module/dm_verity/parameters/prefetch_cluster" you can set
 * the default prefetch value. Data are read in "prefetch_cluster" chunks from
 * the hash device. Setting this greatly improves performance when data and
 * hash are on the same disk on different partitions on devices with poor
 * random access behavior.
 */

#include "dm-verity.h"

#include <linux/module.h>
#include <linux/reboot.h>

#define DM_MSG_PREFIX			"verity"

#define DM_VERITY_ENV_LENGTH		42
#define DM_VERITY_ENV_VAR_NAME		"DM_VERITY_ERR_BLOCK_NR"

#define DM_VERITY_DEFAULT_PREFETCH_SIZE	262144

#define DM_VERITY_MAX_CORRUPTED_ERRS	100

#define DM_VERITY_OPT_LOGGING		"ignore_corruption"
#define DM_VERITY_OPT_RESTART		"restart_on_corruption"

#define DM_VERITY_OPTS_MAX		1

static unsigned dm_verity_prefetch_cluster = DM_VERITY_DEFAULT_PREFETCH_SIZE;

module_param_named(prefetch_cluster, dm_verity_prefetch_cluster, uint, S_IRUGO | S_IWUSR);

struct dm_verity_prefetch_work {
        struct work_struct work;
        struct dm_verity *v;
        sector_t block;
        unsigned n_blocks;
};

/*
 * Auxiliary structure appended to each dm-bufio buffer. If the value
 * hash_verified is nonzero, the hash of the block has been verified.
 *
 * The variable hash_verified is set to 0 when allocating the buffer, then
 * it can be changed to 1 and it is never reset to 0 again.
 *
 * There is no lock around this value; at worst, a race can cause multiple
 * processes to verify the hash of the same buffer simultaneously and to
 * write 1 to hash_verified at the same time.
 * This condition is harmless, so we don't need locking.
 */
struct buffer_aux {
        int hash_verified;
};

/*
 * Initialize struct buffer_aux for a freshly created buffer.
 */
static void dm_bufio_alloc_callback(struct dm_buffer *buf)
{
        struct buffer_aux *aux = dm_bufio_get_aux_data(buf);

        aux->hash_verified = 0;
}

/*
 * Translate input sector number to the sector number on the target device.
 */
static sector_t verity_map_sector(struct dm_verity *v, sector_t bi_sector)
{
        return v->data_start + dm_target_offset(v->ti, bi_sector);
}

/*
 * Return hash position of a specified block at a specified tree level
 * (0 is the lowest level).
 * The lowest "hash_per_block_bits"-bits of the result denote hash position
 * inside a hash block. The remaining bits denote location of the hash block.
 */
static sector_t verity_position_at_level(struct dm_verity *v, sector_t block,
                                         int level)
{
        return block >> (level * v->hash_per_block_bits);
}

/*
 * Wrapper for crypto_shash_init, which handles verity salting.
 */
static int verity_hash_init(struct dm_verity *v, struct shash_desc *desc)
{
        int r;

        desc->tfm = v->tfm;
        desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

        r = crypto_shash_init(desc);

        if (unlikely(r < 0)) {
                DMERR("crypto_shash_init failed: %d", r);
                return r;
        }

        if (likely(v->version >= 1)) {
                r = crypto_shash_update(desc, v->salt, v->salt_size);

                if (unlikely(r < 0)) {
                        DMERR("crypto_shash_update failed: %d", r);
                        return r;
                }
        }

        return 0;
}

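/*
 * Wrapper for crypto_shash_update that logs failures from the crypto layer.
 */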
static int verity_hash_update(struct dm_verity *v, struct shash_desc *desc,
                              const u8 *data, size_t len)
{
        int r = crypto_shash_update(desc, data, len);

        if (unlikely(r < 0))
                DMERR("crypto_shash_update failed: %d", r);

        return r;
}

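/*
 * Wrapper for crypto_shash_final. For format version 0 the salt is hashed
 * after the data instead of before it (see verity_hash_init).
 */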
static int verity_hash_final(struct dm_verity *v, struct shash_desc *desc,
                             u8 *digest)
{
        int r;

        if (unlikely(!v->version)) {
                r = crypto_shash_update(desc, v->salt, v->salt_size);

                if (r < 0) {
                        DMERR("crypto_shash_update failed: %d", r);
                        return r;
                }
        }

        r = crypto_shash_final(desc, digest);

        if (unlikely(r < 0))
                DMERR("crypto_shash_final failed: %d", r);

        return r;
}

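/*
 * Hash one contiguous buffer of "len" bytes into "digest", applying the salt
 * according to the configured format version.
 */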
int verity_hash(struct dm_verity *v, struct shash_desc *desc,
                const u8 *data, size_t len, u8 *digest)
{
        int r;

        r = verity_hash_init(v, desc);
        if (unlikely(r < 0))
                return r;

        r = verity_hash_update(v, desc, data, len);
        if (unlikely(r < 0))
                return r;

        return verity_hash_final(v, desc, digest);
}

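/*
 * For a given data block and tree level, compute the hash block that holds
 * the wanted digest and, if requested, the byte offset of that digest inside
 * the hash block.
 */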
static void verity_hash_at_level(struct dm_verity *v, sector_t block, int level,
                                 sector_t *hash_block, unsigned *offset)
{
        sector_t position = verity_position_at_level(v, block, level);
        unsigned idx;

        *hash_block = v->hash_level_block[level] + (position >> v->hash_per_block_bits);

        if (!offset)
                return;

        idx = position & ((1 << v->hash_per_block_bits) - 1);
        if (!v->version)
                *offset = idx * v->digest_size;
        else
                *offset = idx << (v->hash_dev_block_bits - v->hash_per_block_bits);
}

/*
 * Handle verification errors.
 */
static int verity_handle_err(struct dm_verity *v, enum verity_block_type type,
                             unsigned long long block)
{
        char verity_env[DM_VERITY_ENV_LENGTH];
        char *envp[] = { verity_env, NULL };
        const char *type_str = "";
        struct mapped_device *md = dm_table_get_md(v->ti->table);

        /* Corruption should be visible in device status in all modes */
        v->hash_failed = 1;

        if (v->corrupted_errs >= DM_VERITY_MAX_CORRUPTED_ERRS)
                goto out;

        v->corrupted_errs++;

        switch (type) {
        case DM_VERITY_BLOCK_TYPE_DATA:
                type_str = "data";
                break;
        case DM_VERITY_BLOCK_TYPE_METADATA:
                type_str = "metadata";
                break;
        default:
                BUG();
        }

        DMERR("%s: %s block %llu is corrupted", v->data_dev->name, type_str,
              block);

        if (v->corrupted_errs == DM_VERITY_MAX_CORRUPTED_ERRS)
                DMERR("%s: reached maximum errors", v->data_dev->name);

        snprintf(verity_env, DM_VERITY_ENV_LENGTH, "%s=%d,%llu",
                 DM_VERITY_ENV_VAR_NAME, type, block);

        kobject_uevent_env(&disk_to_dev(dm_disk(md))->kobj, KOBJ_CHANGE, envp);

out:
        if (v->mode == DM_VERITY_MODE_LOGGING)
                return 0;

        if (v->mode == DM_VERITY_MODE_RESTART)
                kernel_restart("dm-verity device corrupted");

        return 1;
}

/*
 * Verify hash of a metadata block pertaining to the specified data block
 * ("block" argument) at a specified level ("level" argument).
 *
 * On successful return, verity_io_want_digest(v, io) contains the hash value
 * for a lower tree level or for the data block (if we're at the lowest level).
 *
 * If "skip_unverified" is true, unverified buffer is skipped and 1 is returned.
 * If "skip_unverified" is false, unverified buffer is hashed and verified
 * against current value of verity_io_want_digest(v, io).
 */
static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
                               sector_t block, int level, bool skip_unverified,
                               u8 *want_digest)
{
        struct dm_buffer *buf;
        struct buffer_aux *aux;
        u8 *data;
        int r;
        sector_t hash_block;
        unsigned offset;

        verity_hash_at_level(v, block, level, &hash_block, &offset);

        data = dm_bufio_read(v->bufio, hash_block, &buf);
        if (IS_ERR(data))
                return PTR_ERR(data);

        aux = dm_bufio_get_aux_data(buf);

        if (!aux->hash_verified) {
                if (skip_unverified) {
                        r = 1;
                        goto release_ret_r;
                }

                r = verity_hash(v, verity_io_hash_desc(v, io),
                                data, 1 << v->hash_dev_block_bits,
                                verity_io_real_digest(v, io));
                if (unlikely(r < 0))
                        goto release_ret_r;

                if (likely(memcmp(verity_io_real_digest(v, io), want_digest,
                                  v->digest_size) == 0))
                        aux->hash_verified = 1;
                else if (verity_handle_err(v,
                                           DM_VERITY_BLOCK_TYPE_METADATA,
                                           hash_block)) {
                        r = -EIO;
                        goto release_ret_r;
                }
        }

        data += offset;
        memcpy(want_digest, data, v->digest_size);
        r = 0;

release_ret_r:
        dm_bufio_release(buf);
        return r;
}

/*
 * Find a hash for a given block, write it to digest and verify the integrity
 * of the hash tree if necessary.
 */
int verity_hash_for_block(struct dm_verity *v, struct dm_verity_io *io,
                          sector_t block, u8 *digest)
{
        int i;
        int r;

        if (likely(v->levels)) {
                /*
                 * First, we try to get the requested hash for
                 * the current block. If the hash block itself is
                 * verified, zero is returned. If it isn't, this
                 * function returns 1 and we fall back to whole
                 * chain verification.
                 */
                r = verity_verify_level(v, io, block, 0, true, digest);
                if (likely(r <= 0))
                        return r;
        }

        memcpy(digest, v->root_digest, v->digest_size);

        for (i = v->levels - 1; i >= 0; i--) {
                r = verity_verify_level(v, io, block, i, false, digest);
                if (unlikely(r))
                        return r;
        }

        return 0;
}

/*
 * Verify one "dm_verity_io" structure.
 */
static int verity_verify_io(struct dm_verity_io *io)
{
        struct dm_verity *v = io->v;
        struct bio *bio = dm_bio_from_per_bio_data(io,
                                                   v->ti->per_bio_data_size);
        unsigned b;

        for (b = 0; b < io->n_blocks; b++) {
                int r;
                unsigned todo;
                struct shash_desc *desc = verity_io_hash_desc(v, io);

                r = verity_hash_for_block(v, io, io->block + b,
                                          verity_io_want_digest(v, io));
                if (unlikely(r < 0))
                        return r;

                r = verity_hash_init(v, desc);
                if (unlikely(r < 0))
                        return r;

                todo = 1 << v->data_dev_block_bits;
                do {
                        u8 *page;
                        unsigned len;
                        struct bio_vec bv = bio_iter_iovec(bio, io->iter);

                        page = kmap_atomic(bv.bv_page);
                        len = bv.bv_len;
                        if (likely(len >= todo))
                                len = todo;
                        r = verity_hash_update(v, desc, page + bv.bv_offset,
                                               len);
                        kunmap_atomic(page);

                        if (unlikely(r < 0))
                                return r;

                        bio_advance_iter(bio, &io->iter, len);
                        todo -= len;
                } while (todo);

                r = verity_hash_final(v, desc, verity_io_real_digest(v, io));
                if (unlikely(r < 0))
                        return r;

                if (likely(memcmp(verity_io_real_digest(v, io),
                                  verity_io_want_digest(v, io), v->digest_size) == 0))
                        continue;
                else if (verity_handle_err(v, DM_VERITY_BLOCK_TYPE_DATA,
                                           io->block + b))
                        return -EIO;
        }

        return 0;
}

/*
 * End one "io" structure with a given error.
 */
static void verity_finish_io(struct dm_verity_io *io, int error)
{
        struct dm_verity *v = io->v;
        struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_bio_data_size);

        bio->bi_end_io = io->orig_bi_end_io;
        bio->bi_error = error;

        bio_endio(bio);
}

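/*
 * Workqueue handler: verify the queued I/O and complete the original bio
 * with the result.
 */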
static void verity_work(struct work_struct *w)
{
        struct dm_verity_io *io = container_of(w, struct dm_verity_io, work);

        verity_finish_io(io, verity_verify_io(io));
}

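/*
 * Completion callback for the bio submitted to the data device. Reads that
 * completed successfully are handed to the workqueue for verification;
 * failed reads are finished immediately with the I/O error.
 */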
static void verity_end_io(struct bio *bio)
{
        struct dm_verity_io *io = bio->bi_private;

        if (bio->bi_error) {
                verity_finish_io(io, bio->bi_error);
                return;
        }

        INIT_WORK(&io->work, verity_work);
        queue_work(io->v->verify_wq, &io->work);
}

/*
 * Prefetch buffers for the specified io.
 * The root buffer is not prefetched, it is assumed that it will be cached
 * all the time.
 */
static void verity_prefetch_io(struct work_struct *work)
{
        struct dm_verity_prefetch_work *pw =
                container_of(work, struct dm_verity_prefetch_work, work);
        struct dm_verity *v = pw->v;
        int i;

        for (i = v->levels - 2; i >= 0; i--) {
                sector_t hash_block_start;
                sector_t hash_block_end;
                verity_hash_at_level(v, pw->block, i, &hash_block_start, NULL);
                verity_hash_at_level(v, pw->block + pw->n_blocks - 1, i, &hash_block_end, NULL);
                if (!i) {
                        unsigned cluster = ACCESS_ONCE(dm_verity_prefetch_cluster);

                        cluster >>= v->data_dev_block_bits;
                        if (unlikely(!cluster))
                                goto no_prefetch_cluster;

                        if (unlikely(cluster & (cluster - 1)))
                                cluster = 1 << __fls(cluster);

                        hash_block_start &= ~(sector_t)(cluster - 1);
                        hash_block_end |= cluster - 1;
                        if (unlikely(hash_block_end >= v->hash_blocks))
                                hash_block_end = v->hash_blocks - 1;
                }
no_prefetch_cluster:
                dm_bufio_prefetch(v->bufio, hash_block_start,
                                  hash_block_end - hash_block_start + 1);
        }

        kfree(pw);
}

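/*
 * Queue prefetching of the hash blocks covering this I/O. Allocation failure
 * is not fatal here; the hash blocks will simply be read on demand.
 */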
static void verity_submit_prefetch(struct dm_verity *v, struct dm_verity_io *io)
{
        struct dm_verity_prefetch_work *pw;

        pw = kmalloc(sizeof(struct dm_verity_prefetch_work),
                GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);

        if (!pw)
                return;

        INIT_WORK(&pw->work, verity_prefetch_io);
        pw->v = v;
        pw->block = io->block;
        pw->n_blocks = io->n_blocks;
        queue_work(v->verify_wq, &pw->work);
}

/*
 * Bio map function. It allocates dm_verity_io structure and bio vector and
 * fills them. Then it issues prefetches and the I/O.
 */
static int verity_map(struct dm_target *ti, struct bio *bio)
{
        struct dm_verity *v = ti->private;
        struct dm_verity_io *io;

        bio->bi_bdev = v->data_dev->bdev;
        bio->bi_iter.bi_sector = verity_map_sector(v, bio->bi_iter.bi_sector);

        if (((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
            ((1 << (v->data_dev_block_bits - SECTOR_SHIFT)) - 1)) {
                DMERR_LIMIT("unaligned io");
                return -EIO;
        }

        if (bio_end_sector(bio) >>
            (v->data_dev_block_bits - SECTOR_SHIFT) > v->data_blocks) {
                DMERR_LIMIT("io out of range");
                return -EIO;
        }

        if (bio_data_dir(bio) == WRITE)
                return -EIO;

        io = dm_per_bio_data(bio, ti->per_bio_data_size);
        io->v = v;
        io->orig_bi_end_io = bio->bi_end_io;
        io->block = bio->bi_iter.bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT);
        io->n_blocks = bio->bi_iter.bi_size >> v->data_dev_block_bits;

        bio->bi_end_io = verity_end_io;
        bio->bi_private = io;
        io->iter = bio->bi_iter;

        verity_submit_prefetch(v, io);

        generic_make_request(bio);

        return DM_MAPIO_SUBMITTED;
}

/*
 * Status: V (valid) or C (corruption found)
 */
static void verity_status(struct dm_target *ti, status_type_t type,
                          unsigned status_flags, char *result, unsigned maxlen)
{
        struct dm_verity *v = ti->private;
        unsigned sz = 0;
        unsigned x;

        switch (type) {
        case STATUSTYPE_INFO:
                DMEMIT("%c", v->hash_failed ? 'C' : 'V');
                break;
        case STATUSTYPE_TABLE:
                DMEMIT("%u %s %s %u %u %llu %llu %s ",
                       v->version,
                       v->data_dev->name,
                       v->hash_dev->name,
                       1 << v->data_dev_block_bits,
                       1 << v->hash_dev_block_bits,
                       (unsigned long long)v->data_blocks,
                       (unsigned long long)v->hash_start,
                       v->alg_name
                       );
                for (x = 0; x < v->digest_size; x++)
                        DMEMIT("%02x", v->root_digest[x]);
                DMEMIT(" ");
                if (!v->salt_size)
                        DMEMIT("-");
                else
                        for (x = 0; x < v->salt_size; x++)
                                DMEMIT("%02x", v->salt[x]);
                if (v->mode != DM_VERITY_MODE_EIO) {
                        DMEMIT(" 1 ");
                        switch (v->mode) {
                        case DM_VERITY_MODE_LOGGING:
                                DMEMIT(DM_VERITY_OPT_LOGGING);
                                break;
                        case DM_VERITY_MODE_RESTART:
                                DMEMIT(DM_VERITY_OPT_RESTART);
                                break;
                        default:
                                BUG();
                        }
                }
                break;
        }
}

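/*
 * Forward ioctls to the underlying data device. Returning 1 signals to DM
 * core that this target does not span the whole device, so the ioctl needs
 * additional checking before being passed through.
 */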
static int verity_prepare_ioctl(struct dm_target *ti,
                struct block_device **bdev, fmode_t *mode)
{
        struct dm_verity *v = ti->private;

        *bdev = v->data_dev->bdev;

        if (v->data_start ||
            ti->len != i_size_read(v->data_dev->bdev->bd_inode) >> SECTOR_SHIFT)
                return 1;
        return 0;
}

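/*
 * Report the underlying data device to DM core's device iteration callback
 * (used, for example, when stacking queue limits across the table).
 */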
static int verity_iterate_devices(struct dm_target *ti,
                                  iterate_devices_callout_fn fn, void *data)
{
        struct dm_verity *v = ti->private;

        return fn(ti, v->data_dev, v->data_start, ti->len, data);
}

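/*
 * Raise the exposed logical and physical block sizes to at least the verity
 * data block size and advertise it as the minimum I/O size; verity_map
 * rejects I/O that is not aligned to whole data blocks.
 */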
static void verity_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
        struct dm_verity *v = ti->private;

        if (limits->logical_block_size < 1 << v->data_dev_block_bits)
                limits->logical_block_size = 1 << v->data_dev_block_bits;

        if (limits->physical_block_size < 1 << v->data_dev_block_bits)
                limits->physical_block_size = 1 << v->data_dev_block_bits;

        blk_limits_io_min(limits, limits->logical_block_size);
}

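/*
 * Destructor. Also used for cleanup on the constructor's error path, so each
 * resource is checked before it is released.
 */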
static void verity_dtr(struct dm_target *ti)
{
        struct dm_verity *v = ti->private;

        if (v->verify_wq)
                destroy_workqueue(v->verify_wq);

        if (v->bufio)
                dm_bufio_client_destroy(v->bufio);

        kfree(v->salt);
        kfree(v->root_digest);

        if (v->tfm)
                crypto_free_shash(v->tfm);

        kfree(v->alg_name);

        if (v->hash_dev)
                dm_put_device(ti, v->hash_dev);

        if (v->data_dev)
                dm_put_device(ti, v->data_dev);

        kfree(v);
}

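/*
 * Parse the optional feature arguments that may follow the ten mandatory
 * parameters: "ignore_corruption" or "restart_on_corruption".
 */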
static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v)
{
        int r;
        unsigned argc;
        struct dm_target *ti = v->ti;
        const char *arg_name;

        static struct dm_arg _args[] = {
                {0, DM_VERITY_OPTS_MAX, "Invalid number of feature args"},
        };

        r = dm_read_arg_group(_args, as, &argc, &ti->error);
        if (r)
                return -EINVAL;

        if (!argc)
                return 0;

        do {
                arg_name = dm_shift_arg(as);
                argc--;

                if (!strcasecmp(arg_name, DM_VERITY_OPT_LOGGING)) {
                        v->mode = DM_VERITY_MODE_LOGGING;
                        continue;

                } else if (!strcasecmp(arg_name, DM_VERITY_OPT_RESTART)) {
                        v->mode = DM_VERITY_MODE_RESTART;
                        continue;
                }

                ti->error = "Unrecognized verity feature request";
                return -EINVAL;
        } while (argc && !r);

        return r;
}

/*
 * Target parameters:
 *	<version>	The current format is version 1.
 *			Vsn 0 is compatible with original Chromium OS releases.
 *	<data device>
 *	<hash device>
 *	<data block size>
 *	<hash block size>
 *	<the number of data blocks>
 *	<hash start block>
 *	<algorithm>
 *	<digest>
 *	<salt>		Hex string or "-" if no salt.
 */
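/*
 * As an illustration only (the device names, sizes and digest below are
 * hypothetical), a dm table line using these parameters could look like:
 *
 *   0 409600 verity 1 /dev/sda1 /dev/sda2 4096 4096 51200 1 sha256 <digest> <salt>
 *
 * i.e. 51200 data blocks of 4096 bytes (409600 sectors), with the hash tree
 * starting at block 1 of the hash device.
 */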
static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
        struct dm_verity *v;
        struct dm_arg_set as;
        unsigned int num;
        unsigned long long num_ll;
        int r;
        int i;
        sector_t hash_position;
        char dummy;

        v = kzalloc(sizeof(struct dm_verity), GFP_KERNEL);
        if (!v) {
                ti->error = "Cannot allocate verity structure";
                return -ENOMEM;
        }
        ti->private = v;
        v->ti = ti;

        if ((dm_table_get_mode(ti->table) & ~FMODE_READ)) {
                ti->error = "Device must be readonly";
                r = -EINVAL;
                goto bad;
        }

        if (argc < 10) {
                ti->error = "Not enough arguments";
                r = -EINVAL;
                goto bad;
        }

        if (sscanf(argv[0], "%u%c", &num, &dummy) != 1 ||
            num > 1) {
                ti->error = "Invalid version";
                r = -EINVAL;
                goto bad;
        }
        v->version = num;

        r = dm_get_device(ti, argv[1], FMODE_READ, &v->data_dev);
        if (r) {
                ti->error = "Data device lookup failed";
                goto bad;
        }

        r = dm_get_device(ti, argv[2], FMODE_READ, &v->hash_dev);
        if (r) {
                ti->error = "Hash device lookup failed";
                goto bad;
        }

        if (sscanf(argv[3], "%u%c", &num, &dummy) != 1 ||
            !num || (num & (num - 1)) ||
            num < bdev_logical_block_size(v->data_dev->bdev) ||
            num > PAGE_SIZE) {
                ti->error = "Invalid data device block size";
                r = -EINVAL;
                goto bad;
        }
        v->data_dev_block_bits = __ffs(num);

        if (sscanf(argv[4], "%u%c", &num, &dummy) != 1 ||
            !num || (num & (num - 1)) ||
            num < bdev_logical_block_size(v->hash_dev->bdev) ||
            num > INT_MAX) {
                ti->error = "Invalid hash device block size";
                r = -EINVAL;
                goto bad;
        }
        v->hash_dev_block_bits = __ffs(num);

        if (sscanf(argv[5], "%llu%c", &num_ll, &dummy) != 1 ||
            (sector_t)(num_ll << (v->data_dev_block_bits - SECTOR_SHIFT))
            >> (v->data_dev_block_bits - SECTOR_SHIFT) != num_ll) {
                ti->error = "Invalid data blocks";
                r = -EINVAL;
                goto bad;
        }
        v->data_blocks = num_ll;

        if (ti->len > (v->data_blocks << (v->data_dev_block_bits - SECTOR_SHIFT))) {
                ti->error = "Data device is too small";
                r = -EINVAL;
                goto bad;
        }

        if (sscanf(argv[6], "%llu%c", &num_ll, &dummy) != 1 ||
            (sector_t)(num_ll << (v->hash_dev_block_bits - SECTOR_SHIFT))
            >> (v->hash_dev_block_bits - SECTOR_SHIFT) != num_ll) {
                ti->error = "Invalid hash start";
                r = -EINVAL;
                goto bad;
        }
        v->hash_start = num_ll;

        v->alg_name = kstrdup(argv[7], GFP_KERNEL);
        if (!v->alg_name) {
                ti->error = "Cannot allocate algorithm name";
                r = -ENOMEM;
                goto bad;
        }

        v->tfm = crypto_alloc_shash(v->alg_name, 0, 0);
        if (IS_ERR(v->tfm)) {
                ti->error = "Cannot initialize hash function";
                r = PTR_ERR(v->tfm);
                v->tfm = NULL;
                goto bad;
        }
        v->digest_size = crypto_shash_digestsize(v->tfm);
        if ((1 << v->hash_dev_block_bits) < v->digest_size * 2) {
                ti->error = "Digest size too big";
                r = -EINVAL;
                goto bad;
        }
        v->shash_descsize =
                sizeof(struct shash_desc) + crypto_shash_descsize(v->tfm);

        v->root_digest = kmalloc(v->digest_size, GFP_KERNEL);
        if (!v->root_digest) {
                ti->error = "Cannot allocate root digest";
                r = -ENOMEM;
                goto bad;
        }
        if (strlen(argv[8]) != v->digest_size * 2 ||
            hex2bin(v->root_digest, argv[8], v->digest_size)) {
                ti->error = "Invalid root digest";
                r = -EINVAL;
                goto bad;
        }

        if (strcmp(argv[9], "-")) {
                v->salt_size = strlen(argv[9]) / 2;
                v->salt = kmalloc(v->salt_size, GFP_KERNEL);
                if (!v->salt) {
                        ti->error = "Cannot allocate salt";
                        r = -ENOMEM;
                        goto bad;
                }
                if (strlen(argv[9]) != v->salt_size * 2 ||
                    hex2bin(v->salt, argv[9], v->salt_size)) {
                        ti->error = "Invalid salt";
                        r = -EINVAL;
                        goto bad;
                }
        }

        argv += 10;
        argc -= 10;

        /* Optional parameters */
        if (argc) {
                as.argc = argc;
                as.argv = argv;

                r = verity_parse_opt_args(&as, v);
                if (r < 0)
                        goto bad;
        }

        v->hash_per_block_bits =
                __fls((1 << v->hash_dev_block_bits) / v->digest_size);

        v->levels = 0;
        if (v->data_blocks)
                while (v->hash_per_block_bits * v->levels < 64 &&
                       (unsigned long long)(v->data_blocks - 1) >>
                       (v->hash_per_block_bits * v->levels))
                        v->levels++;

        if (v->levels > DM_VERITY_MAX_LEVELS) {
                ti->error = "Too many tree levels";
                r = -E2BIG;
                goto bad;
        }

        hash_position = v->hash_start;
        for (i = v->levels - 1; i >= 0; i--) {
                sector_t s;
                v->hash_level_block[i] = hash_position;
                s = (v->data_blocks + ((sector_t)1 << ((i + 1) * v->hash_per_block_bits)) - 1)
                    >> ((i + 1) * v->hash_per_block_bits);
                if (hash_position + s < hash_position) {
                        ti->error = "Hash device offset overflow";
                        r = -E2BIG;
                        goto bad;
                }
                hash_position += s;
        }
        v->hash_blocks = hash_position;

        v->bufio = dm_bufio_client_create(v->hash_dev->bdev,
                1 << v->hash_dev_block_bits, 1, sizeof(struct buffer_aux),
                dm_bufio_alloc_callback, NULL);
        if (IS_ERR(v->bufio)) {
                ti->error = "Cannot initialize dm-bufio";
                r = PTR_ERR(v->bufio);
                v->bufio = NULL;
                goto bad;
        }

        if (dm_bufio_get_device_size(v->bufio) < v->hash_blocks) {
                ti->error = "Hash device is too small";
                r = -E2BIG;
                goto bad;
        }

        ti->per_bio_data_size = roundup(sizeof(struct dm_verity_io) + v->shash_descsize + v->digest_size * 2, __alignof__(struct dm_verity_io));

        /* WQ_UNBOUND greatly improves performance when running on ramdisk */
        v->verify_wq = alloc_workqueue("kverityd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND, num_online_cpus());
        if (!v->verify_wq) {
                ti->error = "Cannot allocate workqueue";
                r = -ENOMEM;
                goto bad;
        }

        return 0;

bad:
        verity_dtr(ti);

        return r;
}

static struct target_type verity_target = {
        .name		= "verity",
        .version	= {1, 2, 0},
        .module		= THIS_MODULE,
        .ctr		= verity_ctr,
        .dtr		= verity_dtr,
        .map		= verity_map,
        .status		= verity_status,
        .prepare_ioctl	= verity_prepare_ioctl,
        .iterate_devices = verity_iterate_devices,
        .io_hints	= verity_io_hints,
};

static int __init dm_verity_init(void)
{
        int r;

        r = dm_register_target(&verity_target);
        if (r < 0)
                DMERR("register failed %d", r);

        return r;
}

static void __exit dm_verity_exit(void)
{
        dm_unregister_target(&verity_target);
}

module_init(dm_verity_init);
module_exit(dm_verity_exit);

MODULE_AUTHOR("Mikulas Patocka <mpatocka@redhat.com>");
MODULE_AUTHOR("Mandeep Baines <msb@chromium.org>");
MODULE_AUTHOR("Will Drewry <wad@chromium.org>");
MODULE_DESCRIPTION(DM_NAME " target for transparent disk integrity checking");
MODULE_LICENSE("GPL");