/*
 * Copyright (C) 2003 Jana Saout <jana@saout.de>
 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
 * Copyright (C) 2006-2017 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2013-2017 Milan Broz <gmazyland@gmail.com>
 *
 * This file is released under the GPL.
 */
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/key.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/backing-dev.h>
#include <linux/atomic.h>
#include <linux/scatterlist.h>
#include <linux/rbtree.h>
#include <linux/ctype.h>
#include <asm/unaligned.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/algapi.h>
#include <crypto/skcipher.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/rtnetlink.h> /* for struct rtattr and RTA macros only */
#include <keys/user-type.h>

#include <linux/device-mapper.h>
#define DM_MSG_PREFIX "crypt"
/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
	struct completion restart;
	struct bio *bio_in;
	struct bio *bio_out;
	struct bvec_iter iter_in;
	struct bvec_iter iter_out;
	sector_t cc_sector;
	atomic_t cc_pending;
	union {
		struct skcipher_request *req;
		struct aead_request *req_aead;
	} r;
};
/*
 * per bio private data
 */
struct dm_crypt_io {
	struct crypt_config *cc;
	struct bio *base_bio;
	u8 *integrity_metadata;
	bool integrity_metadata_from_pool;
	struct work_struct work;

	struct convert_context ctx;

	atomic_t io_pending;
	int error;
	sector_t sector;

	struct rb_node rb_node;
} CRYPTO_MINALIGN_ATTR;
struct dm_crypt_request {
	struct convert_context *ctx;
	struct scatterlist sg_in[4];
	struct scatterlist sg_out[4];
	u64 iv_sector;
};
struct crypt_config;

struct crypt_iv_operations {
	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
		   const char *opts);
	void (*dtr)(struct crypt_config *cc);
	int (*init)(struct crypt_config *cc);
	int (*wipe)(struct crypt_config *cc);
	int (*generator)(struct crypt_config *cc, u8 *iv,
			 struct dm_crypt_request *dmreq);
	int (*post)(struct crypt_config *cc, u8 *iv,
		    struct dm_crypt_request *dmreq);
};
struct iv_essiv_private {
	struct crypto_ahash *hash_tfm;
	u8 *salt;
};

struct iv_benbi_private {
	int shift;
};

#define LMK_SEED_SIZE 64 /* hash + 0 */
struct iv_lmk_private {
	struct crypto_shash *hash_tfm;
	u8 *seed;
};

#define TCW_WHITENING_SIZE 16
struct iv_tcw_private {
	struct crypto_shash *crc32_tfm;
	u8 *iv_seed;
	u8 *whitening;
};
/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
	     DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD };
enum cipher_flags {
	CRYPT_MODE_INTEGRITY_AEAD,	/* Use authenticated mode for cipher */
	CRYPT_IV_LARGE_SECTORS,		/* Calculate IV from sector_size, not 512B sectors */
};
/*
 * The fields in here must be read only after initialization.
 */
struct crypt_config {
	struct dm_dev *dev;
	sector_t start;

	/*
	 * pool for per bio private data, crypto requests,
	 * encryption requests/buffer pages and integrity tags
	 */
	mempool_t *req_pool;
	mempool_t *page_pool;
	mempool_t *tag_pool;
	unsigned tag_pool_max_sectors;

	struct bio_set *bs;
	struct mutex bio_alloc_lock;

	struct workqueue_struct *io_queue;
	struct workqueue_struct *crypt_queue;

	struct task_struct *write_thread;
	wait_queue_head_t write_thread_wait;
	struct rb_root write_tree;

	char *cipher;
	char *cipher_string;
	char *cipher_auth;
	char *key_string;

	const struct crypt_iv_operations *iv_gen_ops;
	union {
		struct iv_essiv_private essiv;
		struct iv_benbi_private benbi;
		struct iv_lmk_private lmk;
		struct iv_tcw_private tcw;
	} iv_gen_private;
	sector_t iv_offset;
	unsigned int iv_size;
	unsigned short int sector_size;
	unsigned char sector_shift;

	/* ESSIV: struct crypto_cipher *essiv_tfm */
	void *iv_private;
	union {
		struct crypto_skcipher **tfms;
		struct crypto_aead **tfms_aead;
	} cipher_tfm;
	unsigned tfms_count;
	unsigned long cipher_flags;

	/*
	 * Layout of each crypto request:
	 *
	 *   struct skcipher_request
	 *      context
	 *      padding
	 *   struct dm_crypt_request
	 *      padding
	 *   IV
	 *
	 * The padding is added so that dm_crypt_request and the IV are
	 * correctly aligned.
	 */
	unsigned int dmreq_start;

	unsigned int per_bio_data_size;

	unsigned long flags;
	unsigned int key_size;
	unsigned int key_parts;      /* independent parts in key buffer */
	unsigned int key_extra_size; /* additional keys length */
	unsigned int key_mac_size;   /* MAC key size for authenc(...) */

	unsigned int integrity_tag_size;
	unsigned int integrity_iv_size;
	unsigned int on_disk_tag_size;

	u8 *authenc_key; /* space for keys in authenc() format (if used) */
	u8 key[0];
};
#define MAX_TAG_SIZE	480
#define POOL_ENTRY_SIZE	512
static void clone_init(struct dm_crypt_io *, struct bio *);
static void kcryptd_queue_crypt(struct dm_crypt_io *io);
static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc,
					     struct scatterlist *sg);
/*
 * Use this to access cipher attributes that are the same for each CPU.
 */
static struct crypto_skcipher *any_tfm(struct crypt_config *cc)
{
	return cc->cipher_tfm.tfms[0];
}

static struct crypto_aead *any_tfm_aead(struct crypt_config *cc)
{
	return cc->cipher_tfm.tfms_aead[0];
}
/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * plain64: the initial vector is the 64-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possibly other narrow block modes)
 *
 * null: the initial vector is always zero. Provides compatibility with
 *       obsolete loop_fish2 devices. Do not use for new devices.
 *
 * lmk: Compatible implementation of the block chaining mode used
 *      by the Loop-AES block device encryption system
 *      designed by Jari Ruusu. See http://loop-aes.sourceforge.net/
 *      It operates on full 512 byte sectors and uses CBC
 *      with an IV derived from the sector number, the data and
 *      optionally an extra IV seed.
 *      This means that after decryption the first block of the sector
 *      must be tweaked according to the decrypted data.
 *      Loop-AES can use three encryption schemes:
 *        version 1: is plain aes-cbc mode
 *        version 2: uses a 64-multikey scheme with the lmk IV generator
 *        version 3: the same as version 2 with an additional IV seed
 *                   (it uses 65 keys, the last key is used as the IV seed)
 *
 * tcw: Compatible implementation of the block chaining mode used
 *      by the TrueCrypt device encryption system (prior to version 4.1).
 *      For more info see: https://gitlab.com/cryptsetup/cryptsetup/wikis/TrueCryptOnDiskFormat
 *      It operates on full 512 byte sectors and uses CBC
 *      with an IV derived from the initial key and the sector number.
 *      In addition, a whitening value is applied to every sector; the
 *      whitening is calculated from the initial key, the sector number and
 *      mixed using CRC32. Note that this encryption scheme is vulnerable to
 *      watermarking attacks and should only be used to access old compatible
 *      containers.
 *
 * plumb: unimplemented, see:
 * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
 */
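
/*
 * Worked example (illustrative only): for sector 5 with a 16-byte IV,
 * plain64 produces 05 00 00 00 00 00 00 00 followed by eight zero bytes
 * (the sector number in little-endian, zero padded), while benbi with a
 * 16-byte-wide block cipher uses shift = 9 - log2(16) = 5 and stores the
 * big-endian value (5 << 5) + 1 = 161 in the last eight bytes of the IV.
 */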
static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	*(__le32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);

	return 0;
}

static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
				struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);

	return 0;
}
/* Initialise ESSIV - compute salt but no local memory allocations */
static int crypt_iv_essiv_init(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	AHASH_REQUEST_ON_STACK(req, essiv->hash_tfm);
	struct scatterlist sg;
	struct crypto_cipher *essiv_tfm;
	int err;

	sg_init_one(&sg, cc->key, cc->key_size);
	ahash_request_set_tfm(req, essiv->hash_tfm);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
	ahash_request_set_crypt(req, &sg, essiv->salt, cc->key_size);

	err = crypto_ahash_digest(req);
	ahash_request_zero(req);
	if (err)
		return err;

	essiv_tfm = cc->iv_private;

	err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
				   crypto_ahash_digestsize(essiv->hash_tfm));

	return err;
}
/* Wipe salt and reset key derived from volume key */
static int crypt_iv_essiv_wipe(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	unsigned salt_size = crypto_ahash_digestsize(essiv->hash_tfm);
	struct crypto_cipher *essiv_tfm;
	int r, err = 0;

	memset(essiv->salt, 0, salt_size);

	essiv_tfm = cc->iv_private;
	r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size);
	if (r)
		err = r;

	return err;
}
/* Set up per cpu cipher state */
static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc,
					     struct dm_target *ti,
					     u8 *salt, unsigned saltsize)
{
	struct crypto_cipher *essiv_tfm;
	int err;

	/* Setup the essiv_tfm with the given salt */
	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(essiv_tfm)) {
		ti->error = "Error allocating crypto tfm for ESSIV";
		return essiv_tfm;
	}

	if (crypto_cipher_blocksize(essiv_tfm) != cc->iv_size) {
		ti->error = "Block size of ESSIV cipher does "
			    "not match IV size of block cipher";
		crypto_free_cipher(essiv_tfm);
		return ERR_PTR(-EINVAL);
	}

	err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
	if (err) {
		ti->error = "Failed to set key for ESSIV cipher";
		crypto_free_cipher(essiv_tfm);
		return ERR_PTR(err);
	}

	return essiv_tfm;
}
static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
	struct crypto_cipher *essiv_tfm;
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;

	crypto_free_ahash(essiv->hash_tfm);
	essiv->hash_tfm = NULL;

	kzfree(essiv->salt);
	essiv->salt = NULL;

	essiv_tfm = cc->iv_private;

	if (essiv_tfm)
		crypto_free_cipher(essiv_tfm);

	cc->iv_private = NULL;
}
static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	struct crypto_cipher *essiv_tfm = NULL;
	struct crypto_ahash *hash_tfm = NULL;
	u8 *salt = NULL;
	int err;

	if (!opts) {
		ti->error = "Digest algorithm missing for ESSIV mode";
		return -EINVAL;
	}

	/* Allocate hash algorithm */
	hash_tfm = crypto_alloc_ahash(opts, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash_tfm)) {
		ti->error = "Error initializing ESSIV hash";
		err = PTR_ERR(hash_tfm);
		goto bad;
	}

	salt = kzalloc(crypto_ahash_digestsize(hash_tfm), GFP_KERNEL);
	if (!salt) {
		ti->error = "Error kmallocing salt storage in ESSIV";
		err = -ENOMEM;
		goto bad;
	}

	cc->iv_gen_private.essiv.salt = salt;
	cc->iv_gen_private.essiv.hash_tfm = hash_tfm;

	essiv_tfm = setup_essiv_cpu(cc, ti, salt,
				    crypto_ahash_digestsize(hash_tfm));
	if (IS_ERR(essiv_tfm)) {
		crypt_iv_essiv_dtr(cc);
		return PTR_ERR(essiv_tfm);
	}
	cc->iv_private = essiv_tfm;

	return 0;

bad:
	if (hash_tfm && !IS_ERR(hash_tfm))
		crypto_free_ahash(hash_tfm);
	kfree(salt);
	return err;
}
static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	struct crypto_cipher *essiv_tfm = cc->iv_private;

	memset(iv, 0, cc->iv_size);
	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
	crypto_cipher_encrypt_one(essiv_tfm, iv, iv);

	return 0;
}
static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	unsigned bs = crypto_skcipher_blocksize(any_tfm(cc));
	int log = ilog2(bs);

	/* we need to calculate how far we must shift the sector count
	 * to get the cipher block count, we use this shift in _gen */

	if (1 << log != bs) {
		ti->error = "cypher blocksize is not a power of 2";
		return -EINVAL;
	}

	if (log > 9) {
		ti->error = "cypher blocksize is > 512";
		return -EINVAL;
	}

	cc->iv_gen_private.benbi.shift = 9 - log;

	return 0;
}
static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
}
static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	__be64 val;

	memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */

	val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1);
	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));

	return 0;
}
static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);

	return 0;
}
static void crypt_iv_lmk_dtr(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm))
		crypto_free_shash(lmk->hash_tfm);
	lmk->hash_tfm = NULL;

	kzfree(lmk->seed);
	lmk->seed = NULL;
}
static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
			    const char *opts)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	if (cc->sector_size != (1 << SECTOR_SHIFT)) {
		ti->error = "Unsupported sector size for LMK";
		return -EINVAL;
	}

	lmk->hash_tfm = crypto_alloc_shash("md5", 0, 0);
	if (IS_ERR(lmk->hash_tfm)) {
		ti->error = "Error initializing LMK hash";
		return PTR_ERR(lmk->hash_tfm);
	}

	/* No seed in LMK version 2 */
	if (cc->key_parts == cc->tfms_count) {
		lmk->seed = NULL;
		return 0;
	}

	lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL);
	if (!lmk->seed) {
		crypt_iv_lmk_dtr(cc);
		ti->error = "Error kmallocing seed storage in LMK";
		return -ENOMEM;
	}

	return 0;
}
static int crypt_iv_lmk_init(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
	int subkey_size = cc->key_size / cc->key_parts;

	/* LMK seed is on the position of LMK_KEYS + 1 key */
	if (lmk->seed)
		memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size),
		       crypto_shash_digestsize(lmk->hash_tfm));

	return 0;
}
static int crypt_iv_lmk_wipe(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	if (lmk->seed)
		memset(lmk->seed, 0, LMK_SEED_SIZE);

	return 0;
}
static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq,
			    u8 *data)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
	SHASH_DESC_ON_STACK(desc, lmk->hash_tfm);
	struct md5_state md5state;
	__le32 buf[4];
	int i, r;

	desc->tfm = lmk->hash_tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	r = crypto_shash_init(desc);
	if (r)
		return r;

	if (lmk->seed) {
		r = crypto_shash_update(desc, lmk->seed, LMK_SEED_SIZE);
		if (r)
			return r;
	}

	/* Sector is always 512B, block size 16, add data of blocks 1-31 */
	r = crypto_shash_update(desc, data + 16, 16 * 31);
	if (r)
		return r;

	/* Sector is cropped to 56 bits here */
	buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF);
	buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000);
	buf[2] = cpu_to_le32(4024);
	buf[3] = 0;
	r = crypto_shash_update(desc, (u8 *)buf, sizeof(buf));
	if (r)
		return r;

	/* No MD5 padding here */
	r = crypto_shash_export(desc, &md5state);
	if (r)
		return r;

	for (i = 0; i < MD5_HASH_WORDS; i++)
		__cpu_to_le32s(&md5state.hash[i]);
	memcpy(iv, &md5state.hash, cc->iv_size);

	return 0;
}
static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq)
{
	struct scatterlist *sg;
	u8 *src;
	int r = 0;

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
		sg = crypt_get_sg_data(cc, dmreq->sg_in);
		src = kmap_atomic(sg_page(sg));
		r = crypt_iv_lmk_one(cc, iv, dmreq, src + sg->offset);
		kunmap_atomic(src);
	} else
		memset(iv, 0, cc->iv_size);

	return r;
}
static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	struct scatterlist *sg;
	u8 *dst;
	int r;

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
		return 0;

	sg = crypt_get_sg_data(cc, dmreq->sg_out);
	dst = kmap_atomic(sg_page(sg));
	r = crypt_iv_lmk_one(cc, iv, dmreq, dst + sg->offset);

	/* Tweak the first block of plaintext sector */
	if (!r)
		crypto_xor(dst + sg->offset, iv, cc->iv_size);

	kunmap_atomic(dst);
	return r;
}
static void crypt_iv_tcw_dtr(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	kzfree(tcw->iv_seed);
	tcw->iv_seed = NULL;
	kzfree(tcw->whitening);
	tcw->whitening = NULL;

	if (tcw->crc32_tfm && !IS_ERR(tcw->crc32_tfm))
		crypto_free_shash(tcw->crc32_tfm);
	tcw->crc32_tfm = NULL;
}
static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
			    const char *opts)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	if (cc->sector_size != (1 << SECTOR_SHIFT)) {
		ti->error = "Unsupported sector size for TCW";
		return -EINVAL;
	}

	if (cc->key_size <= (cc->iv_size + TCW_WHITENING_SIZE)) {
		ti->error = "Wrong key size for TCW";
		return -EINVAL;
	}

	tcw->crc32_tfm = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(tcw->crc32_tfm)) {
		ti->error = "Error initializing CRC32 in TCW";
		return PTR_ERR(tcw->crc32_tfm);
	}

	tcw->iv_seed = kzalloc(cc->iv_size, GFP_KERNEL);
	tcw->whitening = kzalloc(TCW_WHITENING_SIZE, GFP_KERNEL);
	if (!tcw->iv_seed || !tcw->whitening) {
		crypt_iv_tcw_dtr(cc);
		ti->error = "Error allocating seed storage in TCW";
		return -ENOMEM;
	}

	return 0;
}
static int crypt_iv_tcw_init(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	int key_offset = cc->key_size - cc->iv_size - TCW_WHITENING_SIZE;

	memcpy(tcw->iv_seed, &cc->key[key_offset], cc->iv_size);
	memcpy(tcw->whitening, &cc->key[key_offset + cc->iv_size],
	       TCW_WHITENING_SIZE);

	return 0;
}
static int crypt_iv_tcw_wipe(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	memset(tcw->iv_seed, 0, cc->iv_size);
	memset(tcw->whitening, 0, TCW_WHITENING_SIZE);

	return 0;
}
static int crypt_iv_tcw_whitening(struct crypt_config *cc,
				  struct dm_crypt_request *dmreq,
				  u8 *data)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	__le64 sector = cpu_to_le64(dmreq->iv_sector);
	u8 buf[TCW_WHITENING_SIZE];
	SHASH_DESC_ON_STACK(desc, tcw->crc32_tfm);
	int i, r;

	/* xor whitening with sector number */
	memcpy(buf, tcw->whitening, TCW_WHITENING_SIZE);
	crypto_xor(buf, (u8 *)&sector, 8);
	crypto_xor(&buf[8], (u8 *)&sector, 8);

	/* calculate crc32 for every 32bit part and xor it */
	desc->tfm = tcw->crc32_tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	for (i = 0; i < 4; i++) {
		r = crypto_shash_init(desc);
		if (r)
			goto out;
		r = crypto_shash_update(desc, &buf[i * 4], 4);
		if (r)
			goto out;
		r = crypto_shash_final(desc, &buf[i * 4]);
		if (r)
			goto out;
	}
	crypto_xor(&buf[0], &buf[12], 4);
	crypto_xor(&buf[4], &buf[8], 4);

	/* apply whitening (8 bytes) to whole sector */
	for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++)
		crypto_xor(data + i * 8, buf, 8);
out:
	memzero_explicit(buf, sizeof(buf));
	return r;
}
static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq)
{
	struct scatterlist *sg;
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	__le64 sector = cpu_to_le64(dmreq->iv_sector);
	u8 *src;
	int r = 0;

	/* Remove whitening from ciphertext */
	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
		sg = crypt_get_sg_data(cc, dmreq->sg_in);
		src = kmap_atomic(sg_page(sg));
		r = crypt_iv_tcw_whitening(cc, dmreq, src + sg->offset);
		kunmap_atomic(src);
	}

	/* Calculate IV */
	memcpy(iv, tcw->iv_seed, cc->iv_size);
	crypto_xor(iv, (u8 *)&sector, 8);
	if (cc->iv_size > 8)
		crypto_xor(&iv[8], (u8 *)&sector, cc->iv_size - 8);

	return r;
}
static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	struct scatterlist *sg;
	u8 *dst;
	int r;

	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
		return 0;

	/* Apply whitening on ciphertext */
	sg = crypt_get_sg_data(cc, dmreq->sg_out);
	dst = kmap_atomic(sg_page(sg));
	r = crypt_iv_tcw_whitening(cc, dmreq, dst + sg->offset);
	kunmap_atomic(dst);

	return r;
}
static int crypt_iv_random_gen(struct crypt_config *cc, u8 *iv,
			       struct dm_crypt_request *dmreq)
{
	/* Used only for writes, there must be an additional space to store IV */
	get_random_bytes(iv, cc->iv_size);
	return 0;
}
static const struct crypt_iv_operations crypt_iv_plain_ops = {
	.generator = crypt_iv_plain_gen
};

static const struct crypt_iv_operations crypt_iv_plain64_ops = {
	.generator = crypt_iv_plain64_gen
};

static const struct crypt_iv_operations crypt_iv_essiv_ops = {
	.ctr       = crypt_iv_essiv_ctr,
	.dtr       = crypt_iv_essiv_dtr,
	.init      = crypt_iv_essiv_init,
	.wipe      = crypt_iv_essiv_wipe,
	.generator = crypt_iv_essiv_gen
};

static const struct crypt_iv_operations crypt_iv_benbi_ops = {
	.ctr	   = crypt_iv_benbi_ctr,
	.dtr	   = crypt_iv_benbi_dtr,
	.generator = crypt_iv_benbi_gen
};

static const struct crypt_iv_operations crypt_iv_null_ops = {
	.generator = crypt_iv_null_gen
};

static const struct crypt_iv_operations crypt_iv_lmk_ops = {
	.ctr	   = crypt_iv_lmk_ctr,
	.dtr	   = crypt_iv_lmk_dtr,
	.init	   = crypt_iv_lmk_init,
	.wipe	   = crypt_iv_lmk_wipe,
	.generator = crypt_iv_lmk_gen,
	.post	   = crypt_iv_lmk_post
};

static const struct crypt_iv_operations crypt_iv_tcw_ops = {
	.ctr	   = crypt_iv_tcw_ctr,
	.dtr	   = crypt_iv_tcw_dtr,
	.init	   = crypt_iv_tcw_init,
	.wipe	   = crypt_iv_tcw_wipe,
	.generator = crypt_iv_tcw_gen,
	.post	   = crypt_iv_tcw_post
};

static struct crypt_iv_operations crypt_iv_random_ops = {
	.generator = crypt_iv_random_gen
};
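
/*
 * Rough lifecycle of these callbacks: ->ctr/->dtr run while the target is
 * constructed and destroyed, ->init and ->wipe run when the volume key is
 * set up or wiped, ->generator runs for every sector before it is passed
 * to the crypto API and ->post (lmk/tcw only) runs after decryption to
 * post-process the plaintext.
 */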
/*
 * Integrity extensions
 */
static bool crypt_integrity_aead(struct crypt_config *cc)
{
	return test_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags);
}

static bool crypt_integrity_hmac(struct crypt_config *cc)
{
	return crypt_integrity_aead(cc) && cc->key_mac_size;
}
/* Get sg containing data */
static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc,
					     struct scatterlist *sg)
{
	if (unlikely(crypt_integrity_aead(cc)))
		return &sg[2];

	return sg;
}
static int dm_crypt_integrity_io_alloc(struct dm_crypt_io *io, struct bio *bio)
{
	struct bio_integrity_payload *bip;
	unsigned int tag_len;
	int ret;

	if (!bio_sectors(bio) || !io->cc->on_disk_tag_size)
		return 0;

	bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
	if (IS_ERR(bip))
		return PTR_ERR(bip);

	tag_len = io->cc->on_disk_tag_size * bio_sectors(bio);

	bip->bip_iter.bi_size = tag_len;
	bip->bip_iter.bi_sector = io->cc->start + io->sector;

	/* We own the metadata, do not let bio_free to release it */
	bip->bip_flags &= ~BIP_BLOCK_INTEGRITY;

	ret = bio_integrity_add_page(bio, virt_to_page(io->integrity_metadata),
				     tag_len, offset_in_page(io->integrity_metadata));
	if (unlikely(ret != tag_len))
		return -ENOMEM;

	return 0;
}
static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
{
#ifdef CONFIG_BLK_DEV_INTEGRITY
	struct blk_integrity *bi = blk_get_integrity(cc->dev->bdev->bd_disk);

	/* From now we require underlying device with our integrity profile */
	if (!bi || strcasecmp(bi->profile->name, "DM-DIF-EXT-TAG")) {
		ti->error = "Integrity profile not supported.";
		return -EINVAL;
	}

	if (bi->tag_size != cc->on_disk_tag_size) {
		ti->error = "Integrity profile tag size mismatch.";
		return -EINVAL;
	}

	if (crypt_integrity_aead(cc)) {
		cc->integrity_tag_size = cc->on_disk_tag_size - cc->integrity_iv_size;
		DMINFO("Integrity AEAD, tag size %u, IV size %u.",
		       cc->integrity_tag_size, cc->integrity_iv_size);

		if (crypto_aead_setauthsize(any_tfm_aead(cc), cc->integrity_tag_size)) {
			ti->error = "Integrity AEAD auth tag size is not supported.";
			return -EINVAL;
		}
	} else if (cc->integrity_iv_size)
		DMINFO("Additional per-sector space %u bytes for IV.",
		       cc->integrity_iv_size);

	if ((cc->integrity_tag_size + cc->integrity_iv_size) != bi->tag_size) {
		ti->error = "Not enough space for integrity tag in the profile.";
		return -EINVAL;
	}

	return 0;
#else
	ti->error = "Integrity profile not supported.";
	return -EINVAL;
#endif
}
static void crypt_convert_init(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct bio *bio_out, struct bio *bio_in,
			       sector_t sector)
{
	ctx->bio_in = bio_in;
	ctx->bio_out = bio_out;
	if (bio_in)
		ctx->iter_in = bio_in->bi_iter;
	if (bio_out)
		ctx->iter_out = bio_out->bi_iter;
	ctx->cc_sector = sector + cc->iv_offset;
	init_completion(&ctx->restart);
}
static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
					     void *req)
{
	return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
}

static void *req_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq)
{
	return (void *)((char *)dmreq - cc->dmreq_start);
}
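
/*
 * The helpers below carve up the per-request scratch area laid out behind
 * struct dm_crypt_request: an aligned working IV, then the original IV
 * (org_iv), then the original 64-bit sector number, then the tag offset.
 */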
static u8 *iv_of_dmreq(struct crypt_config *cc,
		       struct dm_crypt_request *dmreq)
{
	if (crypt_integrity_aead(cc))
		return (u8 *)ALIGN((unsigned long)(dmreq + 1),
			crypto_aead_alignmask(any_tfm_aead(cc)) + 1);
	else
		return (u8 *)ALIGN((unsigned long)(dmreq + 1),
			crypto_skcipher_alignmask(any_tfm(cc)) + 1);
}
static u8 *org_iv_of_dmreq(struct crypt_config *cc,
			   struct dm_crypt_request *dmreq)
{
	return iv_of_dmreq(cc, dmreq) + cc->iv_size;
}
static uint64_t *org_sector_of_dmreq(struct crypt_config *cc,
				     struct dm_crypt_request *dmreq)
{
	u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size + cc->iv_size;
	return (uint64_t *)ptr;
}
static unsigned int *org_tag_of_dmreq(struct crypt_config *cc,
				      struct dm_crypt_request *dmreq)
{
	u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size +
		  cc->iv_size + sizeof(uint64_t);
	return (unsigned int *)ptr;
}
static void *tag_from_dmreq(struct crypt_config *cc,
			    struct dm_crypt_request *dmreq)
{
	struct convert_context *ctx = dmreq->ctx;
	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);

	return &io->integrity_metadata[*org_tag_of_dmreq(cc, dmreq) *
		cc->on_disk_tag_size];
}
static void *iv_tag_from_dmreq(struct crypt_config *cc,
			       struct dm_crypt_request *dmreq)
{
	return tag_from_dmreq(cc, dmreq) + cc->integrity_tag_size;
}
static int crypt_convert_block_aead(struct crypt_config *cc,
				    struct convert_context *ctx,
				    struct aead_request *req,
				    unsigned int tag_offset)
{
	struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
	struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
	struct dm_crypt_request *dmreq;
	u8 *iv, *org_iv, *tag_iv, *tag;
	uint64_t *sector;
	int r = 0;

	BUG_ON(cc->integrity_iv_size && cc->integrity_iv_size != cc->iv_size);

	/* Reject unexpected unaligned bio. */
	if (unlikely(bv_in.bv_offset & (cc->sector_size - 1)))
		return -EIO;

	dmreq = dmreq_of_req(cc, req);
	dmreq->iv_sector = ctx->cc_sector;
	if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
		dmreq->iv_sector >>= cc->sector_shift;
	dmreq->ctx = ctx;

	*org_tag_of_dmreq(cc, dmreq) = tag_offset;

	sector = org_sector_of_dmreq(cc, dmreq);
	*sector = cpu_to_le64(ctx->cc_sector - cc->iv_offset);

	iv = iv_of_dmreq(cc, dmreq);
	org_iv = org_iv_of_dmreq(cc, dmreq);
	tag = tag_from_dmreq(cc, dmreq);
	tag_iv = iv_tag_from_dmreq(cc, dmreq);

	/*
	 * AEAD request:
	 *  |----- AAD -------|------ DATA -------|-- AUTH TAG --|
	 *  | (authenticated) | (auth+encryption) |              |
	 *  | sector_LE |  IV |  sector in/out    |  tag in/out  |
	 */
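	/*
	 * For example (illustrative numbers only): with 512-byte sectors and
	 * a 16-byte IV the AAD is 8 + 16 = 24 bytes (sector_LE + IV), the
	 * encrypted payload is 512 bytes and the authentication tag occupies
	 * integrity_tag_size bytes of the on-disk metadata.
	 */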
	sg_init_table(dmreq->sg_in, 4);
	sg_set_buf(&dmreq->sg_in[0], sector, sizeof(uint64_t));
	sg_set_buf(&dmreq->sg_in[1], org_iv, cc->iv_size);
	sg_set_page(&dmreq->sg_in[2], bv_in.bv_page, cc->sector_size, bv_in.bv_offset);
	sg_set_buf(&dmreq->sg_in[3], tag, cc->integrity_tag_size);

	sg_init_table(dmreq->sg_out, 4);
	sg_set_buf(&dmreq->sg_out[0], sector, sizeof(uint64_t));
	sg_set_buf(&dmreq->sg_out[1], org_iv, cc->iv_size);
	sg_set_page(&dmreq->sg_out[2], bv_out.bv_page, cc->sector_size, bv_out.bv_offset);
	sg_set_buf(&dmreq->sg_out[3], tag, cc->integrity_tag_size);

	if (cc->iv_gen_ops) {
		/* For READs use IV stored in integrity metadata */
		if (cc->integrity_iv_size && bio_data_dir(ctx->bio_in) != WRITE) {
			memcpy(org_iv, tag_iv, cc->iv_size);
		} else {
			r = cc->iv_gen_ops->generator(cc, org_iv, dmreq);
			if (r < 0)
				return r;
			/* Store generated IV in integrity metadata */
			if (cc->integrity_iv_size)
				memcpy(tag_iv, org_iv, cc->iv_size);
		}
		/* Working copy of IV, to be modified in crypto API */
		memcpy(iv, org_iv, cc->iv_size);
	}
	aead_request_set_ad(req, sizeof(uint64_t) + cc->iv_size);
	if (bio_data_dir(ctx->bio_in) == WRITE) {
		aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out,
				       cc->sector_size, iv);
		r = crypto_aead_encrypt(req);
		if (cc->integrity_tag_size + cc->integrity_iv_size != cc->on_disk_tag_size)
			memset(tag + cc->integrity_tag_size + cc->integrity_iv_size, 0,
			       cc->on_disk_tag_size - (cc->integrity_tag_size + cc->integrity_iv_size));
	} else {
		aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out,
				       cc->sector_size + cc->integrity_tag_size, iv);
		r = crypto_aead_decrypt(req);
	}

	if (r == -EBADMSG)
		DMERR_LIMIT("INTEGRITY AEAD ERROR, sector %llu",
			    (unsigned long long)le64_to_cpu(*sector));

	if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
		r = cc->iv_gen_ops->post(cc, org_iv, dmreq);

	bio_advance_iter(ctx->bio_in, &ctx->iter_in, cc->sector_size);
	bio_advance_iter(ctx->bio_out, &ctx->iter_out, cc->sector_size);

	return r;
}
static int crypt_convert_block_skcipher(struct crypt_config *cc,
					struct convert_context *ctx,
					struct skcipher_request *req,
					unsigned int tag_offset)
{
	struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
	struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
	struct scatterlist *sg_in, *sg_out;
	struct dm_crypt_request *dmreq;
	u8 *iv, *org_iv, *tag_iv;
	uint64_t *sector;
	int r = 0;

	/* Reject unexpected unaligned bio. */
	if (unlikely(bv_in.bv_offset & (cc->sector_size - 1)))
		return -EIO;

	dmreq = dmreq_of_req(cc, req);
	dmreq->iv_sector = ctx->cc_sector;
	if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
		dmreq->iv_sector >>= cc->sector_shift;
	dmreq->ctx = ctx;

	*org_tag_of_dmreq(cc, dmreq) = tag_offset;

	iv = iv_of_dmreq(cc, dmreq);
	org_iv = org_iv_of_dmreq(cc, dmreq);
	tag_iv = iv_tag_from_dmreq(cc, dmreq);

	sector = org_sector_of_dmreq(cc, dmreq);
	*sector = cpu_to_le64(ctx->cc_sector - cc->iv_offset);

	/* For skcipher we use only the first sg item */
	sg_in  = &dmreq->sg_in[0];
	sg_out = &dmreq->sg_out[0];

	sg_init_table(sg_in, 1);
	sg_set_page(sg_in, bv_in.bv_page, cc->sector_size, bv_in.bv_offset);

	sg_init_table(sg_out, 1);
	sg_set_page(sg_out, bv_out.bv_page, cc->sector_size, bv_out.bv_offset);

	if (cc->iv_gen_ops) {
		/* For READs use IV stored in integrity metadata */
		if (cc->integrity_iv_size && bio_data_dir(ctx->bio_in) != WRITE) {
			memcpy(org_iv, tag_iv, cc->integrity_iv_size);
		} else {
			r = cc->iv_gen_ops->generator(cc, org_iv, dmreq);
			if (r < 0)
				return r;
			/* Store generated IV in integrity metadata */
			if (cc->integrity_iv_size)
				memcpy(tag_iv, org_iv, cc->integrity_iv_size);
		}
		/* Working copy of IV, to be modified in crypto API */
		memcpy(iv, org_iv, cc->iv_size);
	}

	skcipher_request_set_crypt(req, sg_in, sg_out, cc->sector_size, iv);

	if (bio_data_dir(ctx->bio_in) == WRITE)
		r = crypto_skcipher_encrypt(req);
	else
		r = crypto_skcipher_decrypt(req);

	if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
		r = cc->iv_gen_ops->post(cc, org_iv, dmreq);

	bio_advance_iter(ctx->bio_in, &ctx->iter_in, cc->sector_size);
	bio_advance_iter(ctx->bio_out, &ctx->iter_out, cc->sector_size);

	return r;
}
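
/*
 * Both conversion helpers above process exactly cc->sector_size bytes per
 * crypto request, so with the optional sector_size > 512 setting one request
 * covers several 512B device sectors and, with CRYPT_IV_LARGE_SECTORS, the
 * IV is derived from the correspondingly shifted sector number.
 */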
static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error);
static void crypt_alloc_req_skcipher(struct crypt_config *cc,
				     struct convert_context *ctx)
{
	unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);

	if (!ctx->r.req)
		ctx->r.req = mempool_alloc(cc->req_pool, GFP_NOIO);

	skcipher_request_set_tfm(ctx->r.req, cc->cipher_tfm.tfms[key_index]);

	/*
	 * Use REQ_MAY_BACKLOG so a cipher driver internally backlogs
	 * requests if driver request queue is full.
	 */
	skcipher_request_set_callback(ctx->r.req,
	    CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
	    kcryptd_async_done, dmreq_of_req(cc, ctx->r.req));
}
static void crypt_alloc_req_aead(struct crypt_config *cc,
				 struct convert_context *ctx)
{
	if (!ctx->r.req_aead)
		ctx->r.req_aead = mempool_alloc(cc->req_pool, GFP_NOIO);

	aead_request_set_tfm(ctx->r.req_aead, cc->cipher_tfm.tfms_aead[0]);

	/*
	 * Use REQ_MAY_BACKLOG so a cipher driver internally backlogs
	 * requests if driver request queue is full.
	 */
	aead_request_set_callback(ctx->r.req_aead,
	    CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
	    kcryptd_async_done, dmreq_of_req(cc, ctx->r.req_aead));
}
static void crypt_alloc_req(struct crypt_config *cc,
			    struct convert_context *ctx)
{
	if (crypt_integrity_aead(cc))
		crypt_alloc_req_aead(cc, ctx);
	else
		crypt_alloc_req_skcipher(cc, ctx);
}
static void crypt_free_req_skcipher(struct crypt_config *cc,
				    struct skcipher_request *req, struct bio *base_bio)
{
	struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);

	if ((struct skcipher_request *)(io + 1) != req)
		mempool_free(req, cc->req_pool);
}
static void crypt_free_req_aead(struct crypt_config *cc,
				struct aead_request *req, struct bio *base_bio)
{
	struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);

	if ((struct aead_request *)(io + 1) != req)
		mempool_free(req, cc->req_pool);
}
static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_bio)
{
	if (crypt_integrity_aead(cc))
		crypt_free_req_aead(cc, req, base_bio);
	else
		crypt_free_req_skcipher(cc, req, base_bio);
}
/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */
static int crypt_convert(struct crypt_config *cc,
			 struct convert_context *ctx)
{
	unsigned int tag_offset = 0;
	unsigned int sector_step = cc->sector_size >> SECTOR_SHIFT;
	int r;

	atomic_set(&ctx->cc_pending, 1);

	while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {

		crypt_alloc_req(cc, ctx);
		atomic_inc(&ctx->cc_pending);

		if (crypt_integrity_aead(cc))
			r = crypt_convert_block_aead(cc, ctx, ctx->r.req_aead, tag_offset);
		else
			r = crypt_convert_block_skcipher(cc, ctx, ctx->r.req, tag_offset);

		switch (r) {
		/*
		 * The request was queued by a crypto driver
		 * but the driver request queue is full, let's wait.
		 */
		case -EBUSY:
			wait_for_completion(&ctx->restart);
			reinit_completion(&ctx->restart);
			/* fall through */
		/*
		 * The request is queued and processed asynchronously,
		 * completion function kcryptd_async_done() will be called.
		 */
		case -EINPROGRESS:
			ctx->r.req = NULL;
			ctx->cc_sector += sector_step;
			tag_offset += sector_step;
			continue;
		/*
		 * The request was already processed (synchronously).
		 */
		case 0:
			atomic_dec(&ctx->cc_pending);
			ctx->cc_sector += sector_step;
			tag_offset += sector_step;
			cond_resched();
			continue;
		/*
		 * There was a data integrity error.
		 */
		case -EBADMSG:
			atomic_dec(&ctx->cc_pending);
			return -EILSEQ;
		/*
		 * There was an error while processing the request.
		 */
		default:
			atomic_dec(&ctx->cc_pending);
			return r;
		}
	}

	return 0;
}
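
/*
 * crypt_convert() returns 0 once every block has been submitted; blocks
 * completed asynchronously are tracked via ctx->cc_pending and finished in
 * kcryptd_async_done(), while -EBADMSG (authentication failure) is mapped to
 * -EILSEQ for the caller.
 */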
static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations (but only because
 * max_segment_size is being constrained to PAGE_SIZE).
 *
 * This function may be called concurrently. If we allocate from the mempool
 * concurrently, there is a possibility of deadlock. For example, if we have
 * a mempool of 256 pages and two processes, each wanting 256 pages, allocate
 * from the mempool concurrently, it may deadlock in a situation where both
 * processes have allocated 128 pages and the mempool is exhausted.
 *
 * In order to avoid this scenario we allocate the pages under a mutex.
 *
 * In order to not degrade performance with excessive locking, we try
 * non-blocking allocations without a mutex first but on failure we fall back
 * to blocking allocations with a mutex.
 */
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
{
	struct crypt_config *cc = io->cc;
	struct bio *clone;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
	unsigned i, len, remaining_size;
	struct page *page;

retry:
	if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
		mutex_lock(&cc->bio_alloc_lock);

	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
	if (!clone)
		goto out;

	clone_init(io, clone);

	remaining_size = size;

	for (i = 0; i < nr_iovecs; i++) {
		page = mempool_alloc(cc->page_pool, gfp_mask);
		if (!page) {
			crypt_free_buffer_pages(cc, clone);
			bio_put(clone);
			gfp_mask |= __GFP_DIRECT_RECLAIM;
			goto retry;
		}

		len = (remaining_size > PAGE_SIZE) ? PAGE_SIZE : remaining_size;

		bio_add_page(clone, page, len, 0);

		remaining_size -= len;
	}

	/* Allocate space for integrity tags */
	if (dm_crypt_integrity_io_alloc(io, clone)) {
		crypt_free_buffer_pages(cc, clone);
		bio_put(clone);
		clone = NULL;
	}
out:
	if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
		mutex_unlock(&cc->bio_alloc_lock);

	return clone;
}
static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
{
	unsigned int i;
	struct bio_vec *bv;

	bio_for_each_segment_all(bv, clone, i) {
		BUG_ON(!bv->bv_page);
		mempool_free(bv->bv_page, cc->page_pool);
		bv->bv_page = NULL;
	}
}
static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
			  struct bio *bio, sector_t sector)
{
	io->cc = cc;
	io->base_bio = bio;
	io->sector = sector;
	io->error = 0;
	io->ctx.r.req = NULL;
	io->integrity_metadata = NULL;
	io->integrity_metadata_from_pool = false;
	atomic_set(&io->io_pending, 0);
}
static void crypt_inc_pending(struct dm_crypt_io *io)
{
	atomic_inc(&io->io_pending);
}
/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 */
static void crypt_dec_pending(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	struct bio *base_bio = io->base_bio;
	int error = io->error;

	if (!atomic_dec_and_test(&io->io_pending))
		return;

	if (io->ctx.r.req)
		crypt_free_req(cc, io->ctx.r.req, base_bio);

	if (unlikely(io->integrity_metadata_from_pool))
		mempool_free(io->integrity_metadata, io->cc->tag_pool);
	else
		kfree(io->integrity_metadata);

	base_bio->bi_error = error;
	bio_endio(base_bio);
}
/*
 * kcryptd/kcryptd_io:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated as otherwise the final stages could be
 * starved by new requests which can block in the first stages due
 * to memory allocation.
 *
 * The work is done per CPU global for all dm-crypt instances.
 * They should not depend on each other and do not block.
 */
static void crypt_endio(struct bio *clone)
{
	struct dm_crypt_io *io = clone->bi_private;
	struct crypt_config *cc = io->cc;
	unsigned rw = bio_data_dir(clone);
	int error;

	/*
	 * free the processed pages
	 */
	if (rw == WRITE)
		crypt_free_buffer_pages(cc, clone);

	error = clone->bi_error;
	bio_put(clone);

	if (rw == READ && !error) {
		kcryptd_queue_crypt(io);
		return;
	}

	if (unlikely(error))
		io->error = error;

	crypt_dec_pending(io);
}
static void clone_init(struct dm_crypt_io *io, struct bio *clone)
{
	struct crypt_config *cc = io->cc;

	clone->bi_private = io;
	clone->bi_end_io  = crypt_endio;
	clone->bi_bdev    = cc->dev->bdev;
	clone->bi_opf	  = io->base_bio->bi_opf;
}
static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
{
	struct crypt_config *cc = io->cc;
	struct bio *clone;

	/*
	 * We need the original biovec array in order to decrypt
	 * the whole bio data *afterwards* -- thanks to immutable
	 * biovecs we don't need to worry about the block layer
	 * modifying the biovec array; so leverage bio_clone_fast().
	 */
	clone = bio_clone_fast(io->base_bio, gfp, cc->bs);
	if (!clone)
		return 1;

	crypt_inc_pending(io);

	clone_init(io, clone);
	clone->bi_iter.bi_sector = cc->start + io->sector;

	if (dm_crypt_integrity_io_alloc(io, clone)) {
		crypt_dec_pending(io);
		bio_put(clone);
		return 1;
	}

	generic_make_request(clone);
	return 0;
}
static void kcryptd_io_read_work(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	crypt_inc_pending(io);
	if (kcryptd_io_read(io, GFP_NOIO))
		io->error = -ENOMEM;
	crypt_dec_pending(io);
}
static void kcryptd_queue_read(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;

	INIT_WORK(&io->work, kcryptd_io_read_work);
	queue_work(cc->io_queue, &io->work);
}
static void kcryptd_io_write(struct dm_crypt_io *io)
{
	struct bio *clone = io->ctx.bio_out;

	generic_make_request(clone);
}
#define crypt_io_from_node(node) rb_entry((node), struct dm_crypt_io, rb_node)
static int dmcrypt_write(void *data)
{
	struct crypt_config *cc = data;
	struct dm_crypt_io *io;

	while (1) {
		struct rb_root write_tree;
		struct blk_plug plug;

		DECLARE_WAITQUEUE(wait, current);

		spin_lock_irq(&cc->write_thread_wait.lock);
continue_locked:

		if (!RB_EMPTY_ROOT(&cc->write_tree))
			goto pop_from_list;

		set_current_state(TASK_INTERRUPTIBLE);
		__add_wait_queue(&cc->write_thread_wait, &wait);

		spin_unlock_irq(&cc->write_thread_wait.lock);

		if (unlikely(kthread_should_stop())) {
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&cc->write_thread_wait, &wait);
			break;
		}

		schedule();

		set_current_state(TASK_RUNNING);
		spin_lock_irq(&cc->write_thread_wait.lock);
		__remove_wait_queue(&cc->write_thread_wait, &wait);
		goto continue_locked;

pop_from_list:
		write_tree = cc->write_tree;
		cc->write_tree = RB_ROOT;
		spin_unlock_irq(&cc->write_thread_wait.lock);

		BUG_ON(rb_parent(write_tree.rb_node));

		/*
		 * Note: we cannot walk the tree here with rb_next because
		 * the structures may be freed when kcryptd_io_write is called.
		 */
		blk_start_plug(&plug);
		do {
			io = crypt_io_from_node(rb_first(&write_tree));
			rb_erase(&io->rb_node, &write_tree);
			kcryptd_io_write(io);
		} while (!RB_EMPTY_ROOT(&write_tree));
		blk_finish_plug(&plug);
	}
	return 0;
}
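
/*
 * Encrypted writes are either submitted directly (DM_CRYPT_NO_OFFLOAD) or
 * inserted into cc->write_tree keyed by sector, so that the dmcrypt_write
 * thread above dequeues and issues them in sector order.
 */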
static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
{
	struct bio *clone = io->ctx.bio_out;
	struct crypt_config *cc = io->cc;
	unsigned long flags;
	sector_t sector;
	struct rb_node **rbp, *parent;

	if (unlikely(io->error < 0)) {
		crypt_free_buffer_pages(cc, clone);
		bio_put(clone);
		crypt_dec_pending(io);
		return;
	}

	/* crypt_convert should have filled the clone bio */
	BUG_ON(io->ctx.iter_out.bi_size);

	clone->bi_iter.bi_sector = cc->start + io->sector;

	if (likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) {
		generic_make_request(clone);
		return;
	}

	spin_lock_irqsave(&cc->write_thread_wait.lock, flags);
	rbp = &cc->write_tree.rb_node;
	parent = NULL;
	sector = io->sector;
	while (*rbp) {
		parent = *rbp;
		if (sector < crypt_io_from_node(parent)->sector)
			rbp = &(*rbp)->rb_left;
		else
			rbp = &(*rbp)->rb_right;
	}
	rb_link_node(&io->rb_node, parent, rbp);
	rb_insert_color(&io->rb_node, &cc->write_tree);

	wake_up_locked(&cc->write_thread_wait);
	spin_unlock_irqrestore(&cc->write_thread_wait.lock, flags);
}
static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	struct bio *clone;
	int crypt_finished;
	sector_t sector = io->sector;
	int r;

	/*
	 * Prevent io from disappearing until this function completes.
	 */
	crypt_inc_pending(io);
	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);

	clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
	if (unlikely(!clone)) {
		io->error = -EIO;
		goto dec;
	}

	io->ctx.bio_out = clone;
	io->ctx.iter_out = clone->bi_iter;

	sector += bio_sectors(clone);

	crypt_inc_pending(io);
	r = crypt_convert(cc, &io->ctx);
	if (r)
		io->error = r;
	crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);

	/* Encryption was already finished, submit io now */
	if (crypt_finished) {
		kcryptd_crypt_write_io_submit(io, 0);
		io->sector = sector;
	}

dec:
	crypt_dec_pending(io);
}
static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
{
	crypt_dec_pending(io);
}
static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	int r = 0;

	crypt_inc_pending(io);

	crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
			   io->sector);

	r = crypt_convert(cc, &io->ctx);
	if (r < 0)
		io->error = r;

	if (atomic_dec_and_test(&io->ctx.cc_pending))
		kcryptd_crypt_read_done(io);

	crypt_dec_pending(io);
}
static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error)
{
	struct dm_crypt_request *dmreq = async_req->data;
	struct convert_context *ctx = dmreq->ctx;
	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
	struct crypt_config *cc = io->cc;

	/*
	 * A request from crypto driver backlog is going to be processed now,
	 * finish the completion and continue in crypt_convert().
	 * (Callback will be called for the second time for this request.)
	 */
	if (error == -EINPROGRESS) {
		complete(&ctx->restart);
		return;
	}

	if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
		error = cc->iv_gen_ops->post(cc, org_iv_of_dmreq(cc, dmreq), dmreq);

	if (error == -EBADMSG) {
		DMERR_LIMIT("INTEGRITY AEAD ERROR, sector %llu",
			    (unsigned long long)le64_to_cpu(*org_sector_of_dmreq(cc, dmreq)));
		io->error = -EILSEQ;
	} else if (error < 0)
		io->error = -EIO;

	crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);

	if (!atomic_dec_and_test(&ctx->cc_pending))
		return;

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_done(io);
	else
		kcryptd_crypt_write_io_submit(io, 1);
}
static void kcryptd_crypt(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_convert(io);
	else
		kcryptd_crypt_write_convert(io);
}
static void kcryptd_queue_crypt(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;

	INIT_WORK(&io->work, kcryptd_crypt);
	queue_work(cc->crypt_queue, &io->work);
}
/*
 * Decode key from its hex representation
 */
static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
{
	char buffer[3];
	unsigned int i;

	buffer[2] = '\0';

	for (i = 0; i < size; i++) {
		buffer[0] = *hex++;
		buffer[1] = *hex++;

		if (kstrtou8(buffer, 16, &key[i]))
			return -EINVAL;
	}

	if (*hex != '\0')
		return -EINVAL;

	return 0;
}
static void crypt_free_tfms_aead(struct crypt_config *cc)
{
	if (!cc->cipher_tfm.tfms_aead)
		return;

	if (cc->cipher_tfm.tfms_aead[0] && !IS_ERR(cc->cipher_tfm.tfms_aead[0])) {
		crypto_free_aead(cc->cipher_tfm.tfms_aead[0]);
		cc->cipher_tfm.tfms_aead[0] = NULL;
	}

	kfree(cc->cipher_tfm.tfms_aead);
	cc->cipher_tfm.tfms_aead = NULL;
}
static void crypt_free_tfms_skcipher(struct crypt_config *cc)
{
	unsigned i;

	if (!cc->cipher_tfm.tfms)
		return;

	for (i = 0; i < cc->tfms_count; i++)
		if (cc->cipher_tfm.tfms[i] && !IS_ERR(cc->cipher_tfm.tfms[i])) {
			crypto_free_skcipher(cc->cipher_tfm.tfms[i]);
			cc->cipher_tfm.tfms[i] = NULL;
		}

	kfree(cc->cipher_tfm.tfms);
	cc->cipher_tfm.tfms = NULL;
}
static void crypt_free_tfms(struct crypt_config *cc)
{
	if (crypt_integrity_aead(cc))
		crypt_free_tfms_aead(cc);
	else
		crypt_free_tfms_skcipher(cc);
}
static int crypt_alloc_tfms_skcipher(struct crypt_config *cc, char *ciphermode)
{
	unsigned i;
	int err;

	cc->cipher_tfm.tfms = kzalloc(cc->tfms_count *
				      sizeof(struct crypto_skcipher *), GFP_KERNEL);
	if (!cc->cipher_tfm.tfms)
		return -ENOMEM;

	for (i = 0; i < cc->tfms_count; i++) {
		cc->cipher_tfm.tfms[i] = crypto_alloc_skcipher(ciphermode, 0, 0);
		if (IS_ERR(cc->cipher_tfm.tfms[i])) {
			err = PTR_ERR(cc->cipher_tfm.tfms[i]);
			crypt_free_tfms(cc);
			return err;
		}
	}

	return 0;
}
*cc
, char *ciphermode
)
1901 cc
->cipher_tfm
.tfms
= kmalloc(sizeof(struct crypto_aead
*), GFP_KERNEL
);
1902 if (!cc
->cipher_tfm
.tfms
)
1905 cc
->cipher_tfm
.tfms_aead
[0] = crypto_alloc_aead(ciphermode
, 0, 0);
1906 if (IS_ERR(cc
->cipher_tfm
.tfms_aead
[0])) {
1907 err
= PTR_ERR(cc
->cipher_tfm
.tfms_aead
[0]);
1908 crypt_free_tfms(cc
);
1915 static int crypt_alloc_tfms(struct crypt_config
*cc
, char *ciphermode
)
1917 if (crypt_integrity_aead(cc
))
1918 return crypt_alloc_tfms_aead(cc
, ciphermode
);
1920 return crypt_alloc_tfms_skcipher(cc
, ciphermode
);
static unsigned crypt_subkey_size(struct crypt_config *cc)
{
	return (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count);
}

static unsigned crypt_authenckey_size(struct crypt_config *cc)
{
	return crypt_subkey_size(cc) + RTA_SPACE(sizeof(struct crypto_authenc_key_param));
}
/*
 * If AEAD is composed like authenc(hmac(sha256),xts(aes)),
 * the key must, for some reason, be in a special format.
 * This function converts cc->key to this special format.
 */
static void crypt_copy_authenckey(char *p, const void *key,
				  unsigned enckeylen, unsigned authkeylen)
{
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;

	rta = (struct rtattr *)p;
	param = RTA_DATA(rta);
	param->enckeylen = cpu_to_be32(enckeylen);
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	p += RTA_SPACE(sizeof(*param));
	memcpy(p, key + enckeylen, authkeylen);
	p += authkeylen;
	memcpy(p, key, enckeylen);
}
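
/*
 * The resulting buffer is laid out as required by authenc():
 *   [ rtattr CRYPTO_AUTHENC_KEYA_PARAM carrying enckeylen ]
 *   [ authentication (HMAC) key of authkeylen bytes ]
 *   [ encryption key of enckeylen bytes ]
 */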
static int crypt_setkey(struct crypt_config *cc)
{
	unsigned subkey_size;
	int err = 0, i, r;

	/* Ignore extra keys (which are used for IV etc) */
	subkey_size = crypt_subkey_size(cc);

	if (crypt_integrity_hmac(cc))
		crypt_copy_authenckey(cc->authenc_key, cc->key,
				      subkey_size - cc->key_mac_size,
				      cc->key_mac_size);
	for (i = 0; i < cc->tfms_count; i++) {
		if (crypt_integrity_hmac(cc))
			r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
				cc->authenc_key, crypt_authenckey_size(cc));
		else if (crypt_integrity_aead(cc))
			r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
					       cc->key + (i * subkey_size),
					       subkey_size);
		else
			r = crypto_skcipher_setkey(cc->cipher_tfm.tfms[i],
						   cc->key + (i * subkey_size),
						   subkey_size);
		if (r)
			err = r;
	}

	if (crypt_integrity_hmac(cc))
		memzero_explicit(cc->authenc_key, crypt_authenckey_size(cc));

	return err;
}
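
/*
 * With the multi-key (keycount) syntax the key buffer holds tfms_count
 * equally sized subkeys (IV/whitening material excluded via key_extra_size);
 * subkey i is programmed into tfm i above.
 */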
#ifdef CONFIG_KEYS

static bool contains_whitespace(const char *str)
{
	while (*str)
		if (isspace(*str++))
			return true;
	return false;
}
static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
{
	char *new_key_string, *key_desc;
	int ret;
	struct key *key;
	const struct user_key_payload *ukp;

	/*
	 * Reject key_string with whitespace. dm core currently lacks code for
	 * proper whitespace escaping in arguments on DM_TABLE_STATUS path.
	 */
	if (contains_whitespace(key_string)) {
		DMERR("whitespace chars not allowed in key string");
		return -EINVAL;
	}

	/* look for next ':' separating key_type from key_description */
	key_desc = strpbrk(key_string, ":");
	if (!key_desc || key_desc == key_string || !strlen(key_desc + 1))
		return -EINVAL;

	if (strncmp(key_string, "logon:", key_desc - key_string + 1) &&
	    strncmp(key_string, "user:", key_desc - key_string + 1))
		return -EINVAL;

	new_key_string = kstrdup(key_string, GFP_KERNEL);
	if (!new_key_string)
		return -ENOMEM;

	key = request_key(key_string[0] == 'l' ? &key_type_logon : &key_type_user,
			  key_desc + 1, NULL);
	if (IS_ERR(key)) {
		kzfree(new_key_string);
		return PTR_ERR(key);
	}

	down_read(&key->sem);

	ukp = user_key_payload_locked(key);
	if (!ukp) {
		up_read(&key->sem);
		key_put(key);
		kzfree(new_key_string);
		return -EKEYREVOKED;
	}

	if (cc->key_size != ukp->datalen) {
		up_read(&key->sem);
		key_put(key);
		kzfree(new_key_string);
		return -EINVAL;
	}

	memcpy(cc->key, ukp->data, cc->key_size);

	up_read(&key->sem);
	key_put(key);

	/* clear the flag since following operations may invalidate previously valid key */
	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);

	ret = crypt_setkey(cc);

	/* wipe the kernel key payload copy in each case */
	memset(cc->key, 0, cc->key_size * sizeof(u8));

	if (!ret) {
		set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
		kzfree(cc->key_string);
		cc->key_string = new_key_string;
	} else
		kzfree(new_key_string);

	return ret;
}
)
2080 if (*key_string
[0] != ':')
2081 return strlen(*key_string
) >> 1;
2083 /* look for next ':' in key string */
2084 colon
= strpbrk(*key_string
+ 1, ":");
2088 if (sscanf(*key_string
+ 1, "%u%c", &ret
, &dummy
) != 2 || dummy
!= ':')
2091 *key_string
= colon
;
2093 /* remaining key string should be :<logon|user>:<key_desc> */
#else

static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
{
	return -EINVAL;
}

static int get_key_size(char **key_string)
{
	return (*key_string[0] == ':') ? -EINVAL : strlen(*key_string) >> 1;
}

#endif /* CONFIG_KEYS */
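
/*
 * Hypothetical example of the keyring key syntax accepted below: a table key
 * field of ":32:logon:my_prefix:my_key" asks for a 32-byte volume key held in
 * the kernel "logon" keyring under the description "my_prefix:my_key".
 */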
static int crypt_set_key(struct crypt_config *cc, char *key)
{
	int r = -EINVAL;
	int key_string_len = strlen(key);

	/* Hyphen (which gives a key_size of zero) means there is no key. */
	if (!cc->key_size && strcmp(key, "-"))
		goto out;

	/* ':' means the key is in kernel keyring, short-circuit normal key processing */
	if (key[0] == ':') {
		r = crypt_set_keyring_key(cc, key + 1);
		goto out;
	}

	/* clear the flag since following operations may invalidate previously valid key */
	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);

	/* wipe references to any kernel keyring key */
	kzfree(cc->key_string);
	cc->key_string = NULL;

	/* Decode key from its hex representation. */
	if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0)
		goto out;

	r = crypt_setkey(cc);
	if (!r)
		set_bit(DM_CRYPT_KEY_VALID, &cc->flags);

out:
	/* Hex key string not needed after here, so wipe it. */
	memset(key, '0', key_string_len);

	return r;
}
*cc
)
2150 clear_bit(DM_CRYPT_KEY_VALID
, &cc
->flags
);
2151 memset(&cc
->key
, 0, cc
->key_size
* sizeof(u8
));
2152 kzfree(cc
->key_string
);
2153 cc
->key_string
= NULL
;
2155 return crypt_setkey(cc
);
static void crypt_dtr(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	ti->private = NULL;

	if (!cc)
		return;

	if (cc->write_thread)
		kthread_stop(cc->write_thread);

	if (cc->io_queue)
		destroy_workqueue(cc->io_queue);
	if (cc->crypt_queue)
		destroy_workqueue(cc->crypt_queue);

	crypt_free_tfms(cc);

	if (cc->bs)
		bioset_free(cc->bs);

	mempool_destroy(cc->page_pool);
	mempool_destroy(cc->req_pool);
	mempool_destroy(cc->tag_pool);

	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);

	if (cc->dev)
		dm_put_device(ti, cc->dev);

	kzfree(cc->cipher);
	kzfree(cc->cipher_string);
	kzfree(cc->key_string);
	kzfree(cc->cipher_auth);
	kzfree(cc->authenc_key);

	/* Must zero key material before freeing */
	kzfree(cc);
}
static int crypt_ctr_ivmode(struct dm_target *ti, const char *ivmode)
{
	struct crypt_config *cc = ti->private;

	if (crypt_integrity_aead(cc))
		cc->iv_size = crypto_aead_ivsize(any_tfm_aead(cc));
	else
		cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));

	if (cc->iv_size)
		/* at least a 64 bit sector number should fit in our buffer */
		cc->iv_size = max(cc->iv_size,
				  (unsigned int)(sizeof(u64) / sizeof(u8)));
	else if (ivmode) {
		DMWARN("Selected cipher does not support IVs");
		ivmode = NULL;
	}

	/* Choose ivmode, see comments at iv code. */
	if (ivmode == NULL)
		cc->iv_gen_ops = NULL;
	else if (strcmp(ivmode, "plain") == 0)
		cc->iv_gen_ops = &crypt_iv_plain_ops;
	else if (strcmp(ivmode, "plain64") == 0)
		cc->iv_gen_ops = &crypt_iv_plain64_ops;
	else if (strcmp(ivmode, "essiv") == 0)
		cc->iv_gen_ops = &crypt_iv_essiv_ops;
	else if (strcmp(ivmode, "benbi") == 0)
		cc->iv_gen_ops = &crypt_iv_benbi_ops;
	else if (strcmp(ivmode, "null") == 0)
		cc->iv_gen_ops = &crypt_iv_null_ops;
	else if (strcmp(ivmode, "lmk") == 0) {
		cc->iv_gen_ops = &crypt_iv_lmk_ops;
		/*
		 * Versions 2 and 3 are recognised according to the length of
		 * the provided multi-key string.
		 * If present (version 3), the last key is used as the IV seed.
		 * All keys (including the IV seed) are always the same size.
		 */
		if (cc->key_size % cc->key_parts) {
			cc->key_parts++;
			cc->key_extra_size = cc->key_size / cc->key_parts;
		}
	} else if (strcmp(ivmode, "tcw") == 0) {
		cc->iv_gen_ops = &crypt_iv_tcw_ops;
		cc->key_parts += 2; /* IV + whitening */
		cc->key_extra_size = cc->iv_size + TCW_WHITENING_SIZE;
	} else if (strcmp(ivmode, "random") == 0) {
		cc->iv_gen_ops = &crypt_iv_random_ops;
		/* Need storage space in integrity fields. */
		cc->integrity_iv_size = cc->iv_size;
	} else {
		ti->error = "Invalid IV mode";
		return -EINVAL;
	}

	return 0;
}
/*
 * Workaround to parse cipher algorithm from crypto API spec.
 * The cc->cipher is currently used only in ESSIV.
 * This should be probably done by crypto-api calls (once available...)
 */
static int crypt_ctr_blkdev_cipher(struct crypt_config *cc)
{
	const char *alg_name = NULL;
	char *start, *end;

	if (crypt_integrity_aead(cc)) {
		alg_name = crypto_tfm_alg_name(crypto_aead_tfm(any_tfm_aead(cc)));
		if (!alg_name)
			return -EINVAL;
		if (crypt_integrity_hmac(cc)) {
			alg_name = strchr(alg_name, ',');
			if (!alg_name)
				return -EINVAL;
			alg_name++;
		}
	} else {
		alg_name = crypto_tfm_alg_name(crypto_skcipher_tfm(any_tfm(cc)));
		if (!alg_name)
			return -EINVAL;
	}

	start = strchr(alg_name, '(');
	end = strchr(alg_name, ')');

	if (!start && !end) {
		cc->cipher = kstrdup(alg_name, GFP_KERNEL);
		return cc->cipher ? 0 : -ENOMEM;
	}

	if (!start || !end || ++start >= end)
		return -EINVAL;

	cc->cipher = kzalloc(end - start + 1, GFP_KERNEL);
	if (!cc->cipher)
		return -ENOMEM;

	strncpy(cc->cipher, start, end - start);

	return 0;
}
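
/*
 * Illustrative example (not part of the original source): for a plain
 * skcipher spec "cbc(aes)" the parser above stores cc->cipher = "aes";
 * for an HMAC-based AEAD spec "authenc(hmac(sha256),cbc(aes))" it first
 * skips past the HMAC part and again ends up with cc->cipher = "aes".
 */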
/*
 * Workaround to parse HMAC algorithm from AEAD crypto API spec.
 * The HMAC is needed to calculate tag size (HMAC digest size).
 * This should be probably done by crypto-api calls (once available...)
 */
static int crypt_ctr_auth_cipher(struct crypt_config *cc, char *cipher_api)
{
	char *start, *end, *mac_alg = NULL;
	struct crypto_ahash *mac;

	if (!strstarts(cipher_api, "authenc("))
		return 0;

	start = strchr(cipher_api, '(');
	end = strchr(cipher_api, ',');
	if (!start || !end || ++start > end)
		return -EINVAL;

	mac_alg = kzalloc(end - start + 1, GFP_KERNEL);
	if (!mac_alg)
		return -ENOMEM;
	strncpy(mac_alg, start, end - start);

	mac = crypto_alloc_ahash(mac_alg, 0, 0);
	kfree(mac_alg);

	if (IS_ERR(mac))
		return PTR_ERR(mac);

	cc->key_mac_size = crypto_ahash_digestsize(mac);
	crypto_free_ahash(mac);

	cc->authenc_key = kmalloc(crypt_authenckey_size(cc), GFP_KERNEL);
	if (!cc->authenc_key)
		return -ENOMEM;

	return 0;
}
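
/*
 * Illustrative example (not part of the original source): for
 * "authenc(hmac(sha256),cbc(aes))" the code above extracts mac_alg =
 * "hmac(sha256)" and records cc->key_mac_size = 32 (the SHA-256 digest
 * size), which feeds into crypt_authenckey_size() for the authenc key.
 */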
static int crypt_ctr_cipher_new(struct dm_target *ti, char *cipher_in, char *key,
				char **ivmode, char **ivopts)
{
	struct crypt_config *cc = ti->private;
	char *tmp, *cipher_api;
	int ret = -EINVAL;

	cc->tfms_count = 1;

	/*
	 * New format (capi: prefix)
	 * capi:cipher_api_spec-iv:ivopts
	 */
	tmp = &cipher_in[strlen("capi:")];
	cipher_api = strsep(&tmp, "-");
	*ivmode = strsep(&tmp, ":");
	*ivopts = tmp;

	if (*ivmode && !strcmp(*ivmode, "lmk"))
		cc->tfms_count = 64;

	cc->key_parts = cc->tfms_count;

	/* Allocate cipher */
	ret = crypt_alloc_tfms(cc, cipher_api);
	if (ret < 0) {
		ti->error = "Error allocating crypto tfm";
		return ret;
	}

	/* Alloc AEAD, can be used only in new format. */
	if (crypt_integrity_aead(cc)) {
		ret = crypt_ctr_auth_cipher(cc, cipher_api);
		if (ret < 0) {
			ti->error = "Invalid AEAD cipher spec";
			return ret;
		}
		cc->iv_size = crypto_aead_ivsize(any_tfm_aead(cc));
	} else
		cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));

	ret = crypt_ctr_blkdev_cipher(cc);
	if (ret < 0) {
		ti->error = "Cannot allocate cipher string";
		return ret;
	}

	return 0;
}
static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key,
				char **ivmode, char **ivopts)
{
	struct crypt_config *cc = ti->private;
	char *tmp, *cipher, *chainmode, *keycount;
	char *cipher_api = NULL;
	int ret = -EINVAL;
	char dummy;

	if (strchr(cipher_in, '(') || crypt_integrity_aead(cc)) {
		ti->error = "Bad cipher specification";
		return -EINVAL;
	}

	/*
	 * Legacy dm-crypt cipher specification
	 * cipher[:keycount]-mode-iv:ivopts
	 */
	tmp = cipher_in;
	keycount = strsep(&tmp, "-");
	cipher = strsep(&keycount, ":");

	if (!keycount)
		cc->tfms_count = 1;
	else if (sscanf(keycount, "%u%c", &cc->tfms_count, &dummy) != 1 ||
		 !is_power_of_2(cc->tfms_count)) {
		ti->error = "Bad cipher key count specification";
		return -EINVAL;
	}
	cc->key_parts = cc->tfms_count;

	cc->cipher = kstrdup(cipher, GFP_KERNEL);
	if (!cc->cipher)
		goto bad_mem;

	chainmode = strsep(&tmp, "-");
	*ivopts = strsep(&tmp, "-");
	*ivmode = strsep(&*ivopts, ":");

	if (tmp)
		DMWARN("Ignoring unexpected additional cipher options");

	/*
	 * For compatibility with the original dm-crypt mapping format, if
	 * only the cipher name is supplied, use cbc-plain.
	 */
	if (!chainmode || (!strcmp(chainmode, "plain") && !*ivmode)) {
		chainmode = "cbc";
		*ivmode = "plain";
	}

	if (strcmp(chainmode, "ecb") && !*ivmode) {
		ti->error = "IV mechanism required";
		return -EINVAL;
	}

	cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL);
	if (!cipher_api)
		goto bad_mem;

	ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
		       "%s(%s)", chainmode, cipher);
	if (ret < 0) {
		kfree(cipher_api);
		goto bad_mem;
	}

	/* Allocate cipher */
	ret = crypt_alloc_tfms(cc, cipher_api);
	if (ret < 0)
		ti->error = "Error allocating crypto tfm";

	kfree(cipher_api);
	return ret;

bad_mem:
	ti->error = "Cannot allocate cipher strings";
	return -ENOMEM;
}
static int crypt_ctr_cipher(struct dm_target *ti, char *cipher_in, char *key)
{
	struct crypt_config *cc = ti->private;
	char *ivmode = NULL, *ivopts = NULL;
	int ret;

	cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL);
	if (!cc->cipher_string) {
		ti->error = "Cannot allocate cipher strings";
		return -ENOMEM;
	}

	if (strstarts(cipher_in, "capi:"))
		ret = crypt_ctr_cipher_new(ti, cipher_in, key, &ivmode, &ivopts);
	else
		ret = crypt_ctr_cipher_old(ti, cipher_in, key, &ivmode, &ivopts);
	if (ret)
		return ret;

	/* Initialize IV mode */
	ret = crypt_ctr_ivmode(ti, ivmode);
	if (ret < 0)
		return ret;

	/* Initialize and set key */
	ret = crypt_set_key(cc, key);
	if (ret < 0) {
		ti->error = "Error decoding and setting key";
		return ret;
	}

	/* Allocate IV generator */
	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
		ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
		if (ret < 0) {
			ti->error = "Error creating IV";
			return ret;
		}
	}

	/* Initialize IV (set keys for ESSIV etc) */
	if (cc->iv_gen_ops && cc->iv_gen_ops->init) {
		ret = cc->iv_gen_ops->init(cc);
		if (ret < 0) {
			ti->error = "Error initialising IV";
			return ret;
		}
	}

	return ret;
}
static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc = ti->private;
	struct dm_arg_set as;
	static struct dm_arg _args[] = {
		{0, 6, "Invalid number of feature args"},
	};
	unsigned int opt_params, val;
	const char *opt_string, *sval;
	char dummy;
	int ret;

	/* Optional parameters */
	as.argc = argc;
	as.argv = argv;

	ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
	if (ret)
		return ret;

	while (opt_params--) {
		opt_string = dm_shift_arg(&as);
		if (!opt_string) {
			ti->error = "Not enough feature arguments";
			return -EINVAL;
		}

		if (!strcasecmp(opt_string, "allow_discards"))
			ti->num_discard_bios = 1;

		else if (!strcasecmp(opt_string, "same_cpu_crypt"))
			set_bit(DM_CRYPT_SAME_CPU, &cc->flags);

		else if (!strcasecmp(opt_string, "submit_from_crypt_cpus"))
			set_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
		else if (sscanf(opt_string, "integrity:%u:", &val) == 1) {
			if (val == 0 || val > MAX_TAG_SIZE) {
				ti->error = "Invalid integrity arguments";
				return -EINVAL;
			}
			cc->on_disk_tag_size = val;
			sval = strchr(opt_string + strlen("integrity:"), ':') + 1;
			if (!strcasecmp(sval, "aead")) {
				set_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags);
			} else if (strcasecmp(sval, "none")) {
				ti->error = "Unknown integrity profile";
				return -EINVAL;
			}

			cc->cipher_auth = kstrdup(sval, GFP_KERNEL);
			if (!cc->cipher_auth)
				return -ENOMEM;
		} else if (sscanf(opt_string, "sector_size:%hu%c", &cc->sector_size, &dummy) == 1) {
			if (cc->sector_size < (1 << SECTOR_SHIFT) ||
			    cc->sector_size > 4096 ||
			    (cc->sector_size & (cc->sector_size - 1))) {
				ti->error = "Invalid feature value for sector_size";
				return -EINVAL;
			}
			cc->sector_shift = __ffs(cc->sector_size) - SECTOR_SHIFT;
		} else if (!strcasecmp(opt_string, "iv_large_sectors"))
			set_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
		else {
			ti->error = "Invalid feature arguments";
			return -EINVAL;
		}
	}

	return 0;
}
/*
 * Construct an encryption mapping:
 * <cipher> [<key>|:<key_size>:<user|logon>:<key_description>] <iv_offset> <dev_path> <start>
 */
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc;
	int key_size;
	unsigned int align_mask;
	unsigned long long tmpll;
	int ret;
	size_t iv_size_padding, additional_req_size;
	char dummy;

	if (argc < 5) {
		ti->error = "Not enough arguments";
		return -EINVAL;
	}

	key_size = get_key_size(&argv[1]);
	if (key_size < 0) {
		ti->error = "Cannot parse key size";
		return -EINVAL;
	}

	cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
	if (!cc) {
		ti->error = "Cannot allocate encryption context";
		return -ENOMEM;
	}
	cc->key_size = key_size;
	cc->sector_size = (1 << SECTOR_SHIFT);
	cc->sector_shift = 0;

	ti->private = cc;

	/* Optional parameters need to be read before cipher constructor */
	if (argc > 5) {
		ret = crypt_ctr_optional(ti, argc - 5, &argv[5]);
		if (ret)
			goto bad;
	}

	ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
	if (ret < 0)
		goto bad;

	if (crypt_integrity_aead(cc)) {
		cc->dmreq_start = sizeof(struct aead_request);
		cc->dmreq_start += crypto_aead_reqsize(any_tfm_aead(cc));
		align_mask = crypto_aead_alignmask(any_tfm_aead(cc));
	} else {
		cc->dmreq_start = sizeof(struct skcipher_request);
		cc->dmreq_start += crypto_skcipher_reqsize(any_tfm(cc));
		align_mask = crypto_skcipher_alignmask(any_tfm(cc));
	}
	cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));

	if (align_mask < CRYPTO_MINALIGN) {
		/* Allocate the padding exactly */
		iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request))
				& align_mask;
	} else {
		/*
		 * If the cipher requires greater alignment than kmalloc
		 * alignment, we don't know the exact position of the
		 * initialization vector. We must assume worst case.
		 */
		iv_size_padding = align_mask;
	}

	ret = -ENOMEM;

	/*  ...| IV + padding | original IV | original sec. number | bio tag offset | */
	additional_req_size = sizeof(struct dm_crypt_request) +
		iv_size_padding + cc->iv_size +
		cc->iv_size +
		sizeof(uint64_t) +
		sizeof(unsigned int);

	cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start + additional_req_size);
	if (!cc->req_pool) {
		ti->error = "Cannot allocate crypt request mempool";
		goto bad;
	}

	cc->per_bio_data_size = ti->per_io_data_size =
		ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start + additional_req_size,
		      ARCH_KMALLOC_MINALIGN);

	cc->page_pool = mempool_create_page_pool(BIO_MAX_PAGES, 0);
	if (!cc->page_pool) {
		ti->error = "Cannot allocate page mempool";
		goto bad;
	}

	cc->bs = bioset_create(MIN_IOS, 0);
	if (!cc->bs) {
		ti->error = "Cannot allocate crypt bioset";
		goto bad;
	}

	mutex_init(&cc->bio_alloc_lock);

	ret = -EINVAL;
	if ((sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) ||
	    (tmpll & ((cc->sector_size >> SECTOR_SHIFT) - 1))) {
		ti->error = "Invalid iv_offset sector";
		goto bad;
	}
	cc->iv_offset = tmpll;

	ret = dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev);
	if (ret) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	ret = -EINVAL;
	if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) {
		ti->error = "Invalid device sector";
		goto bad;
	}
	cc->start = tmpll;

	if (crypt_integrity_aead(cc) || cc->integrity_iv_size) {
		ret = crypt_integrity_ctr(cc, ti);
		if (ret)
			goto bad;

		cc->tag_pool_max_sectors = POOL_ENTRY_SIZE / cc->on_disk_tag_size;
		if (!cc->tag_pool_max_sectors)
			cc->tag_pool_max_sectors = 1;

		cc->tag_pool = mempool_create_kmalloc_pool(MIN_IOS,
			cc->tag_pool_max_sectors * cc->on_disk_tag_size);
		if (!cc->tag_pool) {
			ti->error = "Cannot allocate integrity tags mempool";
			goto bad;
		}
	}

	ret = -ENOMEM;
	cc->io_queue = alloc_workqueue("kcryptd_io", WQ_MEM_RECLAIM, 1);
	if (!cc->io_queue) {
		ti->error = "Couldn't create kcryptd io queue";
		goto bad;
	}

	if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
		cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
	else
		cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
						  num_online_cpus());
	if (!cc->crypt_queue) {
		ti->error = "Couldn't create kcryptd queue";
		goto bad;
	}

	init_waitqueue_head(&cc->write_thread_wait);
	cc->write_tree = RB_ROOT;

	cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write");
	if (IS_ERR(cc->write_thread)) {
		ret = PTR_ERR(cc->write_thread);
		cc->write_thread = NULL;
		ti->error = "Couldn't spawn write thread";
		goto bad;
	}
	wake_up_process(cc->write_thread);

	ti->num_flush_bios = 1;
	ti->discard_zeroes_data_unsupported = true;

	return 0;

bad:
	crypt_dtr(ti);
	return ret;
}
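
/*
 * Illustrative constructor usage (not part of the original source),
 * assuming a 1 GiB /dev/sdb and a 512-bit hex key for aes-xts-plain64:
 *
 *   dmsetup create cryptdev --table \
 *       "0 2097152 crypt aes-xts-plain64 <hex_key> 0 /dev/sdb 0"
 *
 * A kernel keyring key can be referenced instead of inline hex by using
 * ":64:logon:<key_description>" as the key field, per the format above.
 */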
static int crypt_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_crypt_io *io;
	struct crypt_config *cc = ti->private;

	/*
	 * If bio is REQ_PREFLUSH or REQ_OP_DISCARD, just bypass crypt queues.
	 * - for REQ_PREFLUSH device-mapper core ensures that no IO is in-flight
	 * - for REQ_OP_DISCARD caller must use flush if IO ordering matters
	 */
	if (unlikely(bio->bi_opf & REQ_PREFLUSH ||
	    bio_op(bio) == REQ_OP_DISCARD)) {
		bio->bi_bdev = cc->dev->bdev;
		if (bio_sectors(bio))
			bio->bi_iter.bi_sector = cc->start +
				dm_target_offset(ti, bio->bi_iter.bi_sector);
		return DM_MAPIO_REMAPPED;
	}

	/*
	 * Check if bio is too large, split as needed.
	 */
	if (unlikely(bio->bi_iter.bi_size > (BIO_MAX_PAGES << PAGE_SHIFT)) &&
	    (bio_data_dir(bio) == WRITE || cc->on_disk_tag_size))
		dm_accept_partial_bio(bio, ((BIO_MAX_PAGES << PAGE_SHIFT) >> SECTOR_SHIFT));

	/*
	 * Ensure that bio is a multiple of internal sector encryption size
	 * and is aligned to this size as defined in IO hints.
	 */
	if (unlikely((bio->bi_iter.bi_sector & ((cc->sector_size >> SECTOR_SHIFT) - 1)) != 0))
		return DM_MAPIO_KILL;

	if (unlikely(bio->bi_iter.bi_size & (cc->sector_size - 1)))
		return DM_MAPIO_KILL;

	io = dm_per_bio_data(bio, cc->per_bio_data_size);
	crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));

	if (cc->on_disk_tag_size) {
		unsigned tag_len = cc->on_disk_tag_size * bio_sectors(bio);

		if (unlikely(tag_len > KMALLOC_MAX_SIZE) ||
		    unlikely(!(io->integrity_metadata = kzalloc(tag_len,
				GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN)))) {
			if (bio_sectors(bio) > cc->tag_pool_max_sectors)
				dm_accept_partial_bio(bio, cc->tag_pool_max_sectors);
			io->integrity_metadata = mempool_alloc(cc->tag_pool, GFP_NOIO);
			io->integrity_metadata_from_pool = true;
			memset(io->integrity_metadata, 0, cc->tag_pool_max_sectors * (1 << SECTOR_SHIFT));
		}
	}

	if (crypt_integrity_aead(cc))
		io->ctx.r.req_aead = (struct aead_request *)(io + 1);
	else
		io->ctx.r.req = (struct skcipher_request *)(io + 1);

	if (bio_data_dir(io->base_bio) == READ) {
		if (kcryptd_io_read(io, GFP_NOWAIT))
			kcryptd_queue_read(io);
	} else
		kcryptd_queue_crypt(io);

	return DM_MAPIO_SUBMITTED;
}
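
/*
 * Sizing note (not part of the original source): with 4 KiB pages,
 * BIO_MAX_PAGES << PAGE_SHIFT is 256 * 4096 = 1 MiB, so the check above
 * splits bios larger than 1 MiB (2048 sectors) for writes and for any I/O
 * that carries integrity tags.
 */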
static void crypt_status(struct dm_target *ti, status_type_t type,
			 unsigned status_flags, char *result, unsigned maxlen)
{
	struct crypt_config *cc = ti->private;
	unsigned i, sz = 0;
	int num_feature_args = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s ", cc->cipher_string);

		if (cc->key_size > 0) {
			if (cc->key_string)
				DMEMIT(":%u:%s", cc->key_size, cc->key_string);
			else
				for (i = 0; i < cc->key_size; i++)
					DMEMIT("%02x", cc->key[i]);
		} else
			DMEMIT("-");

		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
				cc->dev->name, (unsigned long long)cc->start);

		num_feature_args += !!ti->num_discard_bios;
		num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags);
		num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
		num_feature_args += cc->sector_size != (1 << SECTOR_SHIFT);
		num_feature_args += test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
		if (cc->on_disk_tag_size)
			num_feature_args++;
		if (num_feature_args) {
			DMEMIT(" %d", num_feature_args);
			if (ti->num_discard_bios)
				DMEMIT(" allow_discards");
			if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
				DMEMIT(" same_cpu_crypt");
			if (test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags))
				DMEMIT(" submit_from_crypt_cpus");
			if (cc->on_disk_tag_size)
				DMEMIT(" integrity:%u:%s", cc->on_disk_tag_size, cc->cipher_auth);
			if (cc->sector_size != (1 << SECTOR_SHIFT))
				DMEMIT(" sector_size:%d", cc->sector_size);
			if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
				DMEMIT(" iv_large_sectors");
		}

		break;
	}
}
static void crypt_postsuspend(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

static int crypt_preresume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
		DMERR("aborting resume - crypt key is not set.");
		return -EAGAIN;
	}

	return 0;
}

static void crypt_resume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}
/* Message interface
 *	key set <key>
 *	key wipe
 */
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct crypt_config *cc = ti->private;
	int key_size, ret = -EINVAL;

	if (argc < 2)
		goto error;

	if (!strcasecmp(argv[0], "key")) {
		if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
			DMWARN("not suspended during key manipulation.");
			return -EINVAL;
		}
		if (argc == 3 && !strcasecmp(argv[1], "set")) {
			/* The key size may not be changed. */
			key_size = get_key_size(&argv[2]);
			if (key_size < 0 || cc->key_size != key_size) {
				memset(argv[2], '0', strlen(argv[2]));
				return -EINVAL;
			}

			ret = crypt_set_key(cc, argv[2]);
			if (ret)
				return ret;
			if (cc->iv_gen_ops && cc->iv_gen_ops->init)
				ret = cc->iv_gen_ops->init(cc);
			return ret;
		}
		if (argc == 2 && !strcasecmp(argv[1], "wipe")) {
			if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
				ret = cc->iv_gen_ops->wipe(cc);
				if (ret)
					return ret;
			}
			return crypt_wipe_key(cc);
		}
	}

error:
	DMWARN("unrecognised message received.");
	return -EINVAL;
}
static int crypt_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	struct crypt_config *cc = ti->private;

	return fn(ti, cc->dev, cc->start, ti->len, data);
}

static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct crypt_config *cc = ti->private;

	/*
	 * Unfortunate constraint that is required to avoid the potential
	 * for exceeding underlying device's max_segments limits -- due to
	 * crypt_alloc_buffer() possibly allocating pages for the encryption
	 * bio that are not as physically contiguous as the original bio.
	 */
	limits->max_segment_size = PAGE_SIZE;

	if (cc->sector_size != (1 << SECTOR_SHIFT)) {
		limits->logical_block_size = cc->sector_size;
		limits->physical_block_size = cc->sector_size;
		blk_limits_io_min(limits, cc->sector_size);
	}
}
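
/*
 * Illustrative effect (not part of the original source): a mapping created
 * with the "sector_size:4096" feature arg advertises 4096-byte logical and
 * physical blocks through the hints above, so upper layers see the larger
 * encryption sector size.
 */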
static struct target_type crypt_target = {
	.name   = "crypt",
	.version = {1, 17, 0},
	.module = THIS_MODULE,
	.ctr    = crypt_ctr,
	.dtr    = crypt_dtr,
	.map    = crypt_map,
	.status = crypt_status,
	.postsuspend = crypt_postsuspend,
	.preresume = crypt_preresume,
	.resume = crypt_resume,
	.message = crypt_message,
	.iterate_devices = crypt_iterate_devices,
	.io_hints = crypt_io_hints,
};

static int __init dm_crypt_init(void)
{
	int r;

	r = dm_register_target(&crypt_target);
	if (r < 0)
		DMERR("register failed %d", r);

	return r;
}

static void __exit dm_crypt_exit(void)
{
	dm_unregister_target(&crypt_target);
}

module_init(dm_crypt_init);
module_exit(dm_crypt_exit);

MODULE_AUTHOR("Jana Saout <jana@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");