/*
 * Copyright (C) 2003 Jana Saout <jana@saout.de>
 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
 * Copyright (C) 2006-2017 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2013-2017 Milan Broz <gmazyland@gmail.com>
 *
 * This file is released under the GPL.
 */

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/key.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/backing-dev.h>
#include <linux/atomic.h>
#include <linux/scatterlist.h>
#include <linux/rbtree.h>
#include <linux/ctype.h>
#include <asm/unaligned.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/algapi.h>
#include <crypto/skcipher.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/rtnetlink.h> /* for struct rtattr and RTA macros only */
#include <keys/user-type.h>

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "crypt"
/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
	struct completion restart;
	struct bio *bio_in;
	struct bio *bio_out;
	struct bvec_iter iter_in;
	struct bvec_iter iter_out;
	sector_t cc_sector;
	atomic_t cc_pending;
	union {
		struct skcipher_request *req;
		struct aead_request *req_aead;
	} r;
};

/*
 * per bio private data
 */
struct dm_crypt_io {
	struct crypt_config *cc;
	struct bio *base_bio;
	u8 *integrity_metadata;
	bool integrity_metadata_from_pool;
	struct work_struct work;

	struct convert_context ctx;

	atomic_t io_pending;
	blk_status_t error;
	sector_t sector;

	struct rb_node rb_node;
} CRYPTO_MINALIGN_ATTR;
struct dm_crypt_request {
	struct convert_context *ctx;
	struct scatterlist sg_in[4];
	struct scatterlist sg_out[4];
	sector_t iv_sector;
};
struct crypt_config;

struct crypt_iv_operations {
	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
		   const char *opts);
	void (*dtr)(struct crypt_config *cc);
	int (*init)(struct crypt_config *cc);
	int (*wipe)(struct crypt_config *cc);
	int (*generator)(struct crypt_config *cc, u8 *iv,
			 struct dm_crypt_request *dmreq);
	int (*post)(struct crypt_config *cc, u8 *iv,
		    struct dm_crypt_request *dmreq);
};
struct iv_essiv_private {
	struct crypto_ahash *hash_tfm;
	u8 *salt;
};

struct iv_benbi_private {
	int shift;
};

#define LMK_SEED_SIZE 64 /* hash + 0 */
struct iv_lmk_private {
	struct crypto_shash *hash_tfm;
	u8 *seed;
};

#define TCW_WHITENING_SIZE 16
struct iv_tcw_private {
	struct crypto_shash *crc32_tfm;
	u8 *iv_seed;
	u8 *whitening;
};
/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
	     DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD };

enum cipher_flags {
	CRYPT_MODE_INTEGRITY_AEAD,	/* Use authenticated mode for cipher */
	CRYPT_IV_LARGE_SECTORS,		/* Calculate IV from sector_size, not 512B sectors */
};
/*
 * The fields in here must be read only after initialization.
 */
struct crypt_config {
	struct dm_dev *dev;
	sector_t start;

	/*
	 * pool for per bio private data, crypto requests,
	 * encryption requests/buffer pages and integrity tags
	 */
	mempool_t *req_pool;
	mempool_t *page_pool;
	mempool_t *tag_pool;
	unsigned tag_pool_max_sectors;

	struct bio_set *bs;
	struct mutex bio_alloc_lock;

	struct workqueue_struct *io_queue;
	struct workqueue_struct *crypt_queue;

	struct task_struct *write_thread;
	wait_queue_head_t write_thread_wait;
	struct rb_root write_tree;

	char *cipher;
	char *cipher_string;
	char *cipher_auth;
	char *key_string;

	const struct crypt_iv_operations *iv_gen_ops;
	union {
		struct iv_essiv_private essiv;
		struct iv_benbi_private benbi;
		struct iv_lmk_private lmk;
		struct iv_tcw_private tcw;
	} iv_gen_private;
	sector_t iv_offset;
	unsigned int iv_size;
	unsigned short int sector_size;
	unsigned char sector_shift;

	/* ESSIV: struct crypto_cipher *essiv_tfm */
	void *iv_private;
	union {
		struct crypto_skcipher **tfms;
		struct crypto_aead **tfms_aead;
	} cipher_tfm;
	unsigned tfms_count;
	unsigned long cipher_flags;

	/*
	 * Layout of each crypto request:
	 *
	 *   struct skcipher_request
	 *      context
	 *      padding
	 *   struct dm_crypt_request
	 *      padding
	 *   IV
	 *
	 * The padding is added so that dm_crypt_request and the IV are
	 * correctly aligned.
	 */
	unsigned int dmreq_start;

	unsigned int per_bio_data_size;

	unsigned long flags;
	unsigned int key_size;
	unsigned int key_parts;      /* independent parts in key buffer */
	unsigned int key_extra_size; /* additional keys length */
	unsigned int key_mac_size;   /* MAC key size for authenc(...) */

	unsigned int integrity_tag_size;
	unsigned int integrity_iv_size;
	unsigned int on_disk_tag_size;

	u8 *authenc_key; /* space for keys in authenc() format (if used) */
	u8 key[0];
};

#define MAX_TAG_SIZE	480
#define POOL_ENTRY_SIZE	512
static void clone_init(struct dm_crypt_io *, struct bio *);
static void kcryptd_queue_crypt(struct dm_crypt_io *io);
static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc,
					     struct scatterlist *sg);
/*
 * Use this to access cipher attributes that are independent of the key.
 */
static struct crypto_skcipher *any_tfm(struct crypt_config *cc)
{
	return cc->cipher_tfm.tfms[0];
}

static struct crypto_aead *any_tfm_aead(struct crypt_config *cc)
{
	return cc->cipher_tfm.tfms_aead[0];
}
/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * plain64: the initial vector is the 64-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * plain64be: the initial vector is the 64-bit big-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possibly other narrow block modes)
 *
 * null: the initial vector is always zero. Provides compatibility with
 *       obsolete loop_fish2 devices. Do not use for new devices.
 *
 * lmk:  Compatible implementation of the block chaining mode used
 *       by the Loop-AES block device encryption system
 *       designed by Jari Ruusu. See http://loop-aes.sourceforge.net/
 *       It operates on full 512 byte sectors and uses CBC
 *       with an IV derived from the sector number, the data and
 *       optionally an extra IV seed.
 *       This means that after decryption the first block
 *       of the sector must be tweaked according to the decrypted data.
 *       Loop-AES can use three encryption schemes:
 *         version 1: plain aes-cbc mode
 *         version 2: a 64-multikey scheme with the lmk IV generator
 *         version 3: the same as version 2 with an additional IV seed
 *                    (it uses 65 keys, the last key is used as IV seed)
 *
 * tcw:  Compatible implementation of the block chaining mode used
 *       by the TrueCrypt device encryption system (prior to version 4.1).
 *       For more info see: https://gitlab.com/cryptsetup/cryptsetup/wikis/TrueCryptOnDiskFormat
 *       It operates on full 512 byte sectors and uses CBC
 *       with an IV derived from the initial key and the sector number.
 *       In addition, a whitening value is applied to every sector; the
 *       whitening is calculated from the initial key, the sector number
 *       and mixed using CRC32.
 *       Note that this encryption scheme is vulnerable to watermarking attacks
 *       and should only be used to access old compatible containers.
 *
 * plumb: unimplemented, see:
 * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
 */
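
/*
 * Editorial note (not in the original source): the IV mode is selected by the
 * suffix of the cipher specification in the mapping table, e.g.
 * "aes-cbc-essiv:sha256" selects the essiv generator with sha256 as the salt
 * digest, while "aes-xts-plain64" selects the plain64 generator.
 */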
static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	*(__le32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);

	return 0;
}

static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
				struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);

	return 0;
}

static int crypt_iv_plain64be_gen(struct crypt_config *cc, u8 *iv,
				  struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	/* iv_size is at least of size u64; usually it is 16 bytes */
	*(__be64 *)&iv[cc->iv_size - sizeof(u64)] = cpu_to_be64(dmreq->iv_sector);

	return 0;
}
/* Initialise ESSIV - compute salt but no local memory allocations */
static int crypt_iv_essiv_init(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	AHASH_REQUEST_ON_STACK(req, essiv->hash_tfm);
	struct scatterlist sg;
	struct crypto_cipher *essiv_tfm;
	int err;

	sg_init_one(&sg, cc->key, cc->key_size);
	ahash_request_set_tfm(req, essiv->hash_tfm);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
	ahash_request_set_crypt(req, &sg, essiv->salt, cc->key_size);

	err = crypto_ahash_digest(req);
	ahash_request_zero(req);
	if (err)
		return err;

	essiv_tfm = cc->iv_private;

	err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
				   crypto_ahash_digestsize(essiv->hash_tfm));

	return err;
}
/* Wipe salt and reset key derived from volume key */
static int crypt_iv_essiv_wipe(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	unsigned salt_size = crypto_ahash_digestsize(essiv->hash_tfm);
	struct crypto_cipher *essiv_tfm;
	int r, err = 0;

	memset(essiv->salt, 0, salt_size);

	essiv_tfm = cc->iv_private;
	r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size);
	if (r)
		err = r;

	return err;
}
/* Allocate the cipher for ESSIV */
static struct crypto_cipher *alloc_essiv_cipher(struct crypt_config *cc,
						struct dm_target *ti,
						const u8 *salt,
						unsigned int saltsize)
{
	struct crypto_cipher *essiv_tfm;
	int err;

	/* Setup the essiv_tfm with the given salt */
	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(essiv_tfm)) {
		ti->error = "Error allocating crypto tfm for ESSIV";
		return essiv_tfm;
	}

	if (crypto_cipher_blocksize(essiv_tfm) != cc->iv_size) {
		ti->error = "Block size of ESSIV cipher does "
			    "not match IV size of block cipher";
		crypto_free_cipher(essiv_tfm);
		return ERR_PTR(-EINVAL);
	}

	err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
	if (err) {
		ti->error = "Failed to set key for ESSIV cipher";
		crypto_free_cipher(essiv_tfm);
		return ERR_PTR(err);
	}

	return essiv_tfm;
}
static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
	struct crypto_cipher *essiv_tfm;
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;

	crypto_free_ahash(essiv->hash_tfm);
	essiv->hash_tfm = NULL;

	kzfree(essiv->salt);
	essiv->salt = NULL;

	essiv_tfm = cc->iv_private;

	if (essiv_tfm)
		crypto_free_cipher(essiv_tfm);

	cc->iv_private = NULL;
}
static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	struct crypto_cipher *essiv_tfm = NULL;
	struct crypto_ahash *hash_tfm = NULL;
	u8 *salt = NULL;
	int err;

	if (!opts) {
		ti->error = "Digest algorithm missing for ESSIV mode";
		return -EINVAL;
	}

	/* Allocate hash algorithm */
	hash_tfm = crypto_alloc_ahash(opts, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash_tfm)) {
		ti->error = "Error initializing ESSIV hash";
		err = PTR_ERR(hash_tfm);
		goto bad;
	}

	salt = kzalloc(crypto_ahash_digestsize(hash_tfm), GFP_KERNEL);
	if (!salt) {
		ti->error = "Error kmallocing salt storage in ESSIV";
		err = -ENOMEM;
		goto bad;
	}

	cc->iv_gen_private.essiv.salt = salt;
	cc->iv_gen_private.essiv.hash_tfm = hash_tfm;

	essiv_tfm = alloc_essiv_cipher(cc, ti, salt,
				       crypto_ahash_digestsize(hash_tfm));
	if (IS_ERR(essiv_tfm)) {
		crypt_iv_essiv_dtr(cc);
		return PTR_ERR(essiv_tfm);
	}
	cc->iv_private = essiv_tfm;

	return 0;

bad:
	if (hash_tfm && !IS_ERR(hash_tfm))
		crypto_free_ahash(hash_tfm);
	kfree(salt);
	return err;
}
static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	struct crypto_cipher *essiv_tfm = cc->iv_private;

	memset(iv, 0, cc->iv_size);
	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
	crypto_cipher_encrypt_one(essiv_tfm, iv, iv);

	return 0;
}
static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	unsigned bs = crypto_skcipher_blocksize(any_tfm(cc));
	int log = ilog2(bs);

	/* we need to calculate how far we must shift the sector count
	 * to get the cipher block count, we use this shift in _gen */

	if (1 << log != bs) {
		ti->error = "cypher blocksize is not a power of 2";
		return -EINVAL;
	}

	if (log > 9) {
		ti->error = "cypher blocksize is > 512";
		return -EINVAL;
	}

	cc->iv_gen_private.benbi.shift = 9 - log;

	return 0;
}
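
/*
 * Illustrative note (editorial, not from the original source): for a 16-byte
 * cipher block (e.g. AES in a narrow-block mode), log = ilog2(16) = 4, so
 * shift = 9 - 4 = 5 and crypt_iv_benbi_gen() below derives the block count
 * for a 512-byte sector as (iv_sector << 5) + 1.
 */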
static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
}
static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	__be64 val;

	memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */

	val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1);
	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));

	return 0;
}
static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);

	return 0;
}
static void crypt_iv_lmk_dtr(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm))
		crypto_free_shash(lmk->hash_tfm);
	lmk->hash_tfm = NULL;

	kzfree(lmk->seed);
	lmk->seed = NULL;
}
static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
			    const char *opts)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	if (cc->sector_size != (1 << SECTOR_SHIFT)) {
		ti->error = "Unsupported sector size for LMK";
		return -EINVAL;
	}

	lmk->hash_tfm = crypto_alloc_shash("md5", 0, 0);
	if (IS_ERR(lmk->hash_tfm)) {
		ti->error = "Error initializing LMK hash";
		return PTR_ERR(lmk->hash_tfm);
	}

	/* No seed in LMK version 2 */
	if (cc->key_parts == cc->tfms_count) {
		lmk->seed = NULL;
		return 0;
	}

	lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL);
	if (!lmk->seed) {
		crypt_iv_lmk_dtr(cc);
		ti->error = "Error kmallocing seed storage in LMK";
		return -ENOMEM;
	}

	return 0;
}
static int crypt_iv_lmk_init(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
	int subkey_size = cc->key_size / cc->key_parts;

	/* LMK seed is on the position of LMK_KEYS + 1 key */
	if (lmk->seed)
		memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size),
		       crypto_shash_digestsize(lmk->hash_tfm));

	return 0;
}

static int crypt_iv_lmk_wipe(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	if (lmk->seed)
		memset(lmk->seed, 0, LMK_SEED_SIZE);

	return 0;
}
static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq,
			    u8 *data)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
	SHASH_DESC_ON_STACK(desc, lmk->hash_tfm);
	struct md5_state md5state;
	__le32 buf[4];
	int i, r;

	desc->tfm = lmk->hash_tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	r = crypto_shash_init(desc);
	if (r)
		return r;

	if (lmk->seed) {
		r = crypto_shash_update(desc, lmk->seed, LMK_SEED_SIZE);
		if (r)
			return r;
	}

	/* Sector is always 512B, block size 16, add data of blocks 1-31 */
	r = crypto_shash_update(desc, data + 16, 16 * 31);
	if (r)
		return r;

	/* Sector is cropped to 56 bits here */
	buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF);
	buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000);
	buf[2] = cpu_to_le32(4024);
	buf[3] = 0;
	r = crypto_shash_update(desc, (u8 *)buf, sizeof(buf));
	if (r)
		return r;

	/* No MD5 padding here */
	r = crypto_shash_export(desc, &md5state);
	if (r)
		return r;

	for (i = 0; i < MD5_HASH_WORDS; i++)
		__cpu_to_le32s(&md5state.hash[i]);
	memcpy(iv, &md5state.hash, cc->iv_size);

	return 0;
}
static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq)
{
	struct scatterlist *sg;
	u8 *src;
	int r = 0;

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
		sg = crypt_get_sg_data(cc, dmreq->sg_in);
		src = kmap_atomic(sg_page(sg));
		r = crypt_iv_lmk_one(cc, iv, dmreq, src + sg->offset);
		kunmap_atomic(src);
	} else
		memset(iv, 0, cc->iv_size);

	return r;
}

static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	struct scatterlist *sg;
	u8 *dst;
	int r;

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
		return 0;

	sg = crypt_get_sg_data(cc, dmreq->sg_out);
	dst = kmap_atomic(sg_page(sg));
	r = crypt_iv_lmk_one(cc, iv, dmreq, dst + sg->offset);

	/* Tweak the first block of plaintext sector */
	if (!r)
		crypto_xor(dst + sg->offset, iv, cc->iv_size);

	kunmap_atomic(dst);
	return r;
}
static void crypt_iv_tcw_dtr(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	kzfree(tcw->iv_seed);
	tcw->iv_seed = NULL;
	kzfree(tcw->whitening);
	tcw->whitening = NULL;

	if (tcw->crc32_tfm && !IS_ERR(tcw->crc32_tfm))
		crypto_free_shash(tcw->crc32_tfm);
	tcw->crc32_tfm = NULL;
}
static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
			    const char *opts)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	if (cc->sector_size != (1 << SECTOR_SHIFT)) {
		ti->error = "Unsupported sector size for TCW";
		return -EINVAL;
	}

	if (cc->key_size <= (cc->iv_size + TCW_WHITENING_SIZE)) {
		ti->error = "Wrong key size for TCW";
		return -EINVAL;
	}

	tcw->crc32_tfm = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(tcw->crc32_tfm)) {
		ti->error = "Error initializing CRC32 in TCW";
		return PTR_ERR(tcw->crc32_tfm);
	}

	tcw->iv_seed = kzalloc(cc->iv_size, GFP_KERNEL);
	tcw->whitening = kzalloc(TCW_WHITENING_SIZE, GFP_KERNEL);
	if (!tcw->iv_seed || !tcw->whitening) {
		crypt_iv_tcw_dtr(cc);
		ti->error = "Error allocating seed storage in TCW";
		return -ENOMEM;
	}

	return 0;
}
static int crypt_iv_tcw_init(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	int key_offset = cc->key_size - cc->iv_size - TCW_WHITENING_SIZE;

	memcpy(tcw->iv_seed, &cc->key[key_offset], cc->iv_size);
	memcpy(tcw->whitening, &cc->key[key_offset + cc->iv_size],
	       TCW_WHITENING_SIZE);

	return 0;
}

static int crypt_iv_tcw_wipe(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	memset(tcw->iv_seed, 0, cc->iv_size);
	memset(tcw->whitening, 0, TCW_WHITENING_SIZE);

	return 0;
}
static int crypt_iv_tcw_whitening(struct crypt_config *cc,
				  struct dm_crypt_request *dmreq,
				  u8 *data)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	__le64 sector = cpu_to_le64(dmreq->iv_sector);
	u8 buf[TCW_WHITENING_SIZE];
	SHASH_DESC_ON_STACK(desc, tcw->crc32_tfm);
	int i, r;

	/* xor whitening with sector number */
	memcpy(buf, tcw->whitening, TCW_WHITENING_SIZE);
	crypto_xor(buf, (u8 *)&sector, 8);
	crypto_xor(&buf[8], (u8 *)&sector, 8);

	/* calculate crc32 for every 32bit part and xor it */
	desc->tfm = tcw->crc32_tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	for (i = 0; i < 4; i++) {
		r = crypto_shash_init(desc);
		if (r)
			goto out;
		r = crypto_shash_update(desc, &buf[i * 4], 4);
		if (r)
			goto out;
		r = crypto_shash_final(desc, &buf[i * 4]);
		if (r)
			goto out;
	}
	crypto_xor(&buf[0], &buf[12], 4);
	crypto_xor(&buf[4], &buf[8], 4);

	/* apply whitening (8 bytes) to whole sector */
	for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++)
		crypto_xor(data + i * 8, buf, 8);
out:
	memzero_explicit(buf, sizeof(buf));
	return r;
}
static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq)
{
	struct scatterlist *sg;
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	__le64 sector = cpu_to_le64(dmreq->iv_sector);
	u8 *src;
	int r = 0;

	/* Remove whitening from ciphertext */
	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
		sg = crypt_get_sg_data(cc, dmreq->sg_in);
		src = kmap_atomic(sg_page(sg));
		r = crypt_iv_tcw_whitening(cc, dmreq, src + sg->offset);
		kunmap_atomic(src);
	}

	/* Calculate IV */
	memcpy(iv, tcw->iv_seed, cc->iv_size);
	crypto_xor(iv, (u8 *)&sector, 8);
	if (cc->iv_size > 8)
		crypto_xor(&iv[8], (u8 *)&sector, cc->iv_size - 8);

	return r;
}

static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	struct scatterlist *sg;
	u8 *dst;
	int r;

	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
		return 0;

	/* Apply whitening on ciphertext */
	sg = crypt_get_sg_data(cc, dmreq->sg_out);
	dst = kmap_atomic(sg_page(sg));
	r = crypt_iv_tcw_whitening(cc, dmreq, dst + sg->offset);
	kunmap_atomic(dst);

	return r;
}
static int crypt_iv_random_gen(struct crypt_config *cc, u8 *iv,
			       struct dm_crypt_request *dmreq)
{
	/* Used only for writes, there must be an additional space to store IV */
	get_random_bytes(iv, cc->iv_size);
	return 0;
}
static const struct crypt_iv_operations crypt_iv_plain_ops = {
	.generator = crypt_iv_plain_gen
};

static const struct crypt_iv_operations crypt_iv_plain64_ops = {
	.generator = crypt_iv_plain64_gen
};

static const struct crypt_iv_operations crypt_iv_plain64be_ops = {
	.generator = crypt_iv_plain64be_gen
};

static const struct crypt_iv_operations crypt_iv_essiv_ops = {
	.ctr       = crypt_iv_essiv_ctr,
	.dtr       = crypt_iv_essiv_dtr,
	.init      = crypt_iv_essiv_init,
	.wipe      = crypt_iv_essiv_wipe,
	.generator = crypt_iv_essiv_gen
};

static const struct crypt_iv_operations crypt_iv_benbi_ops = {
	.ctr	   = crypt_iv_benbi_ctr,
	.dtr	   = crypt_iv_benbi_dtr,
	.generator = crypt_iv_benbi_gen
};

static const struct crypt_iv_operations crypt_iv_null_ops = {
	.generator = crypt_iv_null_gen
};

static const struct crypt_iv_operations crypt_iv_lmk_ops = {
	.ctr	   = crypt_iv_lmk_ctr,
	.dtr	   = crypt_iv_lmk_dtr,
	.init	   = crypt_iv_lmk_init,
	.wipe	   = crypt_iv_lmk_wipe,
	.generator = crypt_iv_lmk_gen,
	.post	   = crypt_iv_lmk_post
};

static const struct crypt_iv_operations crypt_iv_tcw_ops = {
	.ctr	   = crypt_iv_tcw_ctr,
	.dtr	   = crypt_iv_tcw_dtr,
	.init	   = crypt_iv_tcw_init,
	.wipe	   = crypt_iv_tcw_wipe,
	.generator = crypt_iv_tcw_gen,
	.post	   = crypt_iv_tcw_post
};

static struct crypt_iv_operations crypt_iv_random_ops = {
	.generator = crypt_iv_random_gen
};
/*
 * Integrity extensions
 */
static bool crypt_integrity_aead(struct crypt_config *cc)
{
	return test_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags);
}

static bool crypt_integrity_hmac(struct crypt_config *cc)
{
	return crypt_integrity_aead(cc) && cc->key_mac_size;
}

/* Get sg containing data */
static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc,
					     struct scatterlist *sg)
{
	if (unlikely(crypt_integrity_aead(cc)))
		return &sg[2];

	return sg;
}
static int dm_crypt_integrity_io_alloc(struct dm_crypt_io *io, struct bio *bio)
{
	struct bio_integrity_payload *bip;
	unsigned int tag_len;
	int ret;

	if (!bio_sectors(bio) || !io->cc->on_disk_tag_size)
		return 0;

	bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
	if (IS_ERR(bip))
		return PTR_ERR(bip);

	tag_len = io->cc->on_disk_tag_size * bio_sectors(bio);

	bip->bip_iter.bi_size = tag_len;
	bip->bip_iter.bi_sector = io->cc->start + io->sector;

	/* We own the metadata, do not let bio_free to release it */
	bip->bip_flags &= ~BIP_BLOCK_INTEGRITY;

	ret = bio_integrity_add_page(bio, virt_to_page(io->integrity_metadata),
				     tag_len, offset_in_page(io->integrity_metadata));
	if (unlikely(ret != tag_len))
		return -ENOMEM;

	return 0;
}
static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
{
#ifdef CONFIG_BLK_DEV_INTEGRITY
	struct blk_integrity *bi = blk_get_integrity(cc->dev->bdev->bd_disk);

	/* From now we require underlying device with our integrity profile */
	if (!bi || strcasecmp(bi->profile->name, "DM-DIF-EXT-TAG")) {
		ti->error = "Integrity profile not supported.";
		return -EINVAL;
	}

	if (bi->tag_size != cc->on_disk_tag_size ||
	    bi->tuple_size != cc->on_disk_tag_size) {
		ti->error = "Integrity profile tag size mismatch.";
		return -EINVAL;
	}
	if (1 << bi->interval_exp != cc->sector_size) {
		ti->error = "Integrity profile sector size mismatch.";
		return -EINVAL;
	}

	if (crypt_integrity_aead(cc)) {
		cc->integrity_tag_size = cc->on_disk_tag_size - cc->integrity_iv_size;
		DMINFO("Integrity AEAD, tag size %u, IV size %u.",
		       cc->integrity_tag_size, cc->integrity_iv_size);

		if (crypto_aead_setauthsize(any_tfm_aead(cc), cc->integrity_tag_size)) {
			ti->error = "Integrity AEAD auth tag size is not supported.";
			return -EINVAL;
		}
	} else if (cc->integrity_iv_size)
		DMINFO("Additional per-sector space %u bytes for IV.",
		       cc->integrity_iv_size);

	if ((cc->integrity_tag_size + cc->integrity_iv_size) != bi->tag_size) {
		ti->error = "Not enough space for integrity tag in the profile.";
		return -EINVAL;
	}

	return 0;
#else
	ti->error = "Integrity profile not supported.";
	return -EINVAL;
#endif
}
static void crypt_convert_init(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct bio *bio_out, struct bio *bio_in,
			       sector_t sector)
{
	ctx->bio_in = bio_in;
	ctx->bio_out = bio_out;
	if (bio_in)
		ctx->iter_in = bio_in->bi_iter;
	if (bio_out)
		ctx->iter_out = bio_out->bi_iter;
	ctx->cc_sector = sector + cc->iv_offset;
	init_completion(&ctx->restart);
}
static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
					     void *req)
{
	return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
}

static void *req_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq)
{
	return (void *)((char *)dmreq - cc->dmreq_start);
}

static u8 *iv_of_dmreq(struct crypt_config *cc,
		       struct dm_crypt_request *dmreq)
{
	if (crypt_integrity_aead(cc))
		return (u8 *)ALIGN((unsigned long)(dmreq + 1),
			crypto_aead_alignmask(any_tfm_aead(cc)) + 1);
	else
		return (u8 *)ALIGN((unsigned long)(dmreq + 1),
			crypto_skcipher_alignmask(any_tfm(cc)) + 1);
}

static u8 *org_iv_of_dmreq(struct crypt_config *cc,
			   struct dm_crypt_request *dmreq)
{
	return iv_of_dmreq(cc, dmreq) + cc->iv_size;
}

static uint64_t *org_sector_of_dmreq(struct crypt_config *cc,
				     struct dm_crypt_request *dmreq)
{
	u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size + cc->iv_size;

	return (uint64_t *)ptr;
}

static unsigned int *org_tag_of_dmreq(struct crypt_config *cc,
				      struct dm_crypt_request *dmreq)
{
	u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size +
		  cc->iv_size + sizeof(uint64_t);

	return (unsigned int *)ptr;
}

static void *tag_from_dmreq(struct crypt_config *cc,
			    struct dm_crypt_request *dmreq)
{
	struct convert_context *ctx = dmreq->ctx;
	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);

	return &io->integrity_metadata[*org_tag_of_dmreq(cc, dmreq) *
		cc->on_disk_tag_size];
}

static void *iv_tag_from_dmreq(struct crypt_config *cc,
			       struct dm_crypt_request *dmreq)
{
	return tag_from_dmreq(cc, dmreq) + cc->integrity_tag_size;
}
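
/*
 * Editorial summary of the per-request layout implied by the helpers above
 * (added for clarity, not part of the original source): the crypto request is
 * followed at cc->dmreq_start by struct dm_crypt_request, then (after
 * alignment) the working IV, the original IV, the original sector number
 * (u64) and the tag offset used to index the per-bio integrity metadata.
 */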
static int crypt_convert_block_aead(struct crypt_config *cc,
				     struct convert_context *ctx,
				     struct aead_request *req,
				     unsigned int tag_offset)
{
	struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
	struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
	struct dm_crypt_request *dmreq;
	u8 *iv, *org_iv, *tag_iv, *tag;
	uint64_t *sector;
	int r = 0;

	BUG_ON(cc->integrity_iv_size && cc->integrity_iv_size != cc->iv_size);

	/* Reject unexpected unaligned bio. */
	if (unlikely(bv_in.bv_offset & (cc->sector_size - 1)))
		return -EIO;

	dmreq = dmreq_of_req(cc, req);
	dmreq->iv_sector = ctx->cc_sector;
	if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
		dmreq->iv_sector >>= cc->sector_shift;
	dmreq->ctx = ctx;

	*org_tag_of_dmreq(cc, dmreq) = tag_offset;

	sector = org_sector_of_dmreq(cc, dmreq);
	*sector = cpu_to_le64(ctx->cc_sector - cc->iv_offset);

	iv = iv_of_dmreq(cc, dmreq);
	org_iv = org_iv_of_dmreq(cc, dmreq);
	tag = tag_from_dmreq(cc, dmreq);
	tag_iv = iv_tag_from_dmreq(cc, dmreq);

	/* AEAD request:
	 *  |----- AAD -------|------ DATA -------|-- AUTH TAG --|
	 *  | (authenticated) | (auth+encryption) |              |
	 *  | sector_LE |  IV |  sector in/out    |  tag in/out  |
	 */
	sg_init_table(dmreq->sg_in, 4);
	sg_set_buf(&dmreq->sg_in[0], sector, sizeof(uint64_t));
	sg_set_buf(&dmreq->sg_in[1], org_iv, cc->iv_size);
	sg_set_page(&dmreq->sg_in[2], bv_in.bv_page, cc->sector_size, bv_in.bv_offset);
	sg_set_buf(&dmreq->sg_in[3], tag, cc->integrity_tag_size);

	sg_init_table(dmreq->sg_out, 4);
	sg_set_buf(&dmreq->sg_out[0], sector, sizeof(uint64_t));
	sg_set_buf(&dmreq->sg_out[1], org_iv, cc->iv_size);
	sg_set_page(&dmreq->sg_out[2], bv_out.bv_page, cc->sector_size, bv_out.bv_offset);
	sg_set_buf(&dmreq->sg_out[3], tag, cc->integrity_tag_size);

	if (cc->iv_gen_ops) {
		/* For READs use IV stored in integrity metadata */
		if (cc->integrity_iv_size && bio_data_dir(ctx->bio_in) != WRITE) {
			memcpy(org_iv, tag_iv, cc->iv_size);
		} else {
			r = cc->iv_gen_ops->generator(cc, org_iv, dmreq);
			if (r < 0)
				return r;
			/* Store generated IV in integrity metadata */
			if (cc->integrity_iv_size)
				memcpy(tag_iv, org_iv, cc->iv_size);
		}
		/* Working copy of IV, to be modified in crypto API */
		memcpy(iv, org_iv, cc->iv_size);
	}

	aead_request_set_ad(req, sizeof(uint64_t) + cc->iv_size);
	if (bio_data_dir(ctx->bio_in) == WRITE) {
		aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out,
				       cc->sector_size, iv);
		r = crypto_aead_encrypt(req);
		if (cc->integrity_tag_size + cc->integrity_iv_size != cc->on_disk_tag_size)
			memset(tag + cc->integrity_tag_size + cc->integrity_iv_size, 0,
			       cc->on_disk_tag_size - (cc->integrity_tag_size + cc->integrity_iv_size));
	} else {
		aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out,
				       cc->sector_size + cc->integrity_tag_size, iv);
		r = crypto_aead_decrypt(req);
	}

	if (r == -EBADMSG)
		DMERR_LIMIT("INTEGRITY AEAD ERROR, sector %llu",
			    (unsigned long long)le64_to_cpu(*sector));

	if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
		r = cc->iv_gen_ops->post(cc, org_iv, dmreq);

	bio_advance_iter(ctx->bio_in, &ctx->iter_in, cc->sector_size);
	bio_advance_iter(ctx->bio_out, &ctx->iter_out, cc->sector_size);

	return r;
}
static int crypt_convert_block_skcipher(struct crypt_config *cc,
					struct convert_context *ctx,
					struct skcipher_request *req,
					unsigned int tag_offset)
{
	struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
	struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
	struct scatterlist *sg_in, *sg_out;
	struct dm_crypt_request *dmreq;
	u8 *iv, *org_iv, *tag_iv;
	uint64_t *sector;
	int r = 0;

	/* Reject unexpected unaligned bio. */
	if (unlikely(bv_in.bv_offset & (cc->sector_size - 1)))
		return -EIO;

	dmreq = dmreq_of_req(cc, req);
	dmreq->iv_sector = ctx->cc_sector;
	if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
		dmreq->iv_sector >>= cc->sector_shift;
	dmreq->ctx = ctx;

	*org_tag_of_dmreq(cc, dmreq) = tag_offset;

	iv = iv_of_dmreq(cc, dmreq);
	org_iv = org_iv_of_dmreq(cc, dmreq);
	tag_iv = iv_tag_from_dmreq(cc, dmreq);

	sector = org_sector_of_dmreq(cc, dmreq);
	*sector = cpu_to_le64(ctx->cc_sector - cc->iv_offset);

	/* For skcipher we use only the first sg item */
	sg_in  = &dmreq->sg_in[0];
	sg_out = &dmreq->sg_out[0];

	sg_init_table(sg_in, 1);
	sg_set_page(sg_in, bv_in.bv_page, cc->sector_size, bv_in.bv_offset);

	sg_init_table(sg_out, 1);
	sg_set_page(sg_out, bv_out.bv_page, cc->sector_size, bv_out.bv_offset);

	if (cc->iv_gen_ops) {
		/* For READs use IV stored in integrity metadata */
		if (cc->integrity_iv_size && bio_data_dir(ctx->bio_in) != WRITE) {
			memcpy(org_iv, tag_iv, cc->integrity_iv_size);
		} else {
			r = cc->iv_gen_ops->generator(cc, org_iv, dmreq);
			if (r < 0)
				return r;
			/* Store generated IV in integrity metadata */
			if (cc->integrity_iv_size)
				memcpy(tag_iv, org_iv, cc->integrity_iv_size);
		}
		/* Working copy of IV, to be modified in crypto API */
		memcpy(iv, org_iv, cc->iv_size);
	}

	skcipher_request_set_crypt(req, sg_in, sg_out, cc->sector_size, iv);

	if (bio_data_dir(ctx->bio_in) == WRITE)
		r = crypto_skcipher_encrypt(req);
	else
		r = crypto_skcipher_decrypt(req);

	if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
		r = cc->iv_gen_ops->post(cc, org_iv, dmreq);

	bio_advance_iter(ctx->bio_in, &ctx->iter_in, cc->sector_size);
	bio_advance_iter(ctx->bio_out, &ctx->iter_out, cc->sector_size);

	return r;
}
static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error);

static void crypt_alloc_req_skcipher(struct crypt_config *cc,
				     struct convert_context *ctx)
{
	unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);

	if (!ctx->r.req)
		ctx->r.req = mempool_alloc(cc->req_pool, GFP_NOIO);

	skcipher_request_set_tfm(ctx->r.req, cc->cipher_tfm.tfms[key_index]);

	/*
	 * Use REQ_MAY_BACKLOG so a cipher driver internally backlogs
	 * requests if driver request queue is full.
	 */
	skcipher_request_set_callback(ctx->r.req,
	    CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
	    kcryptd_async_done, dmreq_of_req(cc, ctx->r.req));
}

static void crypt_alloc_req_aead(struct crypt_config *cc,
				 struct convert_context *ctx)
{
	if (!ctx->r.req_aead)
		ctx->r.req_aead = mempool_alloc(cc->req_pool, GFP_NOIO);

	aead_request_set_tfm(ctx->r.req_aead, cc->cipher_tfm.tfms_aead[0]);

	/*
	 * Use REQ_MAY_BACKLOG so a cipher driver internally backlogs
	 * requests if driver request queue is full.
	 */
	aead_request_set_callback(ctx->r.req_aead,
	    CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
	    kcryptd_async_done, dmreq_of_req(cc, ctx->r.req_aead));
}

static void crypt_alloc_req(struct crypt_config *cc,
			    struct convert_context *ctx)
{
	if (crypt_integrity_aead(cc))
		crypt_alloc_req_aead(cc, ctx);
	else
		crypt_alloc_req_skcipher(cc, ctx);
}
static void crypt_free_req_skcipher(struct crypt_config *cc,
				    struct skcipher_request *req, struct bio *base_bio)
{
	struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);

	if ((struct skcipher_request *)(io + 1) != req)
		mempool_free(req, cc->req_pool);
}

static void crypt_free_req_aead(struct crypt_config *cc,
				struct aead_request *req, struct bio *base_bio)
{
	struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);

	if ((struct aead_request *)(io + 1) != req)
		mempool_free(req, cc->req_pool);
}

static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_bio)
{
	if (crypt_integrity_aead(cc))
		crypt_free_req_aead(cc, req, base_bio);
	else
		crypt_free_req_skcipher(cc, req, base_bio);
}
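
/*
 * Editorial note (not in the original source): the first crypto request is
 * embedded in the per-bio data directly after struct dm_crypt_io, which is
 * why the helpers above only return a request to the mempool when it is not
 * the embedded one at (io + 1).
 */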
/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */
static blk_status_t crypt_convert(struct crypt_config *cc,
			 struct convert_context *ctx)
{
	unsigned int tag_offset = 0;
	unsigned int sector_step = cc->sector_size >> SECTOR_SHIFT;
	int r;

	atomic_set(&ctx->cc_pending, 1);

	while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {

		crypt_alloc_req(cc, ctx);
		atomic_inc(&ctx->cc_pending);

		if (crypt_integrity_aead(cc))
			r = crypt_convert_block_aead(cc, ctx, ctx->r.req_aead, tag_offset);
		else
			r = crypt_convert_block_skcipher(cc, ctx, ctx->r.req, tag_offset);

		switch (r) {
		/*
		 * The request was queued by a crypto driver
		 * but the driver request queue is full, let's wait.
		 */
		case -EBUSY:
			wait_for_completion(&ctx->restart);
			reinit_completion(&ctx->restart);
			/* fall through */
		/*
		 * The request is queued and processed asynchronously,
		 * completion function kcryptd_async_done() will be called.
		 */
		case -EINPROGRESS:
			ctx->r.req = NULL;
			ctx->cc_sector += sector_step;
			tag_offset++;
			continue;
		/*
		 * The request was already processed (synchronously).
		 */
		case 0:
			atomic_dec(&ctx->cc_pending);
			ctx->cc_sector += sector_step;
			tag_offset++;
			continue;
		/*
		 * There was a data integrity error.
		 */
		case -EBADMSG:
			atomic_dec(&ctx->cc_pending);
			return BLK_STS_PROTECTION;
		/*
		 * There was an error while processing the request.
		 */
		default:
			atomic_dec(&ctx->cc_pending);
			return BLK_STS_IOERR;
		}
	}

	return 0;
}
static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);

/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations (but only because
 * max_segment_size is being constrained to PAGE_SIZE).
 *
 * This function may be called concurrently. If we allocate from the mempool
 * concurrently, there is a possibility of deadlock. For example, if we have a
 * mempool of 256 pages and two processes, each wanting 256 pages, allocate from
 * the mempool concurrently, it may deadlock in a situation where both processes
 * have allocated 128 pages and the mempool is exhausted.
 *
 * In order to avoid this scenario we allocate the pages under a mutex.
 *
 * In order to not degrade performance with excessive locking, we try
 * non-blocking allocations without a mutex first but on failure we fallback
 * to blocking allocations with a mutex.
 */
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
{
	struct crypt_config *cc = io->cc;
	struct bio *clone;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
	unsigned i, len, remaining_size;
	struct page *page;

retry:
	if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
		mutex_lock(&cc->bio_alloc_lock);

	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
	if (!clone)
		goto out;

	clone_init(io, clone);

	remaining_size = size;

	for (i = 0; i < nr_iovecs; i++) {
		page = mempool_alloc(cc->page_pool, gfp_mask);
		if (!page) {
			crypt_free_buffer_pages(cc, clone);
			bio_put(clone);
			gfp_mask |= __GFP_DIRECT_RECLAIM;
			goto retry;
		}

		len = (remaining_size > PAGE_SIZE) ? PAGE_SIZE : remaining_size;

		bio_add_page(clone, page, len, 0);

		remaining_size -= len;
	}

	/* Allocate space for integrity tags */
	if (dm_crypt_integrity_io_alloc(io, clone)) {
		crypt_free_buffer_pages(cc, clone);
		bio_put(clone);
		clone = NULL;
	}
out:
	if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
		mutex_unlock(&cc->bio_alloc_lock);

	return clone;
}
static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
{
	unsigned int i;
	struct bio_vec *bv;

	bio_for_each_segment_all(bv, clone, i) {
		BUG_ON(!bv->bv_page);
		mempool_free(bv->bv_page, cc->page_pool);
		bv->bv_page = NULL;
	}
}
static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
			  struct bio *bio, sector_t sector)
{
	io->cc = cc;
	io->base_bio = bio;
	io->sector = sector;
	io->error = 0;
	io->ctx.r.req = NULL;
	io->integrity_metadata = NULL;
	io->integrity_metadata_from_pool = false;
	atomic_set(&io->io_pending, 0);
}

static void crypt_inc_pending(struct dm_crypt_io *io)
{
	atomic_inc(&io->io_pending);
}
/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 */
static void crypt_dec_pending(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	struct bio *base_bio = io->base_bio;
	blk_status_t error = io->error;

	if (!atomic_dec_and_test(&io->io_pending))
		return;

	if (io->ctx.r.req)
		crypt_free_req(cc, io->ctx.r.req, base_bio);

	if (unlikely(io->integrity_metadata_from_pool))
		mempool_free(io->integrity_metadata, io->cc->tag_pool);
	else
		kfree(io->integrity_metadata);

	base_bio->bi_status = error;
	bio_endio(base_bio);
}
/*
 * kcryptd/kcryptd_io:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated as otherwise the final stages could be
 * starved by new requests which can block in the first stages due
 * to memory allocation.
 *
 * The work is done per CPU global for all dm-crypt instances.
 * They should not depend on each other and do not block.
 */
static void crypt_endio(struct bio *clone)
{
	struct dm_crypt_io *io = clone->bi_private;
	struct crypt_config *cc = io->cc;
	unsigned rw = bio_data_dir(clone);
	blk_status_t error;

	/*
	 * free the processed pages
	 */
	if (rw == WRITE)
		crypt_free_buffer_pages(cc, clone);

	error = clone->bi_status;
	bio_put(clone);

	if (rw == READ && !error) {
		kcryptd_queue_crypt(io);
		return;
	}

	if (unlikely(error))
		io->error = error;

	crypt_dec_pending(io);
}
static void clone_init(struct dm_crypt_io *io, struct bio *clone)
{
	struct crypt_config *cc = io->cc;

	clone->bi_private = io;
	clone->bi_end_io  = crypt_endio;
	clone->bi_bdev    = cc->dev->bdev;
	clone->bi_opf	  = io->base_bio->bi_opf;
}
static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
{
	struct crypt_config *cc = io->cc;
	struct bio *clone;

	/*
	 * We need the original biovec array in order to decrypt
	 * the whole bio data *afterwards* -- thanks to immutable
	 * biovecs we don't need to worry about the block layer
	 * modifying the biovec array; so leverage bio_clone_fast().
	 */
	clone = bio_clone_fast(io->base_bio, gfp, cc->bs);
	if (!clone)
		return 1;

	crypt_inc_pending(io);

	clone_init(io, clone);
	clone->bi_iter.bi_sector = cc->start + io->sector;

	if (dm_crypt_integrity_io_alloc(io, clone)) {
		crypt_dec_pending(io);
		bio_put(clone);
		return 1;
	}

	generic_make_request(clone);
	return 0;
}
static void kcryptd_io_read_work(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	crypt_inc_pending(io);
	if (kcryptd_io_read(io, GFP_NOIO))
		io->error = BLK_STS_RESOURCE;
	crypt_dec_pending(io);
}

static void kcryptd_queue_read(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;

	INIT_WORK(&io->work, kcryptd_io_read_work);
	queue_work(cc->io_queue, &io->work);
}

static void kcryptd_io_write(struct dm_crypt_io *io)
{
	struct bio *clone = io->ctx.bio_out;

	generic_make_request(clone);
}

#define crypt_io_from_node(node) rb_entry((node), struct dm_crypt_io, rb_node)
static int dmcrypt_write(void *data)
{
	struct crypt_config *cc = data;
	struct dm_crypt_io *io;

	while (1) {
		struct rb_root write_tree;
		struct blk_plug plug;

		DECLARE_WAITQUEUE(wait, current);

		spin_lock_irq(&cc->write_thread_wait.lock);
continue_locked:

		if (!RB_EMPTY_ROOT(&cc->write_tree))
			goto pop_from_list;

		set_current_state(TASK_INTERRUPTIBLE);
		__add_wait_queue(&cc->write_thread_wait, &wait);

		spin_unlock_irq(&cc->write_thread_wait.lock);

		if (unlikely(kthread_should_stop())) {
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&cc->write_thread_wait, &wait);
			break;
		}

		schedule();

		set_current_state(TASK_RUNNING);
		spin_lock_irq(&cc->write_thread_wait.lock);
		__remove_wait_queue(&cc->write_thread_wait, &wait);
		goto continue_locked;

pop_from_list:
		write_tree = cc->write_tree;
		cc->write_tree = RB_ROOT;
		spin_unlock_irq(&cc->write_thread_wait.lock);

		BUG_ON(rb_parent(write_tree.rb_node));

		/*
		 * Note: we cannot walk the tree here with rb_next because
		 * the structures may be freed when kcryptd_io_write is called.
		 */
		blk_start_plug(&plug);
		do {
			io = crypt_io_from_node(rb_first(&write_tree));
			rb_erase(&io->rb_node, &write_tree);
			kcryptd_io_write(io);
		} while (!RB_EMPTY_ROOT(&write_tree));
		blk_finish_plug(&plug);
	}
	return 0;
}
static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
{
	struct bio *clone = io->ctx.bio_out;
	struct crypt_config *cc = io->cc;
	unsigned long flags;
	sector_t sector;
	struct rb_node **rbp, *parent;

	if (unlikely(io->error)) {
		crypt_free_buffer_pages(cc, clone);
		bio_put(clone);
		crypt_dec_pending(io);
		return;
	}

	/* crypt_convert should have filled the clone bio */
	BUG_ON(io->ctx.iter_out.bi_size);

	clone->bi_iter.bi_sector = cc->start + io->sector;

	if (likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) {
		generic_make_request(clone);
		return;
	}

	spin_lock_irqsave(&cc->write_thread_wait.lock, flags);
	rbp = &cc->write_tree.rb_node;
	parent = NULL;
	sector = io->sector;
	while (*rbp) {
		parent = *rbp;
		if (sector < crypt_io_from_node(parent)->sector)
			rbp = &(*rbp)->rb_left;
		else
			rbp = &(*rbp)->rb_right;
	}
	rb_link_node(&io->rb_node, parent, rbp);
	rb_insert_color(&io->rb_node, &cc->write_tree);

	wake_up_locked(&cc->write_thread_wait);
	spin_unlock_irqrestore(&cc->write_thread_wait.lock, flags);
}
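
/*
 * Editorial note (not in the original source): writes offloaded to the
 * dmcrypt_write thread are kept in an rb-tree keyed by sector (see above),
 * so the thread dequeues and submits them in ascending sector order.
 */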
static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	struct bio *clone;
	int crypt_finished;
	sector_t sector = io->sector;
	blk_status_t r;

	/*
	 * Prevent io from disappearing until this function completes.
	 */
	crypt_inc_pending(io);
	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);

	clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
	if (unlikely(!clone)) {
		io->error = BLK_STS_IOERR;
		goto dec;
	}

	io->ctx.bio_out = clone;
	io->ctx.iter_out = clone->bi_iter;

	sector += bio_sectors(clone);

	crypt_inc_pending(io);
	r = crypt_convert(cc, &io->ctx);
	if (r)
		io->error = r;
	crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);

	/* Encryption was already finished, submit io now */
	if (crypt_finished) {
		kcryptd_crypt_write_io_submit(io, 0);
		io->sector = sector;
	}

dec:
	crypt_dec_pending(io);
}
static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
{
	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	blk_status_t r;

	crypt_inc_pending(io);

	crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
			   io->sector);

	r = crypt_convert(cc, &io->ctx);
	if (r)
		io->error = r;

	if (atomic_dec_and_test(&io->ctx.cc_pending))
		kcryptd_crypt_read_done(io);

	crypt_dec_pending(io);
}
static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error)
{
	struct dm_crypt_request *dmreq = async_req->data;
	struct convert_context *ctx = dmreq->ctx;
	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
	struct crypt_config *cc = io->cc;

	/*
	 * A request from crypto driver backlog is going to be processed now,
	 * finish the completion and continue in crypt_convert().
	 * (Callback will be called for the second time for this request.)
	 */
	if (error == -EINPROGRESS) {
		complete(&ctx->restart);
		return;
	}

	if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
		error = cc->iv_gen_ops->post(cc, org_iv_of_dmreq(cc, dmreq), dmreq);

	if (error == -EBADMSG) {
		DMERR_LIMIT("INTEGRITY AEAD ERROR, sector %llu",
			    (unsigned long long)le64_to_cpu(*org_sector_of_dmreq(cc, dmreq)));
		io->error = BLK_STS_PROTECTION;
	} else if (error < 0)
		io->error = BLK_STS_IOERR;

	crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);

	if (!atomic_dec_and_test(&ctx->cc_pending))
		return;

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_done(io);
	else
		kcryptd_crypt_write_io_submit(io, 1);
}
static void kcryptd_crypt(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_convert(io);
	else
		kcryptd_crypt_write_convert(io);
}

static void kcryptd_queue_crypt(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;

	INIT_WORK(&io->work, kcryptd_crypt);
	queue_work(cc->crypt_queue, &io->work);
}
static void crypt_free_tfms_aead(struct crypt_config *cc)
{
	if (!cc->cipher_tfm.tfms_aead)
		return;

	if (cc->cipher_tfm.tfms_aead[0] && !IS_ERR(cc->cipher_tfm.tfms_aead[0])) {
		crypto_free_aead(cc->cipher_tfm.tfms_aead[0]);
		cc->cipher_tfm.tfms_aead[0] = NULL;
	}

	kfree(cc->cipher_tfm.tfms_aead);
	cc->cipher_tfm.tfms_aead = NULL;
}

static void crypt_free_tfms_skcipher(struct crypt_config *cc)
{
	unsigned i;

	if (!cc->cipher_tfm.tfms)
		return;

	for (i = 0; i < cc->tfms_count; i++)
		if (cc->cipher_tfm.tfms[i] && !IS_ERR(cc->cipher_tfm.tfms[i])) {
			crypto_free_skcipher(cc->cipher_tfm.tfms[i]);
			cc->cipher_tfm.tfms[i] = NULL;
		}

	kfree(cc->cipher_tfm.tfms);
	cc->cipher_tfm.tfms = NULL;
}

static void crypt_free_tfms(struct crypt_config *cc)
{
	if (crypt_integrity_aead(cc))
		crypt_free_tfms_aead(cc);
	else
		crypt_free_tfms_skcipher(cc);
}
static int crypt_alloc_tfms_skcipher(struct crypt_config *cc, char *ciphermode)
{
	unsigned i;
	int err;

	cc->cipher_tfm.tfms = kzalloc(cc->tfms_count *
				      sizeof(struct crypto_skcipher *), GFP_KERNEL);
	if (!cc->cipher_tfm.tfms)
		return -ENOMEM;

	for (i = 0; i < cc->tfms_count; i++) {
		cc->cipher_tfm.tfms[i] = crypto_alloc_skcipher(ciphermode, 0, 0);
		if (IS_ERR(cc->cipher_tfm.tfms[i])) {
			err = PTR_ERR(cc->cipher_tfm.tfms[i]);
			crypt_free_tfms(cc);
			return err;
		}
	}

	return 0;
}

static int crypt_alloc_tfms_aead(struct crypt_config *cc, char *ciphermode)
{
	int err;

	cc->cipher_tfm.tfms = kmalloc(sizeof(struct crypto_aead *), GFP_KERNEL);
	if (!cc->cipher_tfm.tfms)
		return -ENOMEM;

	cc->cipher_tfm.tfms_aead[0] = crypto_alloc_aead(ciphermode, 0, 0);
	if (IS_ERR(cc->cipher_tfm.tfms_aead[0])) {
		err = PTR_ERR(cc->cipher_tfm.tfms_aead[0]);
		crypt_free_tfms(cc);
		return err;
	}

	return 0;
}

static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
{
	if (crypt_integrity_aead(cc))
		return crypt_alloc_tfms_aead(cc, ciphermode);
	else
		return crypt_alloc_tfms_skcipher(cc, ciphermode);
}
static unsigned crypt_subkey_size(struct crypt_config *cc)
{
	return (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count);
}

static unsigned crypt_authenckey_size(struct crypt_config *cc)
{
	return crypt_subkey_size(cc) + RTA_SPACE(sizeof(struct crypto_authenc_key_param));
}
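
/*
 * Illustrative example (editorial, not from the original source): for a
 * 512-bit aes-xts-plain64 key with tfms_count == 1 and no extra IV key
 * material, crypt_subkey_size() is 64 bytes; crypt_authenckey_size()
 * additionally reserves RTA_SPACE(sizeof(struct crypto_authenc_key_param))
 * for the rtattr header expected by authenc() tfms.
 */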
/*
 * If AEAD is composed like authenc(hmac(sha256),xts(aes)),
 * the key must, for some reason, be in a special format.
 * This function converts cc->key to this special format.
 */
static void crypt_copy_authenckey(char *p, const void *key,
				  unsigned enckeylen, unsigned authkeylen)
{
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;

	rta = (struct rtattr *)p;
	param = RTA_DATA(rta);
	param->enckeylen = cpu_to_be32(enckeylen);
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	p += RTA_SPACE(sizeof(*param));
	memcpy(p, key + enckeylen, authkeylen);
	p += authkeylen;
	memcpy(p, key, enckeylen);
}
static int crypt_setkey(struct crypt_config *cc)
{
	unsigned subkey_size;
	int err = 0, i, r;

	/* Ignore extra keys (which are used for IV etc) */
	subkey_size = crypt_subkey_size(cc);

	if (crypt_integrity_hmac(cc))
		crypt_copy_authenckey(cc->authenc_key, cc->key,
				      subkey_size - cc->key_mac_size,
				      cc->key_mac_size);
	for (i = 0; i < cc->tfms_count; i++) {
		if (crypt_integrity_hmac(cc))
			r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
				cc->authenc_key, crypt_authenckey_size(cc));
		else if (crypt_integrity_aead(cc))
			r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
					       cc->key + (i * subkey_size),
					       subkey_size);
		else
			r = crypto_skcipher_setkey(cc->cipher_tfm.tfms[i],
						   cc->key + (i * subkey_size),
						   subkey_size);
		if (r)
			err = r;
	}

	if (crypt_integrity_hmac(cc))
		memzero_explicit(cc->authenc_key, crypt_authenckey_size(cc));

	return err;
}
#ifdef CONFIG_KEYS

static bool contains_whitespace(const char *str)
{
	while (*str)
		if (isspace(*str++))
			return true;
	return false;
}
static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
{
	char *new_key_string, *key_desc;
	int ret;
	struct key *key;
	const struct user_key_payload *ukp;

	/*
	 * Reject key_string with whitespace. dm core currently lacks code for
	 * proper whitespace escaping in arguments on DM_TABLE_STATUS path.
	 */
	if (contains_whitespace(key_string)) {
		DMERR("whitespace chars not allowed in key string");
		return -EINVAL;
	}

	/* look for next ':' separating key_type from key_description */
	key_desc = strpbrk(key_string, ":");
	if (!key_desc || key_desc == key_string || !strlen(key_desc + 1))
		return -EINVAL;

	if (strncmp(key_string, "logon:", key_desc - key_string + 1) &&
	    strncmp(key_string, "user:", key_desc - key_string + 1))
		return -EINVAL;

	new_key_string = kstrdup(key_string, GFP_KERNEL);
	if (!new_key_string)
		return -ENOMEM;

	key = request_key(key_string[0] == 'l' ? &key_type_logon : &key_type_user,
			  key_desc + 1, NULL);
	if (IS_ERR(key)) {
		kzfree(new_key_string);
		return PTR_ERR(key);
	}

	down_read(&key->sem);

	ukp = user_key_payload_locked(key);
	if (!ukp) {
		up_read(&key->sem);
		key_put(key);
		kzfree(new_key_string);
		return -EKEYREVOKED;
	}

	if (cc->key_size != ukp->datalen) {
		up_read(&key->sem);
		key_put(key);
		kzfree(new_key_string);
		return -EINVAL;
	}

	memcpy(cc->key, ukp->data, cc->key_size);

	up_read(&key->sem);
	key_put(key);

	/* clear the flag since following operations may invalidate previously valid key */
	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);

	ret = crypt_setkey(cc);

	/* wipe the kernel key payload copy in each case */
	memset(cc->key, 0, cc->key_size * sizeof(u8));

	if (!ret) {
		set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
		kzfree(cc->key_string);
		cc->key_string = new_key_string;
	} else
		kzfree(new_key_string);

	return ret;
}
static int get_key_size(char **key_string)
{
	char *colon, dummy;
	int ret;

	if (*key_string[0] != ':')
		return strlen(*key_string) >> 1;

	/* look for next ':' in key string */
	colon = strpbrk(*key_string + 1, ":");
	if (!colon)
		return -EINVAL;

	if (sscanf(*key_string + 1, "%u%c", &ret, &dummy) != 2 || dummy != ':')
		return -EINVAL;

	*key_string = colon;

	/* remaining key string should be :<logon|user>:<key_desc> */

	return ret;
}

#else

static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
{
	return -EINVAL;
}

static int get_key_size(char **key_string)
{
	return (*key_string[0] == ':') ? -EINVAL : strlen(*key_string) >> 1;
}

#endif
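
/*
 * Illustrative example (editorial, not from the original source): with
 * CONFIG_KEYS, a mapping table may pass the volume key as
 * ":64:logon:my_prefix:my_key", i.e. the key size in bytes, the keyring key
 * type (logon or user) and the key description parsed above.
 */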
static int crypt_set_key(struct crypt_config *cc, char *key)
{
	int r = -EINVAL;
	int key_string_len = strlen(key);

	/* Hyphen (which gives a key_size of zero) means there is no key. */
	if (!cc->key_size && strcmp(key, "-"))
		goto out;

	/* ':' means the key is in kernel keyring, short-circuit normal key processing */
	if (key[0] == ':') {
		r = crypt_set_keyring_key(cc, key + 1);
		goto out;
	}

	/* clear the flag since following operations may invalidate previously valid key */
	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);

	/* wipe references to any kernel keyring key */
	kzfree(cc->key_string);
	cc->key_string = NULL;

	/* Decode key from its hex representation. */
	if (cc->key_size && hex2bin(cc->key, key, cc->key_size) < 0)
		goto out;

	r = crypt_setkey(cc);
	if (!r)
		set_bit(DM_CRYPT_KEY_VALID, &cc->flags);

out:
	/* Hex key string not needed after here, so wipe it. */
	memset(key, '0', key_string_len);

	return r;
}
static int crypt_wipe_key(struct crypt_config *cc)
{
	int r;

	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
	get_random_bytes(&cc->key, cc->key_size);
	kzfree(cc->key_string);
	cc->key_string = NULL;
	r = crypt_setkey(cc);
	memset(&cc->key, 0, cc->key_size * sizeof(u8));

	return r;
}
static void crypt_dtr(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	ti->private = NULL;

	if (!cc)
		return;

	if (cc->write_thread)
		kthread_stop(cc->write_thread);

	if (cc->io_queue)
		destroy_workqueue(cc->io_queue);
	if (cc->crypt_queue)
		destroy_workqueue(cc->crypt_queue);

	crypt_free_tfms(cc);

	if (cc->bs)
		bioset_free(cc->bs);

	mempool_destroy(cc->page_pool);
	mempool_destroy(cc->req_pool);
	mempool_destroy(cc->tag_pool);

	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);

	if (cc->dev)
		dm_put_device(ti, cc->dev);

	kzfree(cc->cipher);
	kzfree(cc->cipher_string);
	kzfree(cc->key_string);
	kzfree(cc->cipher_auth);
	kzfree(cc->authenc_key);

	/* Must zero key material before freeing */
	kzfree(cc);
}
static int crypt_ctr_ivmode(struct dm_target *ti, const char *ivmode)
{
	struct crypt_config *cc = ti->private;

	if (crypt_integrity_aead(cc))
		cc->iv_size = crypto_aead_ivsize(any_tfm_aead(cc));
	else
		cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));

	if (cc->iv_size)
		/* at least a 64 bit sector number should fit in our buffer */
		cc->iv_size = max(cc->iv_size,
				  (unsigned int)(sizeof(u64) / sizeof(u8)));
	else if (ivmode) {
		DMWARN("Selected cipher does not support IVs");
		ivmode = NULL;
	}

	/* Choose ivmode, see comments at iv code. */
	if (ivmode == NULL)
		cc->iv_gen_ops = NULL;
	else if (strcmp(ivmode, "plain") == 0)
		cc->iv_gen_ops = &crypt_iv_plain_ops;
	else if (strcmp(ivmode, "plain64") == 0)
		cc->iv_gen_ops = &crypt_iv_plain64_ops;
	else if (strcmp(ivmode, "plain64be") == 0)
		cc->iv_gen_ops = &crypt_iv_plain64be_ops;
	else if (strcmp(ivmode, "essiv") == 0)
		cc->iv_gen_ops = &crypt_iv_essiv_ops;
	else if (strcmp(ivmode, "benbi") == 0)
		cc->iv_gen_ops = &crypt_iv_benbi_ops;
	else if (strcmp(ivmode, "null") == 0)
		cc->iv_gen_ops = &crypt_iv_null_ops;
	else if (strcmp(ivmode, "lmk") == 0) {
		cc->iv_gen_ops = &crypt_iv_lmk_ops;
		/*
		 * Versions 2 and 3 are recognised according
		 * to length of provided multi-key string.
		 * If present (version 3), last key is used as IV seed.
		 * All keys (including IV seed) are always the same size.
		 */
		if (cc->key_size % cc->key_parts) {
			cc->key_parts++;
			cc->key_extra_size = cc->key_size / cc->key_parts;
		}
	} else if (strcmp(ivmode, "tcw") == 0) {
		cc->iv_gen_ops = &crypt_iv_tcw_ops;
		cc->key_parts += 2; /* IV + whitening */
		cc->key_extra_size = cc->iv_size + TCW_WHITENING_SIZE;
	} else if (strcmp(ivmode, "random") == 0) {
		cc->iv_gen_ops = &crypt_iv_random_ops;
		/* Need storage space in integrity fields. */
		cc->integrity_iv_size = cc->iv_size;
	} else {
		ti->error = "Invalid IV mode";
		return -EINVAL;
	}

	return 0;
}
/*
 * Workaround to parse cipher algorithm from crypto API spec.
 * The cc->cipher is currently used only in ESSIV.
 * This should probably be done by crypto-api calls (once available...)
 */
static int crypt_ctr_blkdev_cipher(struct crypt_config *cc)
{
	const char *alg_name = NULL;
	char *start, *end;

	if (crypt_integrity_aead(cc)) {
		alg_name = crypto_tfm_alg_name(crypto_aead_tfm(any_tfm_aead(cc)));
		if (!alg_name)
			return -EINVAL;
		if (crypt_integrity_hmac(cc)) {
			alg_name = strchr(alg_name, ',');
			if (!alg_name)
				return -EINVAL;
			alg_name++;
		}
	} else {
		alg_name = crypto_tfm_alg_name(crypto_skcipher_tfm(any_tfm(cc)));
		if (!alg_name)
			return -EINVAL;
	}

	start = strchr(alg_name, '(');
	end = strchr(alg_name, ')');

	if (!start && !end) {
		cc->cipher = kstrdup(alg_name, GFP_KERNEL);
		return cc->cipher ? 0 : -ENOMEM;
	}

	if (!start || !end || ++start >= end)
		return -EINVAL;

	cc->cipher = kzalloc(end - start + 1, GFP_KERNEL);
	if (!cc->cipher)
		return -ENOMEM;

	strncpy(cc->cipher, start, end - start);

	return 0;
}
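/*
 * Examples of what the parser above extracts into cc->cipher
 * (illustrative, assuming typical crypto API algorithm names):
 *   skcipher "xts(aes)"                   -> "aes"
 *   skcipher "aes" (no parentheses)       -> "aes"
 *   AEAD "authenc(hmac(sha256),cbc(aes))" -> "aes" (name after the comma)
 */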
/*
 * Workaround to parse HMAC algorithm from AEAD crypto API spec.
 * The HMAC is needed to calculate tag size (HMAC digest size).
 * This should probably be done by crypto-api calls (once available...)
 */
static int crypt_ctr_auth_cipher(struct crypt_config *cc, char *cipher_api)
{
	char *start, *end, *mac_alg = NULL;
	struct crypto_ahash *mac;

	if (!strstarts(cipher_api, "authenc("))
		return 0;

	start = strchr(cipher_api, '(');
	end = strchr(cipher_api, ',');
	if (!start || !end || ++start > end)
		return -EINVAL;

	mac_alg = kzalloc(end - start + 1, GFP_KERNEL);
	if (!mac_alg)
		return -ENOMEM;
	strncpy(mac_alg, start, end - start);

	mac = crypto_alloc_ahash(mac_alg, 0, 0);
	kfree(mac_alg);

	if (IS_ERR(mac))
		return PTR_ERR(mac);

	cc->key_mac_size = crypto_ahash_digestsize(mac);
	crypto_free_ahash(mac);

	cc->authenc_key = kmalloc(crypt_authenckey_size(cc), GFP_KERNEL);
	if (!cc->authenc_key)
		return -ENOMEM;

	return 0;
}
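/*
 * Example (illustrative): for cipher_api "authenc(hmac(sha256),cbc(aes))"
 * the code above isolates mac_alg "hmac(sha256)", so cc->key_mac_size
 * becomes the SHA-256 digest size (32 bytes) and the authenc key buffer
 * is sized accordingly.
 */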
static int crypt_ctr_cipher_new(struct dm_target *ti, char *cipher_in, char *key,
				char **ivmode, char **ivopts)
{
	struct crypt_config *cc = ti->private;
	char *tmp, *cipher_api;
	int ret = -EINVAL;

	cc->tfms_count = 1;

	/*
	 * New format (capi: prefix)
	 * capi:cipher_api_spec-iv:ivopts
	 */
	tmp = &cipher_in[strlen("capi:")];
	cipher_api = strsep(&tmp, "-");
	*ivmode = strsep(&tmp, ":");
	*ivopts = tmp;

	if (*ivmode && !strcmp(*ivmode, "lmk"))
		cc->tfms_count = 64;

	cc->key_parts = cc->tfms_count;

	/* Allocate cipher */
	ret = crypt_alloc_tfms(cc, cipher_api);
	if (ret < 0) {
		ti->error = "Error allocating crypto tfm";
		return ret;
	}

	/* Alloc AEAD, can be used only in new format. */
	if (crypt_integrity_aead(cc)) {
		ret = crypt_ctr_auth_cipher(cc, cipher_api);
		if (ret < 0) {
			ti->error = "Invalid AEAD cipher spec";
			return -ENOMEM;
		}
		cc->iv_size = crypto_aead_ivsize(any_tfm_aead(cc));
	} else
		cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));

	ret = crypt_ctr_blkdev_cipher(cc);
	if (ret < 0) {
		ti->error = "Cannot allocate cipher string";
		return -ENOMEM;
	}

	return 0;
}
static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key,
				char **ivmode, char **ivopts)
{
	struct crypt_config *cc = ti->private;
	char *tmp, *cipher, *chainmode, *keycount;
	char *cipher_api = NULL;
	int ret = -EINVAL;
	char dummy;

	if (strchr(cipher_in, '(') || crypt_integrity_aead(cc)) {
		ti->error = "Bad cipher specification";
		return -EINVAL;
	}

	/*
	 * Legacy dm-crypt cipher specification
	 * cipher[:keycount]-mode-iv:ivopts
	 */
	tmp = cipher_in;
	keycount = strsep(&tmp, "-");
	cipher = strsep(&keycount, ":");

	if (!keycount)
		cc->tfms_count = 1;
	else if (sscanf(keycount, "%u%c", &cc->tfms_count, &dummy) != 1 ||
		 !is_power_of_2(cc->tfms_count)) {
		ti->error = "Bad cipher key count specification";
		return -EINVAL;
	}
	cc->key_parts = cc->tfms_count;

	cc->cipher = kstrdup(cipher, GFP_KERNEL);
	if (!cc->cipher)
		goto bad_mem;

	chainmode = strsep(&tmp, "-");
	*ivopts = strsep(&tmp, "-");
	*ivmode = strsep(&*ivopts, ":");

	if (tmp)
		DMWARN("Ignoring unexpected additional cipher options");

	/*
	 * For compatibility with the original dm-crypt mapping format, if
	 * only the cipher name is supplied, use cbc-plain.
	 */
	if (!chainmode || (!strcmp(chainmode, "plain") && !*ivmode)) {
		chainmode = "cbc";
		*ivmode = "plain";
	}

	if (strcmp(chainmode, "ecb") && !*ivmode) {
		ti->error = "IV mechanism required";
		return -EINVAL;
	}

	cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL);
	if (!cipher_api)
		goto bad_mem;

	ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
		       "%s(%s)", chainmode, cipher);
	if (ret < 0) {
		kfree(cipher_api);
		goto bad_mem;
	}

	/* Allocate cipher */
	ret = crypt_alloc_tfms(cc, cipher_api);
	if (ret < 0) {
		ti->error = "Error allocating crypto tfm";
		kfree(cipher_api);
		return ret;
	}

	return 0;

bad_mem:
	ti->error = "Cannot allocate cipher strings";
	return -ENOMEM;
}
static int crypt_ctr_cipher(struct dm_target *ti, char *cipher_in, char *key)
{
	struct crypt_config *cc = ti->private;
	char *ivmode = NULL, *ivopts = NULL;
	int ret;

	cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL);
	if (!cc->cipher_string) {
		ti->error = "Cannot allocate cipher strings";
		return -ENOMEM;
	}

	if (strstarts(cipher_in, "capi:"))
		ret = crypt_ctr_cipher_new(ti, cipher_in, key, &ivmode, &ivopts);
	else
		ret = crypt_ctr_cipher_old(ti, cipher_in, key, &ivmode, &ivopts);
	if (ret)
		return ret;

	/* Initialize IV */
	ret = crypt_ctr_ivmode(ti, ivmode);
	if (ret < 0)
		return ret;

	/* Initialize and set key */
	ret = crypt_set_key(cc, key);
	if (ret < 0) {
		ti->error = "Error decoding and setting key";
		return ret;
	}

	/* Allocate IV */
	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
		ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
		if (ret < 0) {
			ti->error = "Error creating IV";
			return ret;
		}
	}

	/* Initialize IV (set keys for ESSIV etc) */
	if (cc->iv_gen_ops && cc->iv_gen_ops->init) {
		ret = cc->iv_gen_ops->init(cc);
		if (ret < 0) {
			ti->error = "Error initialising IV";
			return ret;
		}
	}

	return ret;
}
static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc = ti->private;
	struct dm_arg_set as;
	static struct dm_arg _args[] = {
		{0, 6, "Invalid number of feature args"},
	};
	unsigned int opt_params, val;
	const char *opt_string, *sval;
	char dummy;
	int ret;

	/* Optional parameters */
	as.argc = argc;
	as.argv = argv;

	ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
	if (ret)
		return ret;

	while (opt_params--) {
		opt_string = dm_shift_arg(&as);
		if (!opt_string) {
			ti->error = "Not enough feature arguments";
			return -EINVAL;
		}

		if (!strcasecmp(opt_string, "allow_discards"))
			ti->num_discard_bios = 1;

		else if (!strcasecmp(opt_string, "same_cpu_crypt"))
			set_bit(DM_CRYPT_SAME_CPU, &cc->flags);

		else if (!strcasecmp(opt_string, "submit_from_crypt_cpus"))
			set_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
		else if (sscanf(opt_string, "integrity:%u:", &val) == 1) {
			if (val == 0 || val > MAX_TAG_SIZE) {
				ti->error = "Invalid integrity arguments";
				return -EINVAL;
			}
			cc->on_disk_tag_size = val;
			sval = strchr(opt_string + strlen("integrity:"), ':') + 1;
			if (!strcasecmp(sval, "aead")) {
				set_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags);
			} else if (strcasecmp(sval, "none")) {
				ti->error = "Unknown integrity profile";
				return -EINVAL;
			}

			cc->cipher_auth = kstrdup(sval, GFP_KERNEL);
			if (!cc->cipher_auth)
				return -ENOMEM;
		} else if (sscanf(opt_string, "sector_size:%hu%c", &cc->sector_size, &dummy) == 1) {
			if (cc->sector_size < (1 << SECTOR_SHIFT) ||
			    cc->sector_size > 4096 ||
			    (cc->sector_size & (cc->sector_size - 1))) {
				ti->error = "Invalid feature value for sector_size";
				return -EINVAL;
			}
			cc->sector_shift = __ffs(cc->sector_size) - SECTOR_SHIFT;
		} else if (!strcasecmp(opt_string, "iv_large_sectors"))
			set_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
		else {
			ti->error = "Invalid feature arguments";
			return -EINVAL;
		}
	}

	return 0;
}
/*
 * Construct an encryption mapping:
 * <cipher> [<key>|:<key_size>:<user|logon>:<key_description>] <iv_offset> <dev_path> <start>
 */
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc;
	int key_size;
	unsigned int align_mask;
	unsigned long long tmpll;
	int ret;
	size_t iv_size_padding, additional_req_size;
	char dummy;

	if (argc < 5) {
		ti->error = "Not enough arguments";
		return -EINVAL;
	}

	key_size = get_key_size(&argv[1]);
	if (key_size < 0) {
		ti->error = "Cannot parse key size";
		return -EINVAL;
	}

	cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
	if (!cc) {
		ti->error = "Cannot allocate encryption context";
		return -ENOMEM;
	}
	cc->key_size = key_size;
	cc->sector_size = (1 << SECTOR_SHIFT);
	cc->sector_shift = 0;

	ti->private = cc;

	/* Optional parameters need to be read before cipher constructor */
	if (argc > 5) {
		ret = crypt_ctr_optional(ti, argc - 5, &argv[5]);
		if (ret)
			goto bad;
	}

	ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
	if (ret < 0)
		goto bad;

	if (crypt_integrity_aead(cc)) {
		cc->dmreq_start = sizeof(struct aead_request);
		cc->dmreq_start += crypto_aead_reqsize(any_tfm_aead(cc));
		align_mask = crypto_aead_alignmask(any_tfm_aead(cc));
	} else {
		cc->dmreq_start = sizeof(struct skcipher_request);
		cc->dmreq_start += crypto_skcipher_reqsize(any_tfm(cc));
		align_mask = crypto_skcipher_alignmask(any_tfm(cc));
	}
	cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));

	if (align_mask < CRYPTO_MINALIGN) {
		/* Allocate the padding exactly */
		iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request))
				& align_mask;
	} else {
		/*
		 * If the cipher requires greater alignment than kmalloc
		 * alignment, we don't know the exact position of the
		 * initialization vector. We must assume worst case.
		 */
		iv_size_padding = align_mask;
	}

	ret = -ENOMEM;

	/*  ...| IV + padding | original IV | original sec. number | bio tag offset | */
	additional_req_size = sizeof(struct dm_crypt_request) +
		iv_size_padding + cc->iv_size +
		cc->iv_size +
		sizeof(uint64_t) +
		sizeof(unsigned int);

	cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start + additional_req_size);
	if (!cc->req_pool) {
		ti->error = "Cannot allocate crypt request mempool";
		goto bad;
	}

	cc->per_bio_data_size = ti->per_io_data_size =
		ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start + additional_req_size,
		      ARCH_KMALLOC_MINALIGN);

	cc->page_pool = mempool_create_page_pool(BIO_MAX_PAGES, 0);
	if (!cc->page_pool) {
		ti->error = "Cannot allocate page mempool";
		goto bad;
	}

	cc->bs = bioset_create(MIN_IOS, 0, (BIOSET_NEED_BVECS |
					    BIOSET_NEED_RESCUER));
	if (!cc->bs) {
		ti->error = "Cannot allocate crypt bioset";
		goto bad;
	}

	mutex_init(&cc->bio_alloc_lock);

	ret = -EINVAL;
	if ((sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) ||
	    (tmpll & ((cc->sector_size >> SECTOR_SHIFT) - 1))) {
		ti->error = "Invalid iv_offset sector";
		goto bad;
	}
	cc->iv_offset = tmpll;

	ret = dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev);
	if (ret) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	ret = -EINVAL;
	if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) {
		ti->error = "Invalid device sector";
		goto bad;
	}
	cc->start = tmpll;

	if (crypt_integrity_aead(cc) || cc->integrity_iv_size) {
		ret = crypt_integrity_ctr(cc, ti);
		if (ret)
			goto bad;

		cc->tag_pool_max_sectors = POOL_ENTRY_SIZE / cc->on_disk_tag_size;
		if (!cc->tag_pool_max_sectors)
			cc->tag_pool_max_sectors = 1;

		cc->tag_pool = mempool_create_kmalloc_pool(MIN_IOS,
			cc->tag_pool_max_sectors * cc->on_disk_tag_size);
		if (!cc->tag_pool) {
			ti->error = "Cannot allocate integrity tags mempool";
			ret = -ENOMEM;
			goto bad;
		}

		cc->tag_pool_max_sectors <<= cc->sector_shift;
	}

	ret = -ENOMEM;
	cc->io_queue = alloc_workqueue("kcryptd_io", WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
	if (!cc->io_queue) {
		ti->error = "Couldn't create kcryptd io queue";
		goto bad;
	}

	if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
		cc->crypt_queue = alloc_workqueue("kcryptd", WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
	else
		cc->crypt_queue = alloc_workqueue("kcryptd",
						  WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
						  num_online_cpus());
	if (!cc->crypt_queue) {
		ti->error = "Couldn't create kcryptd queue";
		goto bad;
	}

	init_waitqueue_head(&cc->write_thread_wait);
	cc->write_tree = RB_ROOT;

	cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write");
	if (IS_ERR(cc->write_thread)) {
		ret = PTR_ERR(cc->write_thread);
		cc->write_thread = NULL;
		ti->error = "Couldn't spawn write thread";
		goto bad;
	}
	wake_up_process(cc->write_thread);

	ti->num_flush_bios = 1;

	return 0;

bad:
	crypt_dtr(ti);
	return ret;
}
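/*
 * Illustrative constructor usage (example device and key only):
 *
 *   echo "0 $(blockdev --getsz /dev/sdb) crypt aes-xts-plain64 \
 *         <hex key> 0 /dev/sdb 0" | dmsetup create cryptdev
 *
 * The arguments map to argv[] above as cipher, key, iv_offset, device
 * and start, optionally followed by "<#opt_params> <opt_params...>".
 */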
static int crypt_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_crypt_io *io;
	struct crypt_config *cc = ti->private;

	/*
	 * If bio is REQ_PREFLUSH or REQ_OP_DISCARD, just bypass crypt queues.
	 * - for REQ_PREFLUSH device-mapper core ensures that no IO is in-flight
	 * - for REQ_OP_DISCARD caller must use flush if IO ordering matters
	 */
	if (unlikely(bio->bi_opf & REQ_PREFLUSH ||
	    bio_op(bio) == REQ_OP_DISCARD)) {
		bio->bi_bdev = cc->dev->bdev;
		if (bio_sectors(bio))
			bio->bi_iter.bi_sector = cc->start +
				dm_target_offset(ti, bio->bi_iter.bi_sector);
		return DM_MAPIO_REMAPPED;
	}

	/*
	 * Check if bio is too large, split as needed.
	 */
	if (unlikely(bio->bi_iter.bi_size > (BIO_MAX_PAGES << PAGE_SHIFT)) &&
	    (bio_data_dir(bio) == WRITE || cc->on_disk_tag_size))
		dm_accept_partial_bio(bio, ((BIO_MAX_PAGES << PAGE_SHIFT) >> SECTOR_SHIFT));

	/*
	 * Ensure that bio is a multiple of internal sector encryption size
	 * and is aligned to this size as defined in IO hints.
	 */
	if (unlikely((bio->bi_iter.bi_sector & ((cc->sector_size >> SECTOR_SHIFT) - 1)) != 0))
		return DM_MAPIO_KILL;

	if (unlikely(bio->bi_iter.bi_size & (cc->sector_size - 1)))
		return DM_MAPIO_KILL;

	io = dm_per_bio_data(bio, cc->per_bio_data_size);
	crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));

	if (cc->on_disk_tag_size) {
		unsigned tag_len = cc->on_disk_tag_size * (bio_sectors(bio) >> cc->sector_shift);

		if (unlikely(tag_len > KMALLOC_MAX_SIZE) ||
		    unlikely(!(io->integrity_metadata = kmalloc(tag_len,
				GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN)))) {
			if (bio_sectors(bio) > cc->tag_pool_max_sectors)
				dm_accept_partial_bio(bio, cc->tag_pool_max_sectors);
			io->integrity_metadata = mempool_alloc(cc->tag_pool, GFP_NOIO);
			io->integrity_metadata_from_pool = true;
		}
	}

	if (crypt_integrity_aead(cc))
		io->ctx.r.req_aead = (struct aead_request *)(io + 1);
	else
		io->ctx.r.req = (struct skcipher_request *)(io + 1);

	if (bio_data_dir(io->base_bio) == READ) {
		if (kcryptd_io_read(io, GFP_NOWAIT))
			kcryptd_queue_read(io);
	} else
		kcryptd_queue_crypt(io);

	return DM_MAPIO_SUBMITTED;
}
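/*
 * Worked example of the alignment checks above (assuming the optional
 * sector_size:4096 feature): cc->sector_size >> SECTOR_SHIFT == 8, so
 * bio->bi_iter.bi_sector must be a multiple of 8 (in 512-byte units)
 * and bi_size a multiple of 4096, otherwise the bio is rejected with
 * DM_MAPIO_KILL.
 */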
static void crypt_status(struct dm_target *ti, status_type_t type,
			 unsigned status_flags, char *result, unsigned maxlen)
{
	struct crypt_config *cc = ti->private;
	unsigned i, sz = 0;
	int num_feature_args = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s ", cc->cipher_string);

		if (cc->key_size > 0) {
			if (cc->key_string)
				DMEMIT(":%u:%s", cc->key_size, cc->key_string);
			else
				for (i = 0; i < cc->key_size; i++)
					DMEMIT("%02x", cc->key[i]);
		} else
			DMEMIT("-");

		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
				cc->dev->name, (unsigned long long)cc->start);

		num_feature_args += !!ti->num_discard_bios;
		num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags);
		num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
		num_feature_args += cc->sector_size != (1 << SECTOR_SHIFT);
		num_feature_args += test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
		if (cc->on_disk_tag_size)
			num_feature_args++;
		if (num_feature_args) {
			DMEMIT(" %d", num_feature_args);
			if (ti->num_discard_bios)
				DMEMIT(" allow_discards");
			if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
				DMEMIT(" same_cpu_crypt");
			if (test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags))
				DMEMIT(" submit_from_crypt_cpus");
			if (cc->on_disk_tag_size)
				DMEMIT(" integrity:%u:%s", cc->on_disk_tag_size, cc->cipher_auth);
			if (cc->sector_size != (1 << SECTOR_SHIFT))
				DMEMIT(" sector_size:%d", cc->sector_size);
			if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
				DMEMIT(" iv_large_sectors");
		}

		break;
	}
}
static void crypt_postsuspend(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

static int crypt_preresume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
		DMERR("aborting resume - crypt key is not set.");
		return -EAGAIN;
	}

	return 0;
}

static void crypt_resume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}
/* Message interface
 *	key set <key>
 *	key wipe
 */
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct crypt_config *cc = ti->private;
	int key_size, ret = -EINVAL;

	if (argc < 2)
		goto error;

	if (!strcasecmp(argv[0], "key")) {
		if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
			DMWARN("not suspended during key manipulation.");
			return -EINVAL;
		}
		if (argc == 3 && !strcasecmp(argv[1], "set")) {
			/* The key size may not be changed. */
			key_size = get_key_size(&argv[2]);
			if (key_size < 0 || cc->key_size != key_size) {
				memset(argv[2], '0', strlen(argv[2]));
				return -EINVAL;
			}

			ret = crypt_set_key(cc, argv[2]);
			if (ret)
				return ret;
			if (cc->iv_gen_ops && cc->iv_gen_ops->init)
				ret = cc->iv_gen_ops->init(cc);
			return ret;
		}
		if (argc == 2 && !strcasecmp(argv[1], "wipe")) {
			if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
				ret = cc->iv_gen_ops->wipe(cc);
				if (ret)
					return ret;
			}
			return crypt_wipe_key(cc);
		}
	}

error:
	DMWARN("unrecognised message received.");
	return -EINVAL;
}
static int crypt_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	struct crypt_config *cc = ti->private;

	return fn(ti, cc->dev, cc->start, ti->len, data);
}
static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct crypt_config *cc = ti->private;

	/*
	 * Unfortunate constraint that is required to avoid the potential
	 * for exceeding underlying device's max_segments limits -- due to
	 * crypt_alloc_buffer() possibly allocating pages for the encryption
	 * bio that are not as physically contiguous as the original bio.
	 */
	limits->max_segment_size = PAGE_SIZE;

	if (cc->sector_size != (1 << SECTOR_SHIFT)) {
		limits->logical_block_size = cc->sector_size;
		limits->physical_block_size = cc->sector_size;
		blk_limits_io_min(limits, cc->sector_size);
	}
}
static struct target_type crypt_target = {
	.name   = "crypt",
	.version = {1, 18, 0},
	.module = THIS_MODULE,
	.ctr    = crypt_ctr,
	.dtr    = crypt_dtr,
	.map    = crypt_map,
	.status = crypt_status,
	.postsuspend = crypt_postsuspend,
	.preresume = crypt_preresume,
	.resume = crypt_resume,
	.message = crypt_message,
	.iterate_devices = crypt_iterate_devices,
	.io_hints = crypt_io_hints,
};

static int __init dm_crypt_init(void)
{
	int r;

	r = dm_register_target(&crypt_target);
	if (r < 0)
		DMERR("register failed %d", r);

	return r;
}

static void __exit dm_crypt_exit(void)
{
	dm_unregister_target(&crypt_target);
}

module_init(dm_crypt_init);
module_exit(dm_crypt_exit);

MODULE_AUTHOR("Jana Saout <jana@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");