/*
 * Copyright (C) 2003 Jana Saout <jana@saout.de>
 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
 * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2013 Milan Broz <gmazyland@gmail.com>
 *
 * This file is released under the GPL.
 */

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/backing-dev.h>
#include <linux/atomic.h>
#include <linux/scatterlist.h>
#include <asm/page.h>
#include <asm/unaligned.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/algapi.h>

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "crypt"

/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
	struct completion restart;
	struct bio *bio_in;
	struct bio *bio_out;
	struct bvec_iter iter_in;
	struct bvec_iter iter_out;
	sector_t cc_sector;
	atomic_t cc_pending;
	struct ablkcipher_request *req;
};

/*
 * per bio private data
 */
struct dm_crypt_io {
	struct crypt_config *cc;
	struct bio *base_bio;
	struct work_struct work;

	struct convert_context ctx;

	atomic_t io_pending;
	int error;
	sector_t sector;

	struct list_head list;
} CRYPTO_MINALIGN_ATTR;

struct dm_crypt_request {
	struct convert_context *ctx;
	struct scatterlist sg_in;
	struct scatterlist sg_out;
	sector_t iv_sector;
};

struct crypt_config;

struct crypt_iv_operations {
	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
		   const char *opts);
	void (*dtr)(struct crypt_config *cc);
	int (*init)(struct crypt_config *cc);
	int (*wipe)(struct crypt_config *cc);
	int (*generator)(struct crypt_config *cc, u8 *iv,
			 struct dm_crypt_request *dmreq);
	int (*post)(struct crypt_config *cc, u8 *iv,
		    struct dm_crypt_request *dmreq);
};

struct iv_essiv_private {
	struct crypto_hash *hash_tfm;
	u8 *salt;
};

struct iv_benbi_private {
	int shift;
};

#define LMK_SEED_SIZE 64 /* hash + 0 */
struct iv_lmk_private {
	struct crypto_shash *hash_tfm;
	u8 *seed;
};

#define TCW_WHITENING_SIZE 16
struct iv_tcw_private {
	struct crypto_shash *crc32_tfm;
	u8 *iv_seed;
	u8 *whitening;
};

/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
	     DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD };

/*
 * The fields in here must be read only after initialization.
 */
struct crypt_config {
	struct dm_dev *dev;
	sector_t start;

	/*
	 * pool for per bio private data, crypto requests and
	 * encryption requests/buffer pages
	 */
	mempool_t *req_pool;
	mempool_t *page_pool;
	struct bio_set *bs;
	struct mutex bio_alloc_lock;

	struct workqueue_struct *io_queue;
	struct workqueue_struct *crypt_queue;

	struct task_struct *write_thread;
	wait_queue_head_t write_thread_wait;
	struct list_head write_thread_list;

	char *cipher;
	char *cipher_string;

	struct crypt_iv_operations *iv_gen_ops;
	union {
		struct iv_essiv_private essiv;
		struct iv_benbi_private benbi;
		struct iv_lmk_private lmk;
		struct iv_tcw_private tcw;
	} iv_gen_private;
	sector_t iv_offset;
	unsigned int iv_size;

	/* ESSIV: struct crypto_cipher *essiv_tfm */
	void *iv_private;
	struct crypto_ablkcipher **tfms;
	unsigned tfms_count;

	/*
	 * Layout of each crypto request:
	 *
	 *   struct ablkcipher_request
	 *      context
	 *      padding
	 *   struct dm_crypt_request
	 *      padding
	 *   IV
	 *
	 * The padding is added so that dm_crypt_request and the IV are
	 * correctly aligned.
	 */
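	/*
	 * Worked example with illustrative numbers (assumed, not taken
	 * from a real configuration): if the ablkcipher request plus
	 * cipher context occupy 88 bytes and struct dm_crypt_request
	 * needs 8-byte alignment, then dmreq_start = ALIGN(88, 8) = 88,
	 * the dm_crypt_request follows at that offset, and the IV lives
	 * at the first (alignmask + 1)-aligned offset past it, which is
	 * exactly what iv_of_dmreq() below computes with ALIGN().
	 */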
	unsigned int dmreq_start;

	unsigned int per_bio_data_size;

	unsigned long flags;
	unsigned int key_size;
	unsigned int key_parts;      /* independent parts in key buffer */
	unsigned int key_extra_size; /* additional keys length */
	u8 key[0];
};

#define MIN_IOS 16

static void clone_init(struct dm_crypt_io *, struct bio *);
static void kcryptd_queue_crypt(struct dm_crypt_io *io);
static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);

/*
 * Use this to access cipher attributes that are the same for each CPU.
 */
static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc)
{
	return cc->tfms[0];
}

/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * plain64: the initial vector is the 64-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possibly other narrow block modes)
 *
 * null: the initial vector is always zero. Provides compatibility with
 *       obsolete loop_fish2 devices. Do not use for new devices.
 *
 * lmk:  Compatible implementation of the block chaining mode used
 *       by the Loop-AES block device encryption system
 *       designed by Jari Ruusu. See http://loop-aes.sourceforge.net/
 *       It operates on full 512 byte sectors and uses CBC
 *       with an IV derived from the sector number, the data and
 *       optionally an extra IV seed.
 *       This means that after decryption the first block of the sector
 *       must be tweaked according to the decrypted data.
 *       Loop-AES can use three encryption schemes:
 *         version 1: plain aes-cbc mode
 *         version 2: uses a 64-key multikey scheme with the lmk IV generator
 *         version 3: the same as version 2 with an additional IV seed
 *                    (it uses 65 keys, the last key is used as the IV seed)
 *
 * tcw:  Compatible implementation of the block chaining mode used
 *       by the TrueCrypt device encryption system (prior to version 4.1).
 *       For more info see: http://www.truecrypt.org
 *       It operates on full 512 byte sectors and uses CBC
 *       with an IV derived from the initial key and the sector number.
 *       In addition, a whitening value is applied to every sector; the
 *       whitening is calculated from the initial key and the sector number
 *       and mixed using CRC32.
 *       Note that this encryption scheme is vulnerable to watermarking attacks
 *       and should only be used to access old compatible containers.
 *
 * plumb: unimplemented, see:
 * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
 */

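/*
 * Illustrative cipher strings using the IV modes above, as they would
 * appear in a mapping table's cipher field (examples only, not an
 * exhaustive list):
 *
 *   aes-cbc-plain
 *   aes-cbc-essiv:sha256
 *   aes-lrw-benbi
 *   aes:64-cbc-lmk   (Loop-AES multikey: 64 keys)
 */
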
static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	*(__le32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);

	return 0;
}

static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
				struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);

	return 0;
}

/* Initialise ESSIV - compute salt but no local memory allocations */
static int crypt_iv_essiv_init(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	struct hash_desc desc;
	struct scatterlist sg;
	struct crypto_cipher *essiv_tfm;
	int err;

	sg_init_one(&sg, cc->key, cc->key_size);
	desc.tfm = essiv->hash_tfm;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_digest(&desc, &sg, cc->key_size, essiv->salt);
	if (err)
		return err;

	essiv_tfm = cc->iv_private;

	err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
				   crypto_hash_digestsize(essiv->hash_tfm));
	if (err)
		return err;

	return 0;
}

/* Wipe salt and reset key derived from volume key */
static int crypt_iv_essiv_wipe(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm);
	struct crypto_cipher *essiv_tfm;
	int r, err = 0;

	memset(essiv->salt, 0, salt_size);

	essiv_tfm = cc->iv_private;
	r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size);
	if (r)
		err = r;

	return err;
}

/* Set up per cpu cipher state */
static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc,
					     struct dm_target *ti,
					     u8 *salt, unsigned saltsize)
{
	struct crypto_cipher *essiv_tfm;
	int err;

	/* Setup the essiv_tfm with the given salt */
	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(essiv_tfm)) {
		ti->error = "Error allocating crypto tfm for ESSIV";
		return essiv_tfm;
	}

	if (crypto_cipher_blocksize(essiv_tfm) !=
	    crypto_ablkcipher_ivsize(any_tfm(cc))) {
		ti->error = "Block size of ESSIV cipher does "
			    "not match IV size of block cipher";
		crypto_free_cipher(essiv_tfm);
		return ERR_PTR(-EINVAL);
	}

	err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
	if (err) {
		ti->error = "Failed to set key for ESSIV cipher";
		crypto_free_cipher(essiv_tfm);
		return ERR_PTR(err);
	}

	return essiv_tfm;
}

static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
	struct crypto_cipher *essiv_tfm;
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;

	crypto_free_hash(essiv->hash_tfm);
	essiv->hash_tfm = NULL;

	kzfree(essiv->salt);
	essiv->salt = NULL;

	essiv_tfm = cc->iv_private;

	if (essiv_tfm)
		crypto_free_cipher(essiv_tfm);

	cc->iv_private = NULL;
}

static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	struct crypto_cipher *essiv_tfm = NULL;
	struct crypto_hash *hash_tfm = NULL;
	u8 *salt = NULL;
	int err;

	if (!opts) {
		ti->error = "Digest algorithm missing for ESSIV mode";
		return -EINVAL;
	}

	/* Allocate hash algorithm */
	hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash_tfm)) {
		ti->error = "Error initializing ESSIV hash";
		err = PTR_ERR(hash_tfm);
		goto bad;
	}

	salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL);
	if (!salt) {
		ti->error = "Error kmallocing salt storage in ESSIV";
		err = -ENOMEM;
		goto bad;
	}

	cc->iv_gen_private.essiv.salt = salt;
	cc->iv_gen_private.essiv.hash_tfm = hash_tfm;

	essiv_tfm = setup_essiv_cpu(cc, ti, salt,
				    crypto_hash_digestsize(hash_tfm));
	if (IS_ERR(essiv_tfm)) {
		crypt_iv_essiv_dtr(cc);
		return PTR_ERR(essiv_tfm);
	}
	cc->iv_private = essiv_tfm;

	return 0;

bad:
	if (hash_tfm && !IS_ERR(hash_tfm))
		crypto_free_hash(hash_tfm);
	kfree(salt);
	return err;
}

static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	struct crypto_cipher *essiv_tfm = cc->iv_private;

	memset(iv, 0, cc->iv_size);
	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
	crypto_cipher_encrypt_one(essiv_tfm, iv, iv);

	return 0;
}

static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	unsigned bs = crypto_ablkcipher_blocksize(any_tfm(cc));
	int log = ilog2(bs);

	/* we need to calculate how far we must shift the sector count
	 * to get the cipher block count; we use this shift in _gen */
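	/*
	 * Example (assuming a 16-byte cipher block, as with AES):
	 * bs = 16, log = 4, so shift = 9 - 4 = 5. Sector 1 then covers
	 * cipher blocks 32..63 and _gen yields (1 << 5) + 1 = 33 as the
	 * 1-based count for its first block.
	 */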
	if (1 << log != bs) {
		ti->error = "cipher blocksize is not a power of 2";
		return -EINVAL;
	}

	if (log > 9) {
		ti->error = "cipher blocksize is > 512";
		return -EINVAL;
	}

	cc->iv_gen_private.benbi.shift = 9 - log;

	return 0;
}

static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
}

static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	__be64 val;

	memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */

	val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1);
	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));

	return 0;
}

static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);

	return 0;
}

static void crypt_iv_lmk_dtr(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm))
		crypto_free_shash(lmk->hash_tfm);
	lmk->hash_tfm = NULL;

	kzfree(lmk->seed);
	lmk->seed = NULL;
}

static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
			    const char *opts)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	lmk->hash_tfm = crypto_alloc_shash("md5", 0, 0);
	if (IS_ERR(lmk->hash_tfm)) {
		ti->error = "Error initializing LMK hash";
		return PTR_ERR(lmk->hash_tfm);
	}

	/* No seed in LMK version 2 */
	if (cc->key_parts == cc->tfms_count) {
		lmk->seed = NULL;
		return 0;
	}

	lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL);
	if (!lmk->seed) {
		crypt_iv_lmk_dtr(cc);
		ti->error = "Error kmallocing seed storage in LMK";
		return -ENOMEM;
	}

	return 0;
}

static int crypt_iv_lmk_init(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
	int subkey_size = cc->key_size / cc->key_parts;

	/* LMK seed is on the position of LMK_KEYS + 1 key */
	if (lmk->seed)
		memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size),
		       crypto_shash_digestsize(lmk->hash_tfm));

	return 0;
}

static int crypt_iv_lmk_wipe(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	if (lmk->seed)
		memset(lmk->seed, 0, LMK_SEED_SIZE);

	return 0;
}

static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq,
			    u8 *data)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
	SHASH_DESC_ON_STACK(desc, lmk->hash_tfm);
	struct md5_state md5state;
	__le32 buf[4];
	int i, r;

	desc->tfm = lmk->hash_tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	r = crypto_shash_init(desc);
	if (r)
		return r;

	if (lmk->seed) {
		r = crypto_shash_update(desc, lmk->seed, LMK_SEED_SIZE);
		if (r)
			return r;
	}

	/* Sector is always 512B, block size 16, add data of blocks 1-31 */
	r = crypto_shash_update(desc, data + 16, 16 * 31);
	if (r)
		return r;

	/* Sector is cropped to 56 bits here */
	buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF);
	buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000);
	buf[2] = cpu_to_le32(4024);
	buf[3] = 0;
	r = crypto_shash_update(desc, (u8 *)buf, sizeof(buf));
	if (r)
		return r;

	/* No MD5 padding here */
	r = crypto_shash_export(desc, &md5state);
	if (r)
		return r;

	for (i = 0; i < MD5_HASH_WORDS; i++)
		__cpu_to_le32s(&md5state.hash[i]);
	memcpy(iv, &md5state.hash, cc->iv_size);

	return 0;
}

static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq)
{
	u8 *src;
	int r = 0;

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
		src = kmap_atomic(sg_page(&dmreq->sg_in));
		r = crypt_iv_lmk_one(cc, iv, dmreq, src + dmreq->sg_in.offset);
		kunmap_atomic(src);
	} else
		memset(iv, 0, cc->iv_size);

	return r;
}

static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	u8 *dst;
	int r;

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
		return 0;

	dst = kmap_atomic(sg_page(&dmreq->sg_out));
	r = crypt_iv_lmk_one(cc, iv, dmreq, dst + dmreq->sg_out.offset);

	/* Tweak the first block of plaintext sector */
	if (!r)
		crypto_xor(dst + dmreq->sg_out.offset, iv, cc->iv_size);

	kunmap_atomic(dst);
	return r;
}

static void crypt_iv_tcw_dtr(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	kzfree(tcw->iv_seed);
	tcw->iv_seed = NULL;
	kzfree(tcw->whitening);
	tcw->whitening = NULL;

	if (tcw->crc32_tfm && !IS_ERR(tcw->crc32_tfm))
		crypto_free_shash(tcw->crc32_tfm);
	tcw->crc32_tfm = NULL;
}

static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
			    const char *opts)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	if (cc->key_size <= (cc->iv_size + TCW_WHITENING_SIZE)) {
		ti->error = "Wrong key size for TCW";
		return -EINVAL;
	}

	tcw->crc32_tfm = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(tcw->crc32_tfm)) {
		ti->error = "Error initializing CRC32 in TCW";
		return PTR_ERR(tcw->crc32_tfm);
	}

	tcw->iv_seed = kzalloc(cc->iv_size, GFP_KERNEL);
	tcw->whitening = kzalloc(TCW_WHITENING_SIZE, GFP_KERNEL);
	if (!tcw->iv_seed || !tcw->whitening) {
		crypt_iv_tcw_dtr(cc);
		ti->error = "Error allocating seed storage in TCW";
		return -ENOMEM;
	}

	return 0;
}

static int crypt_iv_tcw_init(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	int key_offset = cc->key_size - cc->iv_size - TCW_WHITENING_SIZE;

	memcpy(tcw->iv_seed, &cc->key[key_offset], cc->iv_size);
	memcpy(tcw->whitening, &cc->key[key_offset + cc->iv_size],
	       TCW_WHITENING_SIZE);

	return 0;
}

static int crypt_iv_tcw_wipe(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	memset(tcw->iv_seed, 0, cc->iv_size);
	memset(tcw->whitening, 0, TCW_WHITENING_SIZE);

	return 0;
}

static int crypt_iv_tcw_whitening(struct crypt_config *cc,
				  struct dm_crypt_request *dmreq,
				  u8 *data)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	u64 sector = cpu_to_le64((u64)dmreq->iv_sector);
	u8 buf[TCW_WHITENING_SIZE];
	SHASH_DESC_ON_STACK(desc, tcw->crc32_tfm);
	int i, r;

	/* xor whitening with sector number */
	memcpy(buf, tcw->whitening, TCW_WHITENING_SIZE);
	crypto_xor(buf, (u8 *)&sector, 8);
	crypto_xor(&buf[8], (u8 *)&sector, 8);

	/* calculate crc32 for every 32bit part and xor it */
	desc->tfm = tcw->crc32_tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	for (i = 0; i < 4; i++) {
		r = crypto_shash_init(desc);
		if (r)
			goto out;
		r = crypto_shash_update(desc, &buf[i * 4], 4);
		if (r)
			goto out;
		r = crypto_shash_final(desc, &buf[i * 4]);
		if (r)
			goto out;
	}
	crypto_xor(&buf[0], &buf[12], 4);
	crypto_xor(&buf[4], &buf[8], 4);

	/* apply whitening (8 bytes) to whole sector */
	for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++)
		crypto_xor(data + i * 8, buf, 8);
out:
	memzero_explicit(buf, sizeof(buf));
	return r;
}

static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	u64 sector = cpu_to_le64((u64)dmreq->iv_sector);
	u8 *src;
	int r = 0;

	/* Remove whitening from ciphertext */
	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
		src = kmap_atomic(sg_page(&dmreq->sg_in));
		r = crypt_iv_tcw_whitening(cc, dmreq, src + dmreq->sg_in.offset);
		kunmap_atomic(src);
	}

	/* Calculate IV */
	memcpy(iv, tcw->iv_seed, cc->iv_size);
	crypto_xor(iv, (u8 *)&sector, 8);
	if (cc->iv_size > 8)
		crypto_xor(&iv[8], (u8 *)&sector, cc->iv_size - 8);

	return r;
}

static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	u8 *dst;
	int r;

	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
		return 0;

	/* Apply whitening on ciphertext */
	dst = kmap_atomic(sg_page(&dmreq->sg_out));
	r = crypt_iv_tcw_whitening(cc, dmreq, dst + dmreq->sg_out.offset);
	kunmap_atomic(dst);

	return r;
}

static struct crypt_iv_operations crypt_iv_plain_ops = {
	.generator = crypt_iv_plain_gen
};

static struct crypt_iv_operations crypt_iv_plain64_ops = {
	.generator = crypt_iv_plain64_gen
};

static struct crypt_iv_operations crypt_iv_essiv_ops = {
	.ctr       = crypt_iv_essiv_ctr,
	.dtr       = crypt_iv_essiv_dtr,
	.init      = crypt_iv_essiv_init,
	.wipe      = crypt_iv_essiv_wipe,
	.generator = crypt_iv_essiv_gen
};

static struct crypt_iv_operations crypt_iv_benbi_ops = {
	.ctr       = crypt_iv_benbi_ctr,
	.dtr       = crypt_iv_benbi_dtr,
	.generator = crypt_iv_benbi_gen
};

static struct crypt_iv_operations crypt_iv_null_ops = {
	.generator = crypt_iv_null_gen
};

static struct crypt_iv_operations crypt_iv_lmk_ops = {
	.ctr       = crypt_iv_lmk_ctr,
	.dtr       = crypt_iv_lmk_dtr,
	.init      = crypt_iv_lmk_init,
	.wipe      = crypt_iv_lmk_wipe,
	.generator = crypt_iv_lmk_gen,
	.post      = crypt_iv_lmk_post
};

static struct crypt_iv_operations crypt_iv_tcw_ops = {
	.ctr       = crypt_iv_tcw_ctr,
	.dtr       = crypt_iv_tcw_dtr,
	.init      = crypt_iv_tcw_init,
	.wipe      = crypt_iv_tcw_wipe,
	.generator = crypt_iv_tcw_gen,
	.post      = crypt_iv_tcw_post
};

static void crypt_convert_init(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct bio *bio_out, struct bio *bio_in,
			       sector_t sector)
{
	ctx->bio_in = bio_in;
	ctx->bio_out = bio_out;
	if (bio_in)
		ctx->iter_in = bio_in->bi_iter;
	if (bio_out)
		ctx->iter_out = bio_out->bi_iter;
	ctx->cc_sector = sector + cc->iv_offset;
	init_completion(&ctx->restart);
}

static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
					     struct ablkcipher_request *req)
{
	return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
}

static struct ablkcipher_request *req_of_dmreq(struct crypt_config *cc,
					       struct dm_crypt_request *dmreq)
{
	return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start);
}

static u8 *iv_of_dmreq(struct crypt_config *cc,
		       struct dm_crypt_request *dmreq)
{
	return (u8 *)ALIGN((unsigned long)(dmreq + 1),
		crypto_ablkcipher_alignmask(any_tfm(cc)) + 1);
}

static int crypt_convert_block(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct ablkcipher_request *req)
{
	struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
	struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
	struct dm_crypt_request *dmreq;
	u8 *iv;
	int r;

	dmreq = dmreq_of_req(cc, req);
	iv = iv_of_dmreq(cc, dmreq);

	dmreq->iv_sector = ctx->cc_sector;
	dmreq->ctx = ctx;
	sg_init_table(&dmreq->sg_in, 1);
	sg_set_page(&dmreq->sg_in, bv_in.bv_page, 1 << SECTOR_SHIFT,
		    bv_in.bv_offset);

	sg_init_table(&dmreq->sg_out, 1);
	sg_set_page(&dmreq->sg_out, bv_out.bv_page, 1 << SECTOR_SHIFT,
		    bv_out.bv_offset);

	bio_advance_iter(ctx->bio_in, &ctx->iter_in, 1 << SECTOR_SHIFT);
	bio_advance_iter(ctx->bio_out, &ctx->iter_out, 1 << SECTOR_SHIFT);

	if (cc->iv_gen_ops) {
		r = cc->iv_gen_ops->generator(cc, iv, dmreq);
		if (r < 0)
			return r;
	}

	ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
				     1 << SECTOR_SHIFT, iv);

	if (bio_data_dir(ctx->bio_in) == WRITE)
		r = crypto_ablkcipher_encrypt(req);
	else
		r = crypto_ablkcipher_decrypt(req);

	if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
		r = cc->iv_gen_ops->post(cc, iv, dmreq);

	return r;
}

static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error);

static void crypt_alloc_req(struct crypt_config *cc,
			    struct convert_context *ctx)
{
	unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);

	if (!ctx->req)
		ctx->req = mempool_alloc(cc->req_pool, GFP_NOIO);

	ablkcipher_request_set_tfm(ctx->req, cc->tfms[key_index]);
	ablkcipher_request_set_callback(ctx->req,
	    CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
	    kcryptd_async_done, dmreq_of_req(cc, ctx->req));
}

static void crypt_free_req(struct crypt_config *cc,
			   struct ablkcipher_request *req, struct bio *base_bio)
{
	struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);

	if ((struct ablkcipher_request *)(io + 1) != req)
		mempool_free(req, cc->req_pool);
}

/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */
static int crypt_convert(struct crypt_config *cc,
			 struct convert_context *ctx)
{
	int r;

	atomic_set(&ctx->cc_pending, 1);

	while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {

		crypt_alloc_req(cc, ctx);

		atomic_inc(&ctx->cc_pending);

		r = crypt_convert_block(cc, ctx, ctx->req);

		switch (r) {
		/* async */
		case -EBUSY:
			wait_for_completion(&ctx->restart);
			reinit_completion(&ctx->restart);
			/* fall through */
		case -EINPROGRESS:
			ctx->req = NULL;
			ctx->cc_sector++;
			continue;

		/* sync */
		case 0:
			atomic_dec(&ctx->cc_pending);
			ctx->cc_sector++;
			cond_resched();
			continue;

		/* error */
		default:
			atomic_dec(&ctx->cc_pending);
			return r;
		}
	}

	return 0;
}

static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);

/*
 * Generate a new unfragmented bio with the given size.
 * This should never violate the device limitations.
 *
 * This function may be called concurrently. If we allocate from the mempool
 * concurrently, there is a possibility of deadlock. For example, if we have
 * a mempool of 256 pages and two processes, each wanting 256 pages, allocate
 * from the mempool concurrently, it may deadlock in a situation where both
 * processes have allocated 128 pages and the mempool is exhausted.
 *
 * In order to avoid this scenario we allocate the pages under a mutex.
 *
 * In order to not degrade performance with excessive locking, we try
 * non-blocking allocations without a mutex first; on failure we fall back
 * to blocking allocations with a mutex.
 */
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
{
	struct crypt_config *cc = io->cc;
	struct bio *clone;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
	unsigned i, len, remaining_size;
	struct page *page;
	struct bio_vec *bvec;

retry:
	if (unlikely(gfp_mask & __GFP_WAIT))
		mutex_lock(&cc->bio_alloc_lock);

	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
	if (!clone)
		goto return_clone;

	clone_init(io, clone);

	remaining_size = size;

	for (i = 0; i < nr_iovecs; i++) {
		page = mempool_alloc(cc->page_pool, gfp_mask);
		if (!page) {
			crypt_free_buffer_pages(cc, clone);
			bio_put(clone);
			gfp_mask |= __GFP_WAIT;
			goto retry;
		}

		len = (remaining_size > PAGE_SIZE) ? PAGE_SIZE : remaining_size;

		bvec = &clone->bi_io_vec[clone->bi_vcnt++];
		bvec->bv_page = page;
		bvec->bv_len = len;
		bvec->bv_offset = 0;

		clone->bi_iter.bi_size += len;

		remaining_size -= len;
	}

return_clone:
	if (unlikely(gfp_mask & __GFP_WAIT))
		mutex_unlock(&cc->bio_alloc_lock);

	return clone;
}

static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
{
	unsigned int i;
	struct bio_vec *bv;

	bio_for_each_segment_all(bv, clone, i) {
		BUG_ON(!bv->bv_page);
		mempool_free(bv->bv_page, cc->page_pool);
		bv->bv_page = NULL;
	}
}

static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
			  struct bio *bio, sector_t sector)
{
	io->cc = cc;
	io->base_bio = bio;
	io->sector = sector;
	io->error = 0;
	io->ctx.req = NULL;
	atomic_set(&io->io_pending, 0);
}

static void crypt_inc_pending(struct dm_crypt_io *io)
{
	atomic_inc(&io->io_pending);
}

/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 */
static void crypt_dec_pending(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	struct bio *base_bio = io->base_bio;
	int error = io->error;

	if (!atomic_dec_and_test(&io->io_pending))
		return;

	if (io->ctx.req)
		crypt_free_req(cc, io->ctx.req, base_bio);

	bio_endio(base_bio, error);
}

/*
 * kcryptd/kcryptd_io:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated as otherwise the final stages could be
 * starved by new requests which can block in the first stages due
 * to memory allocation.
 *
 * The work is done per CPU, globally for all dm-crypt instances.
 * Work items should not depend on each other and must not block.
 */
static void crypt_endio(struct bio *clone, int error)
{
	struct dm_crypt_io *io = clone->bi_private;
	struct crypt_config *cc = io->cc;
	unsigned rw = bio_data_dir(clone);

	if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
		error = -EIO;

	/*
	 * free the processed pages
	 */
	if (rw == WRITE)
		crypt_free_buffer_pages(cc, clone);

	bio_put(clone);

	if (rw == READ && !error) {
		kcryptd_queue_crypt(io);
		return;
	}

	if (unlikely(error))
		io->error = error;

	crypt_dec_pending(io);
}

static void clone_init(struct dm_crypt_io *io, struct bio *clone)
{
	struct crypt_config *cc = io->cc;

	clone->bi_private = io;
	clone->bi_end_io = crypt_endio;
	clone->bi_bdev = cc->dev->bdev;
	clone->bi_rw = io->base_bio->bi_rw;
}

static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
{
	struct crypt_config *cc = io->cc;
	struct bio *base_bio = io->base_bio;
	struct bio *clone;

	/*
	 * The block layer might modify the bvec array, so always
	 * copy the required bvecs because we need the original
	 * one in order to decrypt the whole bio data *afterwards*.
	 */
	clone = bio_clone_bioset(base_bio, gfp, cc->bs);
	if (!clone)
		return 1;

	crypt_inc_pending(io);

	clone_init(io, clone);
	clone->bi_iter.bi_sector = cc->start + io->sector;

	generic_make_request(clone);
	return 0;
}

static void kcryptd_io_read_work(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	crypt_inc_pending(io);
	if (kcryptd_io_read(io, GFP_NOIO))
		io->error = -ENOMEM;
	crypt_dec_pending(io);
}

static void kcryptd_queue_read(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;

	INIT_WORK(&io->work, kcryptd_io_read_work);
	queue_work(cc->io_queue, &io->work);
}

static void kcryptd_io_write(struct dm_crypt_io *io)
{
	struct bio *clone = io->ctx.bio_out;

	generic_make_request(clone);
}

static int dmcrypt_write(void *data)
{
	struct crypt_config *cc = data;
	while (1) {
		struct list_head local_list;
		struct blk_plug plug;

		DECLARE_WAITQUEUE(wait, current);

		spin_lock_irq(&cc->write_thread_wait.lock);
continue_locked:

		if (!list_empty(&cc->write_thread_list))
			goto pop_from_list;

		__set_current_state(TASK_INTERRUPTIBLE);
		__add_wait_queue(&cc->write_thread_wait, &wait);

		spin_unlock_irq(&cc->write_thread_wait.lock);

		if (unlikely(kthread_should_stop())) {
			set_task_state(current, TASK_RUNNING);
			remove_wait_queue(&cc->write_thread_wait, &wait);
			break;
		}

		schedule();

		set_task_state(current, TASK_RUNNING);
		spin_lock_irq(&cc->write_thread_wait.lock);
		__remove_wait_queue(&cc->write_thread_wait, &wait);
		goto continue_locked;

pop_from_list:
		local_list = cc->write_thread_list;
		local_list.next->prev = &local_list;
		local_list.prev->next = &local_list;
		INIT_LIST_HEAD(&cc->write_thread_list);

		spin_unlock_irq(&cc->write_thread_wait.lock);

		blk_start_plug(&plug);
		do {
			struct dm_crypt_io *io = container_of(local_list.next,
						struct dm_crypt_io, list);
			list_del(&io->list);
			kcryptd_io_write(io);
		} while (!list_empty(&local_list));
		blk_finish_plug(&plug);
	}
	return 0;
}

static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
{
	struct bio *clone = io->ctx.bio_out;
	struct crypt_config *cc = io->cc;
	unsigned long flags;

	if (unlikely(io->error < 0)) {
		crypt_free_buffer_pages(cc, clone);
		bio_put(clone);
		crypt_dec_pending(io);
		return;
	}

	/* crypt_convert should have filled the clone bio */
	BUG_ON(io->ctx.iter_out.bi_size);

	clone->bi_iter.bi_sector = cc->start + io->sector;

	if (likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) {
		generic_make_request(clone);
		return;
	}

	spin_lock_irqsave(&cc->write_thread_wait.lock, flags);
	list_add_tail(&io->list, &cc->write_thread_list);
	wake_up_locked(&cc->write_thread_wait);
	spin_unlock_irqrestore(&cc->write_thread_wait.lock, flags);
}

static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	struct bio *clone;
	int crypt_finished;
	sector_t sector = io->sector;
	int r;

	/*
	 * Prevent io from disappearing until this function completes.
	 */
	crypt_inc_pending(io);
	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);

	clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
	if (unlikely(!clone)) {
		io->error = -EIO;
		goto dec;
	}

	io->ctx.bio_out = clone;
	io->ctx.iter_out = clone->bi_iter;

	sector += bio_sectors(clone);

	crypt_inc_pending(io);
	r = crypt_convert(cc, &io->ctx);
	if (r)
		io->error = -EIO;
	crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);

	/* Encryption was already finished, submit io now */
	if (crypt_finished) {
		kcryptd_crypt_write_io_submit(io, 0);
		io->sector = sector;
	}

dec:
	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
{
	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	int r = 0;

	crypt_inc_pending(io);

	crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
			   io->sector);

	r = crypt_convert(cc, &io->ctx);
	if (r < 0)
		io->error = -EIO;

	if (atomic_dec_and_test(&io->ctx.cc_pending))
		kcryptd_crypt_read_done(io);

	crypt_dec_pending(io);
}

static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error)
{
	struct dm_crypt_request *dmreq = async_req->data;
	struct convert_context *ctx = dmreq->ctx;
	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
	struct crypt_config *cc = io->cc;

	if (error == -EINPROGRESS) {
		complete(&ctx->restart);
		return;
	}

	if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
		error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq);

	if (error < 0)
		io->error = -EIO;

	crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);

	if (!atomic_dec_and_test(&ctx->cc_pending))
		return;

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_done(io);
	else
		kcryptd_crypt_write_io_submit(io, 1);
}

static void kcryptd_crypt(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_convert(io);
	else
		kcryptd_crypt_write_convert(io);
}

static void kcryptd_queue_crypt(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;

	INIT_WORK(&io->work, kcryptd_crypt);
	queue_work(cc->crypt_queue, &io->work);
}

/*
 * Decode key from its hex representation
 */
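/* e.g. the hex string "cafe" decodes to the two key bytes 0xca 0xfe */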
static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
{
	char buffer[3];
	unsigned int i;

	buffer[2] = '\0';

	for (i = 0; i < size; i++) {
		buffer[0] = *hex++;
		buffer[1] = *hex++;

		if (kstrtou8(buffer, 16, &key[i]))
			return -EINVAL;
	}

	if (*hex != '\0')
		return -EINVAL;

	return 0;
}

static void crypt_free_tfms(struct crypt_config *cc)
{
	unsigned i;

	if (!cc->tfms)
		return;

	for (i = 0; i < cc->tfms_count; i++)
		if (cc->tfms[i] && !IS_ERR(cc->tfms[i])) {
			crypto_free_ablkcipher(cc->tfms[i]);
			cc->tfms[i] = NULL;
		}

	kfree(cc->tfms);
	cc->tfms = NULL;
}

static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
{
	unsigned i;
	int err;

	cc->tfms = kmalloc(cc->tfms_count * sizeof(struct crypto_ablkcipher *),
			   GFP_KERNEL);
	if (!cc->tfms)
		return -ENOMEM;

	for (i = 0; i < cc->tfms_count; i++) {
		cc->tfms[i] = crypto_alloc_ablkcipher(ciphermode, 0, 0);
		if (IS_ERR(cc->tfms[i])) {
			err = PTR_ERR(cc->tfms[i]);
			crypt_free_tfms(cc);
			return err;
		}
	}

	return 0;
}

static int crypt_setkey_allcpus(struct crypt_config *cc)
{
	unsigned subkey_size;
	int err = 0, i, r;

	/* Ignore extra keys (which are used for IV etc) */
	subkey_size = (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count);

	for (i = 0; i < cc->tfms_count; i++) {
		r = crypto_ablkcipher_setkey(cc->tfms[i],
					     cc->key + (i * subkey_size),
					     subkey_size);
		if (r)
			err = r;
	}

	return err;
}

static int crypt_set_key(struct crypt_config *cc, char *key)
{
	int r = -EINVAL;
	int key_string_len = strlen(key);

	/* The key size may not be changed. */
	if (cc->key_size != (key_string_len >> 1))
		goto out;

	/* Hyphen (which gives a key_size of zero) means there is no key. */
	if (!cc->key_size && strcmp(key, "-"))
		goto out;

	if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0)
		goto out;

	set_bit(DM_CRYPT_KEY_VALID, &cc->flags);

	r = crypt_setkey_allcpus(cc);

out:
	/* Hex key string not needed after here, so wipe it. */
	memset(key, '0', key_string_len);

	return r;
}

static int crypt_wipe_key(struct crypt_config *cc)
{
	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
	memset(&cc->key, 0, cc->key_size * sizeof(u8));

	return crypt_setkey_allcpus(cc);
}

static void crypt_dtr(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	ti->private = NULL;

	if (!cc)
		return;

	if (cc->write_thread)
		kthread_stop(cc->write_thread);

	if (cc->io_queue)
		destroy_workqueue(cc->io_queue);
	if (cc->crypt_queue)
		destroy_workqueue(cc->crypt_queue);

	crypt_free_tfms(cc);

	if (cc->bs)
		bioset_free(cc->bs);

	if (cc->page_pool)
		mempool_destroy(cc->page_pool);
	if (cc->req_pool)
		mempool_destroy(cc->req_pool);

	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);

	if (cc->dev)
		dm_put_device(ti, cc->dev);

	kzfree(cc->cipher);
	kzfree(cc->cipher_string);

	/* Must zero key material before freeing */
	kzfree(cc);
}

static int crypt_ctr_cipher(struct dm_target *ti,
			    char *cipher_in, char *key)
{
	struct crypt_config *cc = ti->private;
	char *tmp, *cipher, *chainmode, *ivmode, *ivopts, *keycount;
	char *cipher_api = NULL;
	int ret = -EINVAL;
	char dummy;

	/* Convert to crypto api definition? */
	if (strchr(cipher_in, '(')) {
		ti->error = "Bad cipher specification";
		return -EINVAL;
	}

	cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL);
	if (!cc->cipher_string)
		goto bad_mem;

	/*
	 * Legacy dm-crypt cipher specification
	 * cipher[:keycount]-mode-iv:ivopts
	 */
	tmp = cipher_in;
	keycount = strsep(&tmp, "-");
	cipher = strsep(&keycount, ":");

	if (!keycount)
		cc->tfms_count = 1;
	else if (sscanf(keycount, "%u%c", &cc->tfms_count, &dummy) != 1 ||
		 !is_power_of_2(cc->tfms_count)) {
		ti->error = "Bad cipher key count specification";
		return -EINVAL;
	}
	cc->key_parts = cc->tfms_count;
	cc->key_extra_size = 0;

	cc->cipher = kstrdup(cipher, GFP_KERNEL);
	if (!cc->cipher)
		goto bad_mem;

	chainmode = strsep(&tmp, "-");
	ivopts = strsep(&tmp, "-");
	ivmode = strsep(&ivopts, ":");

	if (tmp)
		DMWARN("Ignoring unexpected additional cipher options");

	/*
	 * For compatibility with the original dm-crypt mapping format, if
	 * only the cipher name is supplied, use cbc-plain.
	 */
	if (!chainmode || (!strcmp(chainmode, "plain") && !ivmode)) {
		chainmode = "cbc";
		ivmode = "plain";
	}

	if (strcmp(chainmode, "ecb") && !ivmode) {
		ti->error = "IV mechanism required";
		return -EINVAL;
	}

	cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL);
	if (!cipher_api)
		goto bad_mem;

	ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
		       "%s(%s)", chainmode, cipher);
	if (ret < 0) {
		kfree(cipher_api);
		goto bad_mem;
	}

	/* Allocate cipher */
	ret = crypt_alloc_tfms(cc, cipher_api);
	if (ret < 0) {
		ti->error = "Error allocating crypto tfm";
		goto bad;
	}

	/* Initialize IV */
	cc->iv_size = crypto_ablkcipher_ivsize(any_tfm(cc));
	if (cc->iv_size)
		/* at least a 64 bit sector number should fit in our buffer */
		cc->iv_size = max(cc->iv_size,
				  (unsigned int)(sizeof(u64) / sizeof(u8)));
	else if (ivmode) {
		DMWARN("Selected cipher does not support IVs");
		ivmode = NULL;
	}

	/* Choose ivmode, see comments at iv code. */
	if (ivmode == NULL)
		cc->iv_gen_ops = NULL;
	else if (strcmp(ivmode, "plain") == 0)
		cc->iv_gen_ops = &crypt_iv_plain_ops;
	else if (strcmp(ivmode, "plain64") == 0)
		cc->iv_gen_ops = &crypt_iv_plain64_ops;
	else if (strcmp(ivmode, "essiv") == 0)
		cc->iv_gen_ops = &crypt_iv_essiv_ops;
	else if (strcmp(ivmode, "benbi") == 0)
		cc->iv_gen_ops = &crypt_iv_benbi_ops;
	else if (strcmp(ivmode, "null") == 0)
		cc->iv_gen_ops = &crypt_iv_null_ops;
	else if (strcmp(ivmode, "lmk") == 0) {
		cc->iv_gen_ops = &crypt_iv_lmk_ops;
		/*
		 * Versions 2 and 3 are recognised according
		 * to the length of the provided multi-key string.
		 * If present (version 3), the last key is used as the IV seed.
		 * All keys (including the IV seed) are always the same size.
		 */
		if (cc->key_size % cc->key_parts) {
			cc->key_parts++;
			cc->key_extra_size = cc->key_size / cc->key_parts;
		}
	} else if (strcmp(ivmode, "tcw") == 0) {
		cc->iv_gen_ops = &crypt_iv_tcw_ops;
		cc->key_parts += 2; /* IV + whitening */
		cc->key_extra_size = cc->iv_size + TCW_WHITENING_SIZE;
	} else {
		ret = -EINVAL;
		ti->error = "Invalid IV mode";
		goto bad;
	}

	/* Initialize and set key */
	ret = crypt_set_key(cc, key);
	if (ret < 0) {
		ti->error = "Error decoding and setting key";
		goto bad;
	}

	/* Allocate IV */
	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
		ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
		if (ret < 0) {
			ti->error = "Error creating IV";
			goto bad;
		}
	}

	/* Initialize IV (set keys for ESSIV etc) */
	if (cc->iv_gen_ops && cc->iv_gen_ops->init) {
		ret = cc->iv_gen_ops->init(cc);
		if (ret < 0) {
			ti->error = "Error initialising IV";
			goto bad;
		}
	}

	ret = 0;
bad:
	kfree(cipher_api);
	return ret;

bad_mem:
	ti->error = "Cannot allocate cipher strings";
	return -ENOMEM;
}

/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start>
 */
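/*
 * Illustrative dmsetup table line (key and device are placeholders,
 * chosen for the example only):
 *
 *   0 409600 crypt aes-cbc-essiv:sha256 <64 hex digits> 0 /dev/sda 8 \
 *       2 allow_discards same_cpu_crypt
 *
 * The trailing "2 allow_discards same_cpu_crypt" is the optional feature
 * argument count followed by the feature arguments parsed below.
 */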
1690static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1691{
1692 struct crypt_config *cc;
772ae5f5 1693 unsigned int key_size, opt_params;
5ebaee6d
MB
1694 unsigned long long tmpll;
1695 int ret;
d49ec52f 1696 size_t iv_size_padding;
772ae5f5
MB
1697 struct dm_arg_set as;
1698 const char *opt_string;
31998ef1 1699 char dummy;
772ae5f5
MB
1700
1701 static struct dm_arg _args[] = {
0f5d8e6e 1702 {0, 3, "Invalid number of feature args"},
772ae5f5 1703 };
5ebaee6d 1704
772ae5f5 1705 if (argc < 5) {
5ebaee6d
MB
1706 ti->error = "Not enough arguments";
1707 return -EINVAL;
1da177e4
LT
1708 }
1709
5ebaee6d
MB
1710 key_size = strlen(argv[1]) >> 1;
1711
1712 cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
1713 if (!cc) {
1714 ti->error = "Cannot allocate encryption context";
1715 return -ENOMEM;
1716 }
69a8cfcd 1717 cc->key_size = key_size;
5ebaee6d
MB
1718
1719 ti->private = cc;
1720 ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
1721 if (ret < 0)
1722 goto bad;
1723
ddd42edf 1724 cc->dmreq_start = sizeof(struct ablkcipher_request);
c0297721 1725 cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
d49ec52f
MP
1726 cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));
1727
1728 if (crypto_ablkcipher_alignmask(any_tfm(cc)) < CRYPTO_MINALIGN) {
1729 /* Allocate the padding exactly */
1730 iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request))
1731 & crypto_ablkcipher_alignmask(any_tfm(cc));
1732 } else {
1733 /*
1734 * If the cipher requires greater alignment than kmalloc
1735 * alignment, we don't know the exact position of the
1736 * initialization vector. We must assume worst case.
1737 */
1738 iv_size_padding = crypto_ablkcipher_alignmask(any_tfm(cc));
1739 }
ddd42edf 1740
94f5e024 1741 ret = -ENOMEM;
ddd42edf 1742 cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
d49ec52f 1743 sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size);
ddd42edf
MB
1744 if (!cc->req_pool) {
1745 ti->error = "Cannot allocate crypt request mempool";
28513fcc 1746 goto bad;
ddd42edf 1747 }
ddd42edf 1748
298a9fa0 1749 cc->per_bio_data_size = ti->per_bio_data_size =
d49ec52f
MP
1750 ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start +
1751 sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size,
1752 ARCH_KMALLOC_MINALIGN);
298a9fa0 1753
cf2f1abf 1754 cc->page_pool = mempool_create_page_pool(BIO_MAX_PAGES, 0);
1da177e4 1755 if (!cc->page_pool) {
72d94861 1756 ti->error = "Cannot allocate page mempool";
28513fcc 1757 goto bad;
1da177e4
LT
1758 }
1759
bb799ca0 1760 cc->bs = bioset_create(MIN_IOS, 0);
6a24c718
MB
1761 if (!cc->bs) {
1762 ti->error = "Cannot allocate crypt bioset";
28513fcc 1763 goto bad;
6a24c718
MB
1764 }
1765
7145c241
MP
1766 mutex_init(&cc->bio_alloc_lock);
1767
28513fcc 1768 ret = -EINVAL;
31998ef1 1769 if (sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) {
72d94861 1770 ti->error = "Invalid iv_offset sector";
28513fcc 1771 goto bad;
1da177e4 1772 }
4ee218cd 1773 cc->iv_offset = tmpll;
1da177e4 1774
28513fcc
MB
1775 if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev)) {
1776 ti->error = "Device lookup failed";
1777 goto bad;
1778 }
1779
31998ef1 1780 if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) {
72d94861 1781 ti->error = "Invalid device sector";
28513fcc 1782 goto bad;
1da177e4 1783 }
4ee218cd 1784 cc->start = tmpll;
1da177e4 1785
772ae5f5
MB
1786 argv += 5;
1787 argc -= 5;
1788
1789 /* Optional parameters */
1790 if (argc) {
1791 as.argc = argc;
1792 as.argv = argv;
1793
1794 ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
1795 if (ret)
1796 goto bad;
1797
f3396c58
MP
1798 while (opt_params--) {
1799 opt_string = dm_shift_arg(&as);
1800 if (!opt_string) {
1801 ti->error = "Not enough feature arguments";
1802 goto bad;
1803 }
772ae5f5 1804
f3396c58
MP
1805 if (!strcasecmp(opt_string, "allow_discards"))
1806 ti->num_discard_bios = 1;
1807
1808 else if (!strcasecmp(opt_string, "same_cpu_crypt"))
1809 set_bit(DM_CRYPT_SAME_CPU, &cc->flags);
1810
0f5d8e6e
MP
1811 else if (!strcasecmp(opt_string, "submit_from_crypt_cpus"))
1812 set_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
1813
f3396c58 1814			else {
1815 ti->error = "Invalid feature arguments";
1816 goto bad;
1817 }
772ae5f5 1818		}
1819 }
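	/*
	 * Editorial example (values invented): with feature arguments the
	 * table line grows a counted tail, e.g.
	 *
	 *	... /dev/sda1 8192 2 same_cpu_crypt submit_from_crypt_cpus
	 *
	 * where "2" becomes opt_params and each keyword is matched by the
	 * strcasecmp() chain above.
	 */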
1820
28513fcc 1821 ret = -ENOMEM;
670368a8 1822 cc->io_queue = alloc_workqueue("kcryptd_io", WQ_MEM_RECLAIM, 1);
cabf08e4 1823	if (!cc->io_queue) {
1824 ti->error = "Couldn't create kcryptd io queue";
28513fcc 1825 goto bad;
cabf08e4 1826	}
1827
f3396c58 1828	if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
1829 cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
1830 else
1831 cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
1832 num_online_cpus());
cabf08e4 1833 if (!cc->crypt_queue) {
9934a8be 1834 ti->error = "Couldn't create kcryptd queue";
28513fcc 1835 goto bad;
9934a8be 1836	}
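	/*
	 * Editorial note: with same_cpu_crypt the queue is bound, so work
	 * stays on the submitting CPU with a max_active of 1; otherwise
	 * WQ_UNBOUND lets the scheduler spread encryption work across up
	 * to num_online_cpus() concurrent work items.
	 */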
1837
dc267621 1838	init_waitqueue_head(&cc->write_thread_wait);
1839 INIT_LIST_HEAD(&cc->write_thread_list);
1840
1841 cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write");
1842 if (IS_ERR(cc->write_thread)) {
1843 ret = PTR_ERR(cc->write_thread);
1844 cc->write_thread = NULL;
1845 ti->error = "Couldn't spawn write thread";
1846 goto bad;
1847 }
1848 wake_up_process(cc->write_thread);
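	/*
	 * Editorial note: kthread_create() returns the thread in a stopped
	 * state, so wake_up_process() is what actually starts
	 * dmcrypt_write(), which services the write_thread_list
	 * initialized above.
	 */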
1849
55a62eef 1850 ti->num_flush_bios = 1;
0ac55489 1851 ti->discard_zeroes_data_unsupported = true;
983c7db3 1852
1da177e4 1853	return 0;
1854
28513fcc 1855bad:
1856 crypt_dtr(ti);
1857 return ret;
1da177e4 1858}
1859
7de3ee57 1860static int crypt_map(struct dm_target *ti, struct bio *bio)
1da177e4 1861{
028867ac 1862 struct dm_crypt_io *io;
49a8a920 1863 struct crypt_config *cc = ti->private;
647c7db1 1864
772ae5f5 1865	/*
1866 * If bio is REQ_FLUSH or REQ_DISCARD, just bypass crypt queues.
1867 * - for REQ_FLUSH device-mapper core ensures that no IO is in-flight
1868 * - for REQ_DISCARD caller must use flush if IO ordering matters
1869 */
1870 if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) {
647c7db1 1871 bio->bi_bdev = cc->dev->bdev;
772ae5f5 1872 if (bio_sectors(bio))
4f024f37 1873		bio->bi_iter.bi_sector = cc->start +
1874 dm_target_offset(ti, bio->bi_iter.bi_sector);
647c7db1 1875		return DM_MAPIO_REMAPPED;
1876 }
1da177e4 1877
298a9fa0 1878	io = dm_per_bio_data(bio, cc->per_bio_data_size);
1879 crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
1880 io->ctx.req = (struct ablkcipher_request *)(io + 1);
cabf08e4 1881
20c82538 1882	if (bio_data_dir(io->base_bio) == READ) {
1883 if (kcryptd_io_read(io, GFP_NOWAIT))
dc267621 1884 kcryptd_queue_read(io);
20c82538 1885 } else
cabf08e4 1886 kcryptd_queue_crypt(io);
1da177e4 1887
d2a7ad29 1888 return DM_MAPIO_SUBMITTED;
1da177e4 1889}
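/*
 * Editorial note on the map path above: READ bios first try a non-blocking
 * clone via kcryptd_io_read() with GFP_NOWAIT and are deferred to the
 * io_queue only when that attempt fails; WRITE bios always go through the
 * crypt workqueue for encryption before being submitted.
 */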
1890
fd7c092e 1891static void crypt_status(struct dm_target *ti, status_type_t type,
1892 unsigned status_flags, char *result, unsigned maxlen)
1da177e4 1893{
5ebaee6d 1894 struct crypt_config *cc = ti->private;
fd7c092e 1895 unsigned i, sz = 0;
f3396c58 1896 int num_feature_args = 0;
1da177e4 1897
1898 switch (type) {
1899 case STATUSTYPE_INFO:
1900 result[0] = '\0';
1901 break;
1902
1903 case STATUSTYPE_TABLE:
7dbcd137 1904 DMEMIT("%s ", cc->cipher_string);
1da177e4 1905
fd7c092e 1906		if (cc->key_size > 0)
1907 for (i = 0; i < cc->key_size; i++)
1908 DMEMIT("%02x", cc->key[i]);
1909 else
1910 DMEMIT("-");
1da177e4 1911
4ee218cd 1912		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
1913 cc->dev->name, (unsigned long long)cc->start);
772ae5f5 1914
f3396c58 1915		num_feature_args += !!ti->num_discard_bios;
1916 num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags);
0f5d8e6e 1917 num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
f3396c58 1918		if (num_feature_args) {
1919 DMEMIT(" %d", num_feature_args);
1920 if (ti->num_discard_bios)
1921 DMEMIT(" allow_discards");
1922 if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
1923 DMEMIT(" same_cpu_crypt");
0f5d8e6e 1924			if (test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags))
1925 DMEMIT(" submit_from_crypt_cpus");
f3396c58 1926 }
772ae5f5 1927
1da177e4 1928		break;
1929 }
1da177e4 1930}
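/*
 * Editorial example of the STATUSTYPE_TABLE output assembled above (all
 * values invented, device shown as recorded in cc->dev->name):
 *
 *	aes-cbc-essiv:sha256 d1b2...e4f5 0 /dev/sda1 8192 1 allow_discards
 *
 * A zero-length key is emitted as "-".
 */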
1931
e48d4bbf 1932static void crypt_postsuspend(struct dm_target *ti)
1933{
1934 struct crypt_config *cc = ti->private;
1935
1936 set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
1937}
1938
1939static int crypt_preresume(struct dm_target *ti)
1940{
1941 struct crypt_config *cc = ti->private;
1942
1943 if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
1944 DMERR("aborting resume - crypt key is not set.");
1945 return -EAGAIN;
1946 }
1947
1948 return 0;
1949}
1950
1951static void crypt_resume(struct dm_target *ti)
1952{
1953 struct crypt_config *cc = ti->private;
1954
1955 clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
1956}
1957
1958/* Message interface
1959 * key set <key>
1960 * key wipe
1961 */
1962static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
1963{
1964 struct crypt_config *cc = ti->private;
542da317 1965 int ret = -EINVAL;
e48d4bbf 1966
1967 if (argc < 2)
1968 goto error;
1969
498f0103 1970 if (!strcasecmp(argv[0], "key")) {
e48d4bbf 1971		if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
1972 DMWARN("not suspended during key manipulation.");
1973 return -EINVAL;
1974 }
498f0103 1975 if (argc == 3 && !strcasecmp(argv[1], "set")) {
542da317 1976			ret = crypt_set_key(cc, argv[2]);
1977 if (ret)
1978 return ret;
1979 if (cc->iv_gen_ops && cc->iv_gen_ops->init)
1980 ret = cc->iv_gen_ops->init(cc);
1981 return ret;
1982 }
498f0103 1983 if (argc == 2 && !strcasecmp(argv[1], "wipe")) {
542da317 1984			if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
1985 ret = cc->iv_gen_ops->wipe(cc);
1986 if (ret)
1987 return ret;
1988 }
e48d4bbf 1989 return crypt_wipe_key(cc);
542da317 1990 }
e48d4bbf 1991	}
1992
1993error:
1994 DMWARN("unrecognised message received.");
1995 return -EINVAL;
1996}
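/*
 * Editorial usage sketch for the message interface above ("cryptdev" is a
 * made-up mapping name; assumes the standard dmsetup tool):
 *
 *	dmsetup suspend cryptdev
 *	dmsetup message cryptdev 0 key wipe
 *	dmsetup message cryptdev 0 key set <new hex key>
 *	dmsetup resume cryptdev
 *
 * The suspend is mandatory: the handler rejects key manipulation unless
 * DM_CRYPT_SUSPENDED is set, and crypt_preresume() refuses to resume
 * without a valid key.
 */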
1997
d41e26b9 1998static int crypt_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
1999 struct bio_vec *biovec, int max_size)
2000{
2001 struct crypt_config *cc = ti->private;
2002 struct request_queue *q = bdev_get_queue(cc->dev->bdev);
2003
2004 if (!q->merge_bvec_fn)
2005 return max_size;
2006
2007 bvm->bi_bdev = cc->dev->bdev;
b441a262 2008 bvm->bi_sector = cc->start + dm_target_offset(ti, bvm->bi_sector);
d41e26b9 2009
2010 return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
2011}
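/*
 * Editorial note: crypt_merge() simply remaps the query sector into the
 * underlying device and forwards the decision to that queue's
 * merge_bvec_fn, capping the answer at the caller's max_size.
 */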
2012
af4874e0 2013static int crypt_iterate_devices(struct dm_target *ti,
2014 iterate_devices_callout_fn fn, void *data)
2015{
2016 struct crypt_config *cc = ti->private;
2017
5dea271b 2018 return fn(ti, cc->dev, cc->start, ti->len, data);
af4874e0 2019}
2020
1da177e4 2021static struct target_type crypt_target = {
2022 .name = "crypt",
f3396c58 2023 .version = {1, 14, 0},
1da177e4 2024	.module = THIS_MODULE,
2025 .ctr = crypt_ctr,
2026 .dtr = crypt_dtr,
2027 .map = crypt_map,
2028 .status = crypt_status,
e48d4bbf 2029	.postsuspend = crypt_postsuspend,
2030 .preresume = crypt_preresume,
2031 .resume = crypt_resume,
2032 .message = crypt_message,
d41e26b9 2033 .merge = crypt_merge,
af4874e0 2034 .iterate_devices = crypt_iterate_devices,
1da177e4 2035};
2036
2037static int __init dm_crypt_init(void)
2038{
2039 int r;
2040
1da177e4 2041 r = dm_register_target(&crypt_target);
94f5e024 2042 if (r < 0)
72d94861 2043 DMERR("register failed %d", r);
1da177e4 2044
1da177e4 2045	return r;
2046}
2047
2048static void __exit dm_crypt_exit(void)
2049{
10d3bd09 2050 dm_unregister_target(&crypt_target);
1da177e4 2051}
2052
2053module_init(dm_crypt_init);
2054module_exit(dm_crypt_exit);
2055
bf14299f 2056MODULE_AUTHOR("Jana Saout <jana@saout.de>");
1da177e4 2057MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
2058MODULE_LICENSE("GPL");