/*
 * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
 * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/backing-dev.h>
#include <linux/percpu.h>
#include <asm/atomic.h>
#include <linux/scatterlist.h>
#include <asm/page.h>
#include <asm/unaligned.h>

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "crypt"
#define MESG_STR(x) x, sizeof(x)

/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
	struct completion restart;
	struct bio *bio_in;
	struct bio *bio_out;
	unsigned int offset_in;
	unsigned int offset_out;
	unsigned int idx_in;
	unsigned int idx_out;
	sector_t sector;
	atomic_t pending;
};

/*
 * per bio private data
 */
struct dm_crypt_io {
	struct dm_target *target;
	struct bio *base_bio;
	struct work_struct work;

	struct convert_context ctx;

	atomic_t pending;
	int error;
	sector_t sector;
	struct dm_crypt_io *base_io;
};

struct dm_crypt_request {
	struct convert_context *ctx;
	struct scatterlist sg_in;
	struct scatterlist sg_out;
	sector_t iv_sector;
};

struct crypt_config;

struct crypt_iv_operations {
	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
		   const char *opts);
	void (*dtr)(struct crypt_config *cc);
	int (*init)(struct crypt_config *cc);
	int (*wipe)(struct crypt_config *cc);
	int (*generator)(struct crypt_config *cc, u8 *iv,
			 struct dm_crypt_request *dmreq);
	int (*post)(struct crypt_config *cc, u8 *iv,
		    struct dm_crypt_request *dmreq);
};

struct iv_essiv_private {
	struct crypto_hash *hash_tfm;
	u8 *salt;
};

struct iv_benbi_private {
	int shift;
};

/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };

/*
 * Duplicated per-CPU state for cipher.
 */
struct crypt_cpu {
	struct ablkcipher_request *req;
	/* ESSIV: struct crypto_cipher *essiv_tfm */
	void *iv_private;
	struct crypto_ablkcipher *tfms[0];
};

/*
 * The fields in here must be read only after initialization,
 * changing state should be in crypt_cpu.
 */
struct crypt_config {
	struct dm_dev *dev;
	sector_t start;

	/*
	 * pool for per bio private data, crypto requests and
	 * encryption requests/buffer pages
	 */
	mempool_t *io_pool;
	mempool_t *req_pool;
	mempool_t *page_pool;
	struct bio_set *bs;

	struct workqueue_struct *io_queue;
	struct workqueue_struct *crypt_queue;

	char *cipher;
	char *cipher_string;

	struct crypt_iv_operations *iv_gen_ops;
	union {
		struct iv_essiv_private essiv;
		struct iv_benbi_private benbi;
	} iv_gen_private;
	sector_t iv_offset;
	unsigned int iv_size;

	/*
	 * Duplicated per cpu state. Access through
	 * per_cpu_ptr() only.
	 */
	struct crypt_cpu __percpu *cpu;
	unsigned tfms_count;

	/*
	 * Layout of each crypto request:
	 *
	 *   struct ablkcipher_request
	 *      context
	 *      padding
	 *   struct dm_crypt_request
	 *      padding
	 *   IV
	 *
	 * The padding is added so that dm_crypt_request and the IV are
	 * correctly aligned.
	 */
	unsigned int dmreq_start;

	unsigned long flags;
	unsigned int key_size;
	unsigned int key_parts;
	u8 key[0];
};

#define MIN_IOS        16
#define MIN_POOL_PAGES 32
#define MIN_BIO_PAGES  8

static struct kmem_cache *_crypt_io_pool;

static void clone_init(struct dm_crypt_io *, struct bio *);
static void kcryptd_queue_crypt(struct dm_crypt_io *io);
static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);

static struct crypt_cpu *this_crypt_config(struct crypt_config *cc)
{
	return this_cpu_ptr(cc->cpu);
}

/*
 * Use this to access cipher attributes that are the same for each CPU.
 */
static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc)
{
	return __this_cpu_ptr(cc->cpu)->tfms[0];
}

/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * plain64: the initial vector is the 64-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possibly other narrow block modes)
 *
 * null: the initial vector is always zero.  Provides compatibility with
 *       obsolete loop_fish2 devices.  Do not use for new devices.
 *
 * plumb: unimplemented, see:
 * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
 */

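/*
 * Editor's illustration (not in the original source): with a 16-byte IV
 * and sector number 0x0123456789ab, plain keeps only the low 32 bits,
 * stored little-endian:
 *   ab 89 67 45 00 00 00 00 00 00 00 00 00 00 00 00
 * while plain64 keeps the full 64-bit sector number:
 *   ab 89 67 45 23 01 00 00 00 00 00 00 00 00 00 00
 * This is why plain repeats IVs on devices larger than 2^32 sectors and
 * plain64 is preferred there.
 */
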
static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	*(u32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);

	return 0;
}

static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
				struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	*(u64 *)iv = cpu_to_le64(dmreq->iv_sector);

	return 0;
}

/* Initialise ESSIV - compute salt but no local memory allocations */
static int crypt_iv_essiv_init(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	struct hash_desc desc;
	struct scatterlist sg;
	struct crypto_cipher *essiv_tfm;
	int err, cpu;

	sg_init_one(&sg, cc->key, cc->key_size);
	desc.tfm = essiv->hash_tfm;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_digest(&desc, &sg, cc->key_size, essiv->salt);
	if (err)
		return err;

	for_each_possible_cpu(cpu) {
		essiv_tfm = per_cpu_ptr(cc->cpu, cpu)->iv_private;

		err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
					   crypto_hash_digestsize(essiv->hash_tfm));
		if (err)
			return err;
	}

	return 0;
}

/* Wipe salt and reset key derived from volume key */
static int crypt_iv_essiv_wipe(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm);
	struct crypto_cipher *essiv_tfm;
	int cpu, r, err = 0;

	memset(essiv->salt, 0, salt_size);

	for_each_possible_cpu(cpu) {
		essiv_tfm = per_cpu_ptr(cc->cpu, cpu)->iv_private;
		r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size);
		if (r)
			err = r;
	}

	return err;
}

/* Set up per cpu cipher state */
static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc,
					     struct dm_target *ti,
					     u8 *salt, unsigned saltsize)
{
	struct crypto_cipher *essiv_tfm;
	int err;

	/* Setup the essiv_tfm with the given salt */
	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(essiv_tfm)) {
		ti->error = "Error allocating crypto tfm for ESSIV";
		return essiv_tfm;
	}

	if (crypto_cipher_blocksize(essiv_tfm) !=
	    crypto_ablkcipher_ivsize(any_tfm(cc))) {
		ti->error = "Block size of ESSIV cipher does "
			    "not match IV size of block cipher";
		crypto_free_cipher(essiv_tfm);
		return ERR_PTR(-EINVAL);
	}

	err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
	if (err) {
		ti->error = "Failed to set key for ESSIV cipher";
		crypto_free_cipher(essiv_tfm);
		return ERR_PTR(err);
	}

	return essiv_tfm;
}

static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
	int cpu;
	struct crypt_cpu *cpu_cc;
	struct crypto_cipher *essiv_tfm;
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;

	crypto_free_hash(essiv->hash_tfm);
	essiv->hash_tfm = NULL;

	kzfree(essiv->salt);
	essiv->salt = NULL;

	for_each_possible_cpu(cpu) {
		cpu_cc = per_cpu_ptr(cc->cpu, cpu);
		essiv_tfm = cpu_cc->iv_private;

		if (essiv_tfm)
			crypto_free_cipher(essiv_tfm);

		cpu_cc->iv_private = NULL;
	}
}

static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	struct crypto_cipher *essiv_tfm = NULL;
	struct crypto_hash *hash_tfm = NULL;
	u8 *salt = NULL;
	int err, cpu;

	if (!opts) {
		ti->error = "Digest algorithm missing for ESSIV mode";
		return -EINVAL;
	}

	/* Allocate hash algorithm */
	hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash_tfm)) {
		ti->error = "Error initializing ESSIV hash";
		err = PTR_ERR(hash_tfm);
		goto bad;
	}

	salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL);
	if (!salt) {
		ti->error = "Error kmallocing salt storage in ESSIV";
		err = -ENOMEM;
		goto bad;
	}

	cc->iv_gen_private.essiv.salt = salt;
	cc->iv_gen_private.essiv.hash_tfm = hash_tfm;

	for_each_possible_cpu(cpu) {
		essiv_tfm = setup_essiv_cpu(cc, ti, salt,
					crypto_hash_digestsize(hash_tfm));
		if (IS_ERR(essiv_tfm)) {
			crypt_iv_essiv_dtr(cc);
			return PTR_ERR(essiv_tfm);
		}
		per_cpu_ptr(cc->cpu, cpu)->iv_private = essiv_tfm;
	}

	return 0;

bad:
	if (hash_tfm && !IS_ERR(hash_tfm))
		crypto_free_hash(hash_tfm);
	kfree(salt);
	return err;
}

static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	struct crypto_cipher *essiv_tfm = this_crypt_config(cc)->iv_private;

	memset(iv, 0, cc->iv_size);
	*(u64 *)iv = cpu_to_le64(dmreq->iv_sector);
	crypto_cipher_encrypt_one(essiv_tfm, iv, iv);

	return 0;
}

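/*
 * Editor's note (not in the original source): the net effect is
 * IV = E_salt(sector), where salt = H(volume key) was set on essiv_tfm by
 * crypt_iv_essiv_init(). Unlike plain/plain64, the IV of a given sector is
 * therefore not predictable without knowledge of the key.
 */
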
static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	unsigned bs = crypto_ablkcipher_blocksize(any_tfm(cc));
	int log = ilog2(bs);

	/* we need to calculate how far we must shift the sector count
	 * to get the cipher block count, we use this shift in _gen */

	if (1 << log != bs) {
		ti->error = "cypher blocksize is not a power of 2";
		return -EINVAL;
	}

	if (log > 9) {
		ti->error = "cypher blocksize is > 512";
		return -EINVAL;
	}

	cc->iv_gen_private.benbi.shift = 9 - log;

	return 0;
}

static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
}

static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	__be64 val;

	memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */

	val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1);
	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));

	return 0;
}

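/*
 * Editor's illustration (not in the original source): for a 16-byte wide
 * cipher, log = ilog2(16) = 4 and shift = 9 - 4 = 5, so sector 3 yields a
 * block count of (3 << 5) + 1 = 97 = 0x61, stored big-endian in the last
 * eight bytes of the IV: 00 00 00 00 00 00 00 61.
 */
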
static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);

	return 0;
}

static struct crypt_iv_operations crypt_iv_plain_ops = {
	.generator = crypt_iv_plain_gen
};

static struct crypt_iv_operations crypt_iv_plain64_ops = {
	.generator = crypt_iv_plain64_gen
};

static struct crypt_iv_operations crypt_iv_essiv_ops = {
	.ctr       = crypt_iv_essiv_ctr,
	.dtr       = crypt_iv_essiv_dtr,
	.init      = crypt_iv_essiv_init,
	.wipe      = crypt_iv_essiv_wipe,
	.generator = crypt_iv_essiv_gen
};

static struct crypt_iv_operations crypt_iv_benbi_ops = {
	.ctr       = crypt_iv_benbi_ctr,
	.dtr       = crypt_iv_benbi_dtr,
	.generator = crypt_iv_benbi_gen
};

static struct crypt_iv_operations crypt_iv_null_ops = {
	.generator = crypt_iv_null_gen
};

static void crypt_convert_init(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct bio *bio_out, struct bio *bio_in,
			       sector_t sector)
{
	ctx->bio_in = bio_in;
	ctx->bio_out = bio_out;
	ctx->offset_in = 0;
	ctx->offset_out = 0;
	ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
	ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
	ctx->sector = sector + cc->iv_offset;
	init_completion(&ctx->restart);
}

static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
					     struct ablkcipher_request *req)
{
	return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
}

static struct ablkcipher_request *req_of_dmreq(struct crypt_config *cc,
					       struct dm_crypt_request *dmreq)
{
	return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start);
}

static u8 *iv_of_dmreq(struct crypt_config *cc,
		       struct dm_crypt_request *dmreq)
{
	return (u8 *)ALIGN((unsigned long)(dmreq + 1),
		crypto_ablkcipher_alignmask(any_tfm(cc)) + 1);
}

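/*
 * Editor's illustration (the offsets are made up): with dmreq_start = 160
 * and a 16-byte alignment mask, an allocation from req_pool is carved up as
 *   [0, 160)    struct ablkcipher_request + tfm context + padding
 *   [160, ...)  struct dm_crypt_request
 *   then, rounded up to the next 16-byte boundary, the IV.
 * The three helpers above merely apply these offsets in both directions;
 * see the layout comment in struct crypt_config.
 */
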
static int crypt_convert_block(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct ablkcipher_request *req)
{
	struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
	struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
	struct dm_crypt_request *dmreq;
	u8 *iv;
	int r = 0;

	dmreq = dmreq_of_req(cc, req);
	iv = iv_of_dmreq(cc, dmreq);

	dmreq->iv_sector = ctx->sector;
	dmreq->ctx = ctx;
	sg_init_table(&dmreq->sg_in, 1);
	sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
		    bv_in->bv_offset + ctx->offset_in);

	sg_init_table(&dmreq->sg_out, 1);
	sg_set_page(&dmreq->sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT,
		    bv_out->bv_offset + ctx->offset_out);

	ctx->offset_in += 1 << SECTOR_SHIFT;
	if (ctx->offset_in >= bv_in->bv_len) {
		ctx->offset_in = 0;
		ctx->idx_in++;
	}

	ctx->offset_out += 1 << SECTOR_SHIFT;
	if (ctx->offset_out >= bv_out->bv_len) {
		ctx->offset_out = 0;
		ctx->idx_out++;
	}

	if (cc->iv_gen_ops) {
		r = cc->iv_gen_ops->generator(cc, iv, dmreq);
		if (r < 0)
			return r;
	}

	ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
				     1 << SECTOR_SHIFT, iv);

	if (bio_data_dir(ctx->bio_in) == WRITE)
		r = crypto_ablkcipher_encrypt(req);
	else
		r = crypto_ablkcipher_decrypt(req);

	if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
		r = cc->iv_gen_ops->post(cc, iv, dmreq);

	return r;
}

static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error);

static void crypt_alloc_req(struct crypt_config *cc,
			    struct convert_context *ctx)
{
	struct crypt_cpu *this_cc = this_crypt_config(cc);
	unsigned key_index = ctx->sector & (cc->tfms_count - 1);

	if (!this_cc->req)
		this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);

	ablkcipher_request_set_tfm(this_cc->req, this_cc->tfms[key_index]);
	ablkcipher_request_set_callback(this_cc->req,
	    CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
	    kcryptd_async_done, dmreq_of_req(cc, this_cc->req));
}

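/*
 * Editor's note (not in the original source): key_index implements the
 * multi key capability. crypt_ctr_cipher() enforces that tfms_count is a
 * power of 2, so the mask is equivalent to sector % tfms_count; e.g. with
 * tfms_count = 4, sector 6 selects tfms[6 & 3] = tfms[2], and consecutive
 * sectors cycle through the per-sector keys.
 */
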
/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */
static int crypt_convert(struct crypt_config *cc,
			 struct convert_context *ctx)
{
	struct crypt_cpu *this_cc = this_crypt_config(cc);
	int r;

	atomic_set(&ctx->pending, 1);

	while (ctx->idx_in < ctx->bio_in->bi_vcnt &&
	       ctx->idx_out < ctx->bio_out->bi_vcnt) {

		crypt_alloc_req(cc, ctx);

		atomic_inc(&ctx->pending);

		r = crypt_convert_block(cc, ctx, this_cc->req);

		switch (r) {
		/* async */
		case -EBUSY:
			wait_for_completion(&ctx->restart);
			INIT_COMPLETION(ctx->restart);
			/* fall through */
		case -EINPROGRESS:
			this_cc->req = NULL;
			ctx->sector++;
			continue;

		/* sync */
		case 0:
			atomic_dec(&ctx->pending);
			ctx->sector++;
			cond_resched();
			continue;

		/* error */
		default:
			atomic_dec(&ctx->pending);
			return r;
		}
	}

	return 0;
}

static void dm_crypt_bio_destructor(struct bio *bio)
{
	struct dm_crypt_io *io = bio->bi_private;
	struct crypt_config *cc = io->target->private;

	bio_free(bio, cc->bs);
}

/*
 * Generate a new unfragmented bio with the given size.
 * This should never violate the device limitations.
 * May return a smaller bio when running out of pages, indicated by
 * *out_of_pages set to 1.
 */
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
				      unsigned *out_of_pages)
{
	struct crypt_config *cc = io->target->private;
	struct bio *clone;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
	unsigned i, len;
	struct page *page;

	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
	if (!clone)
		return NULL;

	clone_init(io, clone);
	*out_of_pages = 0;

	for (i = 0; i < nr_iovecs; i++) {
		page = mempool_alloc(cc->page_pool, gfp_mask);
		if (!page) {
			*out_of_pages = 1;
			break;
		}

		/*
		 * if additional pages cannot be allocated without waiting,
		 * return a partially allocated bio, the caller will then try
		 * to allocate additional bios while submitting this partial bio
		 */
		if (i == (MIN_BIO_PAGES - 1))
			gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;

		len = (size > PAGE_SIZE) ? PAGE_SIZE : size;

		if (!bio_add_page(clone, page, len, 0)) {
			mempool_free(page, cc->page_pool);
			break;
		}

		size -= len;
	}

	if (!clone->bi_size) {
		bio_put(clone);
		return NULL;
	}

	return clone;
}

static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
{
	unsigned int i;
	struct bio_vec *bv;

	for (i = 0; i < clone->bi_vcnt; i++) {
		bv = bio_iovec_idx(clone, i);
		BUG_ON(!bv->bv_page);
		mempool_free(bv->bv_page, cc->page_pool);
		bv->bv_page = NULL;
	}
}

static struct dm_crypt_io *crypt_io_alloc(struct dm_target *ti,
					  struct bio *bio, sector_t sector)
{
	struct crypt_config *cc = ti->private;
	struct dm_crypt_io *io;

	io = mempool_alloc(cc->io_pool, GFP_NOIO);
	io->target = ti;
	io->base_bio = bio;
	io->sector = sector;
	io->error = 0;
	io->base_io = NULL;
	atomic_set(&io->pending, 0);

	return io;
}

static void crypt_inc_pending(struct dm_crypt_io *io)
{
	atomic_inc(&io->pending);
}

/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 * If base_io is set, wait for the last fragment to complete.
 */
static void crypt_dec_pending(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *base_bio = io->base_bio;
	struct dm_crypt_io *base_io = io->base_io;
	int error = io->error;

	if (!atomic_dec_and_test(&io->pending))
		return;

	mempool_free(io, cc->io_pool);

	if (likely(!base_io))
		bio_endio(base_bio, error);
	else {
		if (error && !base_io->error)
			base_io->error = error;
		crypt_dec_pending(base_io);
	}
}

/*
 * kcryptd/kcryptd_io:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated as otherwise the final stages could be
 * starved by new requests which can block in the first stages due
 * to memory allocation.
 *
 * The work is done per CPU and is shared by all dm-crypt instances.
 * The instances should not depend on each other and must not block.
 */
static void crypt_endio(struct bio *clone, int error)
{
	struct dm_crypt_io *io = clone->bi_private;
	struct crypt_config *cc = io->target->private;
	unsigned rw = bio_data_dir(clone);

	if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
		error = -EIO;

	/*
	 * free the processed pages
	 */
	if (rw == WRITE)
		crypt_free_buffer_pages(cc, clone);

	bio_put(clone);

	if (rw == READ && !error) {
		kcryptd_queue_crypt(io);
		return;
	}

	if (unlikely(error))
		io->error = error;

	crypt_dec_pending(io);
}

static void clone_init(struct dm_crypt_io *io, struct bio *clone)
{
	struct crypt_config *cc = io->target->private;

	clone->bi_private = io;
	clone->bi_end_io  = crypt_endio;
	clone->bi_bdev    = cc->dev->bdev;
	clone->bi_rw      = io->base_bio->bi_rw;
	clone->bi_destructor = dm_crypt_bio_destructor;
}

static void kcryptd_unplug(struct crypt_config *cc)
{
	blk_unplug(bdev_get_queue(cc->dev->bdev));
}

static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
{
	struct crypt_config *cc = io->target->private;
	struct bio *base_bio = io->base_bio;
	struct bio *clone;

	/*
	 * The block layer might modify the bvec array, so always
	 * copy the required bvecs because we need the original
	 * one in order to decrypt the whole bio data *afterwards*.
	 */
	clone = bio_alloc_bioset(gfp, bio_segments(base_bio), cc->bs);
	if (!clone) {
		kcryptd_unplug(cc);
		return 1;
	}

	crypt_inc_pending(io);

	clone_init(io, clone);
	clone->bi_idx = 0;
	clone->bi_vcnt = bio_segments(base_bio);
	clone->bi_size = base_bio->bi_size;
	clone->bi_sector = cc->start + io->sector;
	memcpy(clone->bi_io_vec, bio_iovec(base_bio),
	       sizeof(struct bio_vec) * clone->bi_vcnt);

	generic_make_request(clone);
	return 0;
}

static void kcryptd_io_write(struct dm_crypt_io *io)
{
	struct bio *clone = io->ctx.bio_out;
	generic_make_request(clone);
}

static void kcryptd_io(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ) {
		crypt_inc_pending(io);
		if (kcryptd_io_read(io, GFP_NOIO))
			io->error = -ENOMEM;
		crypt_dec_pending(io);
	} else
		kcryptd_io_write(io);
}

static void kcryptd_queue_io(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

	INIT_WORK(&io->work, kcryptd_io);
	queue_work(cc->io_queue, &io->work);
}

static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io,
					  int error, int async)
{
	struct bio *clone = io->ctx.bio_out;
	struct crypt_config *cc = io->target->private;

	if (unlikely(error < 0)) {
		crypt_free_buffer_pages(cc, clone);
		bio_put(clone);
		io->error = -EIO;
		crypt_dec_pending(io);
		return;
	}

	/* crypt_convert should have filled the clone bio */
	BUG_ON(io->ctx.idx_out < clone->bi_vcnt);

	clone->bi_sector = cc->start + io->sector;

	if (async)
		kcryptd_queue_io(io);
	else
		generic_make_request(clone);
}

static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *clone;
	struct dm_crypt_io *new_io;
	int crypt_finished;
	unsigned out_of_pages = 0;
	unsigned remaining = io->base_bio->bi_size;
	sector_t sector = io->sector;
	int r;

	/*
	 * Prevent io from disappearing until this function completes.
	 */
	crypt_inc_pending(io);
	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);

	/*
	 * The allocated buffers can be smaller than the whole bio,
	 * so repeat the whole process until all the data can be handled.
	 */
	while (remaining) {
		clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
		if (unlikely(!clone)) {
			io->error = -ENOMEM;
			break;
		}

		io->ctx.bio_out = clone;
		io->ctx.idx_out = 0;

		remaining -= clone->bi_size;
		sector += bio_sectors(clone);

		crypt_inc_pending(io);
		r = crypt_convert(cc, &io->ctx);
		crypt_finished = atomic_dec_and_test(&io->ctx.pending);

		/* Encryption was already finished, submit io now */
		if (crypt_finished) {
			kcryptd_crypt_write_io_submit(io, r, 0);

			/*
			 * If there was an error, do not try next fragments.
			 * For async, error is processed in async handler.
			 */
			if (unlikely(r < 0))
				break;

			io->sector = sector;
		}

		/*
		 * Out of memory -> run queues
		 * But don't wait if split was due to the io size restriction
		 */
		if (unlikely(out_of_pages))
			congestion_wait(BLK_RW_ASYNC, HZ/100);

		/*
		 * With async crypto it is unsafe to share the crypto context
		 * between fragments, so switch to a new dm_crypt_io structure.
		 */
		if (unlikely(!crypt_finished && remaining)) {
			new_io = crypt_io_alloc(io->target, io->base_bio,
						sector);
			crypt_inc_pending(new_io);
			crypt_convert_init(cc, &new_io->ctx, NULL,
					   io->base_bio, sector);
			new_io->ctx.idx_in = io->ctx.idx_in;
			new_io->ctx.offset_in = io->ctx.offset_in;

			/*
			 * Fragments after the first use the base_io
			 * pending count.
			 */
			if (!io->base_io)
				new_io->base_io = io;
			else {
				new_io->base_io = io->base_io;
				crypt_inc_pending(io->base_io);
				crypt_dec_pending(io);
			}

			io = new_io;
		}
	}

	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_done(struct dm_crypt_io *io, int error)
{
	if (unlikely(error < 0))
		io->error = -EIO;

	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	int r = 0;

	crypt_inc_pending(io);

	crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
			   io->sector);

	r = crypt_convert(cc, &io->ctx);

	if (atomic_dec_and_test(&io->ctx.pending))
		kcryptd_crypt_read_done(io, r);

	crypt_dec_pending(io);
}

static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error)
{
	struct dm_crypt_request *dmreq = async_req->data;
	struct convert_context *ctx = dmreq->ctx;
	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
	struct crypt_config *cc = io->target->private;

	if (error == -EINPROGRESS) {
		complete(&ctx->restart);
		return;
	}

	if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
		error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq);

	mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool);

	if (!atomic_dec_and_test(&ctx->pending))
		return;

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_done(io, error);
	else
		kcryptd_crypt_write_io_submit(io, error, 1);
}

static void kcryptd_crypt(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_convert(io);
	else
		kcryptd_crypt_write_convert(io);
}

static void kcryptd_queue_crypt(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

	INIT_WORK(&io->work, kcryptd_crypt);
	queue_work(cc->crypt_queue, &io->work);
}

/*
 * Decode key from its hex representation
 */
static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
{
	char buffer[3];
	char *endp;
	unsigned int i;

	buffer[2] = '\0';

	for (i = 0; i < size; i++) {
		buffer[0] = *hex++;
		buffer[1] = *hex++;

		key[i] = (u8)simple_strtoul(buffer, &endp, 16);

		if (endp != &buffer[2])
			return -EINVAL;
	}

	if (*hex != '\0')
		return -EINVAL;

	return 0;
}

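/*
 * Editor's illustration (not in the original source): decoding "2a0f" with
 * size = 2 yields key[0] = 0x2a, key[1] = 0x0f. A non-hex digit makes
 * simple_strtoul() stop before &buffer[2], and extra trailing characters
 * leave *hex != '\0'; both cases return -EINVAL.
 */
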
/*
 * Encode key into its hex representation
 */
static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
{
	unsigned int i;

	for (i = 0; i < size; i++) {
		sprintf(hex, "%02x", *key);
		hex += 2;
		key++;
	}
}

static void crypt_free_tfms(struct crypt_config *cc, int cpu)
{
	struct crypt_cpu *cpu_cc = per_cpu_ptr(cc->cpu, cpu);
	unsigned i;

	for (i = 0; i < cc->tfms_count; i++)
		if (cpu_cc->tfms[i] && !IS_ERR(cpu_cc->tfms[i])) {
			crypto_free_ablkcipher(cpu_cc->tfms[i]);
			cpu_cc->tfms[i] = NULL;
		}
}

static int crypt_alloc_tfms(struct crypt_config *cc, int cpu, char *ciphermode)
{
	struct crypt_cpu *cpu_cc = per_cpu_ptr(cc->cpu, cpu);
	unsigned i;
	int err;

	for (i = 0; i < cc->tfms_count; i++) {
		cpu_cc->tfms[i] = crypto_alloc_ablkcipher(ciphermode, 0, 0);
		if (IS_ERR(cpu_cc->tfms[i])) {
			err = PTR_ERR(cpu_cc->tfms[i]);
			crypt_free_tfms(cc, cpu);
			return err;
		}
	}

	return 0;
}

static int crypt_setkey_allcpus(struct crypt_config *cc)
{
	unsigned subkey_size = cc->key_size >> ilog2(cc->tfms_count);
	int cpu, err = 0, i, r;

	for_each_possible_cpu(cpu) {
		for (i = 0; i < cc->tfms_count; i++) {
			r = crypto_ablkcipher_setkey(per_cpu_ptr(cc->cpu, cpu)->tfms[i],
						     cc->key + (i * subkey_size), subkey_size);
			if (r)
				err = r;
		}
	}

	return err;
}

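/*
 * Editor's illustration (not in the original source): with key_size = 32
 * bytes and tfms_count = 2, subkey_size = 32 >> ilog2(2) = 16, so tfms[0]
 * is keyed with key[0..15] and tfms[1] with key[16..31]. The user-supplied
 * hex key is simply the concatenation of the subkeys.
 */
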
static int crypt_set_key(struct crypt_config *cc, char *key)
{
	/* The key size may not be changed. */
	if (cc->key_size != (strlen(key) >> 1))
		return -EINVAL;

	/* Hyphen (which gives a key_size of zero) means there is no key. */
	if (!cc->key_size && strcmp(key, "-"))
		return -EINVAL;

	if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0)
		return -EINVAL;

	set_bit(DM_CRYPT_KEY_VALID, &cc->flags);

	return crypt_setkey_allcpus(cc);
}

static int crypt_wipe_key(struct crypt_config *cc)
{
	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
	memset(&cc->key, 0, cc->key_size * sizeof(u8));

	return crypt_setkey_allcpus(cc);
}

static void crypt_dtr(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;
	struct crypt_cpu *cpu_cc;
	int cpu;

	ti->private = NULL;

	if (!cc)
		return;

	if (cc->io_queue)
		destroy_workqueue(cc->io_queue);
	if (cc->crypt_queue)
		destroy_workqueue(cc->crypt_queue);

	if (cc->cpu)
		for_each_possible_cpu(cpu) {
			cpu_cc = per_cpu_ptr(cc->cpu, cpu);
			if (cpu_cc->req)
				mempool_free(cpu_cc->req, cc->req_pool);
			crypt_free_tfms(cc, cpu);
		}

	if (cc->bs)
		bioset_free(cc->bs);

	if (cc->page_pool)
		mempool_destroy(cc->page_pool);
	if (cc->req_pool)
		mempool_destroy(cc->req_pool);
	if (cc->io_pool)
		mempool_destroy(cc->io_pool);

	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);

	if (cc->dev)
		dm_put_device(ti, cc->dev);

	if (cc->cpu)
		free_percpu(cc->cpu);

	kzfree(cc->cipher);
	kzfree(cc->cipher_string);

	/* Must zero key material before freeing */
	kzfree(cc);
}

static int crypt_ctr_cipher(struct dm_target *ti,
			    char *cipher_in, char *key)
{
	struct crypt_config *cc = ti->private;
	char *tmp, *cipher, *chainmode, *ivmode, *ivopts, *keycount;
	char *cipher_api = NULL;
	int cpu, ret = -EINVAL;

	/* Convert to crypto api definition? */
	if (strchr(cipher_in, '(')) {
		ti->error = "Bad cipher specification";
		return -EINVAL;
	}

	cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL);
	if (!cc->cipher_string)
		goto bad_mem;

	/*
	 * Legacy dm-crypt cipher specification
	 * cipher[:keycount]-mode-iv:ivopts
	 */
	tmp = cipher_in;
	keycount = strsep(&tmp, "-");
	cipher = strsep(&keycount, ":");

	if (!keycount)
		cc->tfms_count = 1;
	else if (sscanf(keycount, "%u", &cc->tfms_count) != 1 ||
		 !is_power_of_2(cc->tfms_count)) {
		ti->error = "Bad cipher key count specification";
		return -EINVAL;
	}
	cc->key_parts = cc->tfms_count;

	cc->cipher = kstrdup(cipher, GFP_KERNEL);
	if (!cc->cipher)
		goto bad_mem;

	chainmode = strsep(&tmp, "-");
	ivopts = strsep(&tmp, "-");
	ivmode = strsep(&ivopts, ":");

	if (tmp)
		DMWARN("Ignoring unexpected additional cipher options");

	cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)) +
				 cc->tfms_count * sizeof(*(cc->cpu->tfms)),
				 __alignof__(struct crypt_cpu));
	if (!cc->cpu) {
		ti->error = "Cannot allocate per cpu state";
		goto bad_mem;
	}

	/*
	 * For compatibility with the original dm-crypt mapping format, if
	 * only the cipher name is supplied, use cbc-plain.
	 */
	if (!chainmode || (!strcmp(chainmode, "plain") && !ivmode)) {
		chainmode = "cbc";
		ivmode = "plain";
	}

	if (strcmp(chainmode, "ecb") && !ivmode) {
		ti->error = "IV mechanism required";
		return -EINVAL;
	}

	cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL);
	if (!cipher_api)
		goto bad_mem;

	ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
		       "%s(%s)", chainmode, cipher);
	if (ret < 0) {
		kfree(cipher_api);
		goto bad_mem;
	}

	/* Allocate cipher */
	for_each_possible_cpu(cpu) {
		ret = crypt_alloc_tfms(cc, cpu, cipher_api);
		if (ret < 0) {
			ti->error = "Error allocating crypto tfm";
			goto bad;
		}
	}

	/* Initialize and set key */
	ret = crypt_set_key(cc, key);
	if (ret < 0) {
		ti->error = "Error decoding and setting key";
		goto bad;
	}

	/* Initialize IV */
	cc->iv_size = crypto_ablkcipher_ivsize(any_tfm(cc));
	if (cc->iv_size)
		/* at least a 64 bit sector number should fit in our buffer */
		cc->iv_size = max(cc->iv_size,
				  (unsigned int)(sizeof(u64) / sizeof(u8)));
	else if (ivmode) {
		DMWARN("Selected cipher does not support IVs");
		ivmode = NULL;
	}

	/* Choose ivmode, see comments at iv code. */
	if (ivmode == NULL)
		cc->iv_gen_ops = NULL;
	else if (strcmp(ivmode, "plain") == 0)
		cc->iv_gen_ops = &crypt_iv_plain_ops;
	else if (strcmp(ivmode, "plain64") == 0)
		cc->iv_gen_ops = &crypt_iv_plain64_ops;
	else if (strcmp(ivmode, "essiv") == 0)
		cc->iv_gen_ops = &crypt_iv_essiv_ops;
	else if (strcmp(ivmode, "benbi") == 0)
		cc->iv_gen_ops = &crypt_iv_benbi_ops;
	else if (strcmp(ivmode, "null") == 0)
		cc->iv_gen_ops = &crypt_iv_null_ops;
	else {
		ret = -EINVAL;
		ti->error = "Invalid IV mode";
		goto bad;
	}

	/* Allocate IV */
	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
		ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
		if (ret < 0) {
			ti->error = "Error creating IV";
			goto bad;
		}
	}

	/* Initialize IV (set keys for ESSIV etc) */
	if (cc->iv_gen_ops && cc->iv_gen_ops->init) {
		ret = cc->iv_gen_ops->init(cc);
		if (ret < 0) {
			ti->error = "Error initialising IV";
			goto bad;
		}
	}

	ret = 0;
bad:
	kfree(cipher_api);
	return ret;

bad_mem:
	ti->error = "Cannot allocate cipher strings";
	return -ENOMEM;
}

/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start>
 */
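/*
 * Editor's illustration (device and key are placeholders): a table line
 * for a 1 GiB (2097152-sector) mapping with AES-256 in cbc-essiv mode
 * could look like
 *   0 2097152 crypt aes-cbc-essiv:sha256 <64 hex digits> 0 /dev/sdb1 0
 * With the multi key syntax, e.g. aes:2-cbc-essiv:sha256, the key field
 * carries the concatenation of two subkeys, and crypt_ctr_cipher() builds
 * the crypto API name "cbc(aes)" from the specification.
 */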
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc;
	unsigned int key_size;
	unsigned long long tmpll;
	int ret;

	if (argc != 5) {
		ti->error = "Not enough arguments";
		return -EINVAL;
	}

	key_size = strlen(argv[1]) >> 1;

	cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
	if (!cc) {
		ti->error = "Cannot allocate encryption context";
		return -ENOMEM;
	}
	cc->key_size = key_size;

	ti->private = cc;
	ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
	if (ret < 0)
		goto bad;

	ret = -ENOMEM;
	cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
	if (!cc->io_pool) {
		ti->error = "Cannot allocate crypt io mempool";
		goto bad;
	}

	cc->dmreq_start = sizeof(struct ablkcipher_request);
	cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
	cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment());
	cc->dmreq_start += crypto_ablkcipher_alignmask(any_tfm(cc)) &
			   ~(crypto_tfm_ctx_alignment() - 1);

	cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
			sizeof(struct dm_crypt_request) + cc->iv_size);
	if (!cc->req_pool) {
		ti->error = "Cannot allocate crypt request mempool";
		goto bad;
	}

	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
	if (!cc->page_pool) {
		ti->error = "Cannot allocate page mempool";
		goto bad;
	}

	cc->bs = bioset_create(MIN_IOS, 0);
	if (!cc->bs) {
		ti->error = "Cannot allocate crypt bioset";
		goto bad;
	}

	ret = -EINVAL;
	if (sscanf(argv[2], "%llu", &tmpll) != 1) {
		ti->error = "Invalid iv_offset sector";
		goto bad;
	}
	cc->iv_offset = tmpll;

	if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev)) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	if (sscanf(argv[4], "%llu", &tmpll) != 1) {
		ti->error = "Invalid device sector";
		goto bad;
	}
	cc->start = tmpll;

	ret = -ENOMEM;
	cc->io_queue = alloc_workqueue("kcryptd_io",
				       WQ_NON_REENTRANT|
				       WQ_MEM_RECLAIM,
				       1);
	if (!cc->io_queue) {
		ti->error = "Couldn't create kcryptd io queue";
		goto bad;
	}

	cc->crypt_queue = alloc_workqueue("kcryptd",
					  WQ_NON_REENTRANT|
					  WQ_CPU_INTENSIVE|
					  WQ_MEM_RECLAIM,
					  1);
	if (!cc->crypt_queue) {
		ti->error = "Couldn't create kcryptd queue";
		goto bad;
	}

	ti->num_flush_requests = 1;
	return 0;

bad:
	crypt_dtr(ti);
	return ret;
}

static int crypt_map(struct dm_target *ti, struct bio *bio,
		     union map_info *map_context)
{
	struct dm_crypt_io *io;
	struct crypt_config *cc;

	if (bio->bi_rw & REQ_FLUSH) {
		cc = ti->private;
		bio->bi_bdev = cc->dev->bdev;
		return DM_MAPIO_REMAPPED;
	}

	io = crypt_io_alloc(ti, bio, dm_target_offset(ti, bio->bi_sector));

	if (bio_data_dir(io->base_bio) == READ) {
		if (kcryptd_io_read(io, GFP_NOWAIT))
			kcryptd_queue_io(io);
	} else
		kcryptd_queue_crypt(io);

	return DM_MAPIO_SUBMITTED;
}

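/*
 * Editor's note (not in the original source): empty flush bios are
 * remapped to the underlying device without touching the crypto path.
 * Reads are submitted directly when the clone can be allocated without
 * sleeping (GFP_NOWAIT) and fall back to the kcryptd_io workqueue
 * otherwise; writes always go through the kcryptd crypt workqueue first.
 */
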
static int crypt_status(struct dm_target *ti, status_type_t type,
			char *result, unsigned int maxlen)
{
	struct crypt_config *cc = ti->private;
	unsigned int sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s ", cc->cipher_string);

		if (cc->key_size > 0) {
			if ((maxlen - sz) < ((cc->key_size << 1) + 1))
				return -ENOMEM;

			crypt_encode_key(result + sz, cc->key, cc->key_size);
			sz += cc->key_size << 1;
		} else {
			if (sz >= maxlen)
				return -ENOMEM;
			result[sz++] = '-';
		}

		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
				cc->dev->name, (unsigned long long)cc->start);
		break;
	}
	return 0;
}

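/*
 * Editor's illustration (key shortened, device shown symbolically):
 * STATUSTYPE_TABLE output mirrors the constructor arguments, e.g.
 *   aes-cbc-essiv:sha256 a1b2...ef 0 8:17 0
 * and emits "-" in place of the key when key_size is zero.
 */
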
static void crypt_postsuspend(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

static int crypt_preresume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
		DMERR("aborting resume - crypt key is not set.");
		return -EAGAIN;
	}

	return 0;
}

static void crypt_resume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

/* Message interface
 *	key set <key>
 *	key wipe
 */
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct crypt_config *cc = ti->private;
	int ret = -EINVAL;

	if (argc < 2)
		goto error;

	if (!strnicmp(argv[0], MESG_STR("key"))) {
		if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
			DMWARN("not suspended during key manipulation.");
			return -EINVAL;
		}
		if (argc == 3 && !strnicmp(argv[1], MESG_STR("set"))) {
			ret = crypt_set_key(cc, argv[2]);
			if (ret)
				return ret;
			if (cc->iv_gen_ops && cc->iv_gen_ops->init)
				ret = cc->iv_gen_ops->init(cc);
			return ret;
		}
		if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe"))) {
			if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
				ret = cc->iv_gen_ops->wipe(cc);
				if (ret)
					return ret;
			}
			return crypt_wipe_key(cc);
		}
	}

error:
	DMWARN("unrecognised message received.");
	return -EINVAL;
}

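/*
 * Editor's illustration (device name is a placeholder): the messages are
 * sent with dmsetup while the device is suspended, e.g.
 *   dmsetup suspend crypt_dev
 *   dmsetup message crypt_dev 0 key wipe
 *   dmsetup message crypt_dev 0 key set <hex key>
 *   dmsetup resume crypt_dev
 */
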
static int crypt_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
		       struct bio_vec *biovec, int max_size)
{
	struct crypt_config *cc = ti->private;
	struct request_queue *q = bdev_get_queue(cc->dev->bdev);

	if (!q->merge_bvec_fn)
		return max_size;

	bvm->bi_bdev = cc->dev->bdev;
	bvm->bi_sector = cc->start + dm_target_offset(ti, bvm->bi_sector);

	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}

static int crypt_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	struct crypt_config *cc = ti->private;

	return fn(ti, cc->dev, cc->start, ti->len, data);
}

static struct target_type crypt_target = {
	.name   = "crypt",
	.version = {1, 10, 0},
	.module = THIS_MODULE,
	.ctr    = crypt_ctr,
	.dtr    = crypt_dtr,
	.map    = crypt_map,
	.status = crypt_status,
	.postsuspend = crypt_postsuspend,
	.preresume = crypt_preresume,
	.resume = crypt_resume,
	.message = crypt_message,
	.merge  = crypt_merge,
	.iterate_devices = crypt_iterate_devices,
};

static int __init dm_crypt_init(void)
{
	int r;

	_crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0);
	if (!_crypt_io_pool)
		return -ENOMEM;

	r = dm_register_target(&crypt_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		kmem_cache_destroy(_crypt_io_pool);
	}

	return r;
}

static void __exit dm_crypt_exit(void)
{
	dm_unregister_target(&crypt_target);
	kmem_cache_destroy(_crypt_io_pool);
}

module_init(dm_crypt_init);
module_exit(dm_crypt_exit);

MODULE_AUTHOR("Christophe Saout <christophe@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");