/*
 * Copyright (C) 2003 Jana Saout <jana@saout.de>
 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
 * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2013 Milan Broz <gmazyland@gmail.com>
 *
 * This file is released under the GPL.
 */

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/backing-dev.h>
#include <linux/atomic.h>
#include <linux/scatterlist.h>
#include <asm/page.h>
#include <asm/unaligned.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/algapi.h>

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "crypt"

/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
        struct completion restart;
        struct bio *bio_in;
        struct bio *bio_out;
        struct bvec_iter iter_in;
        struct bvec_iter iter_out;
        sector_t cc_sector;
        atomic_t cc_pending;
        struct ablkcipher_request *req;
};

/*
 * per bio private data
 */
struct dm_crypt_io {
        struct crypt_config *cc;
        struct bio *base_bio;
        struct work_struct work;

        struct convert_context ctx;

        atomic_t io_pending;
        int error;
        sector_t sector;
        struct dm_crypt_io *base_io;
} CRYPTO_MINALIGN_ATTR;

struct dm_crypt_request {
        struct convert_context *ctx;
        struct scatterlist sg_in;
        struct scatterlist sg_out;
        sector_t iv_sector;
};

struct crypt_config;

struct crypt_iv_operations {
        int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
                   const char *opts);
        void (*dtr)(struct crypt_config *cc);
        int (*init)(struct crypt_config *cc);
        int (*wipe)(struct crypt_config *cc);
        int (*generator)(struct crypt_config *cc, u8 *iv,
                         struct dm_crypt_request *dmreq);
        int (*post)(struct crypt_config *cc, u8 *iv,
                    struct dm_crypt_request *dmreq);
};

struct iv_essiv_private {
        struct crypto_hash *hash_tfm;
        u8 *salt;
};

struct iv_benbi_private {
        int shift;
};

#define LMK_SEED_SIZE 64 /* hash + 0 */
struct iv_lmk_private {
        struct crypto_shash *hash_tfm;
        u8 *seed;
};

#define TCW_WHITENING_SIZE 16
struct iv_tcw_private {
        struct crypto_shash *crc32_tfm;
        u8 *iv_seed;
        u8 *whitening;
};

/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };

/*
 * The fields in here must be read only after initialization.
 */
struct crypt_config {
        struct dm_dev *dev;
        sector_t start;

        /*
         * pool for per bio private data, crypto requests and
         * encryption requests/buffer pages
         */
        mempool_t *io_pool;
        mempool_t *req_pool;
        mempool_t *page_pool;
        struct bio_set *bs;

        struct workqueue_struct *io_queue;
        struct workqueue_struct *crypt_queue;

        char *cipher;
        char *cipher_string;

        struct crypt_iv_operations *iv_gen_ops;
        union {
                struct iv_essiv_private essiv;
                struct iv_benbi_private benbi;
                struct iv_lmk_private lmk;
                struct iv_tcw_private tcw;
        } iv_gen_private;
        sector_t iv_offset;
        unsigned int iv_size;

        /* ESSIV: struct crypto_cipher *essiv_tfm */
        void *iv_private;
        struct crypto_ablkcipher **tfms;
        unsigned tfms_count;

        /*
         * Layout of each crypto request:
         *
         *   struct ablkcipher_request
         *      context
         *      padding
         *   struct dm_crypt_request
         *      padding
         *   IV
         *
         * The padding is added so that dm_crypt_request and the IV are
         * correctly aligned.
         */
        unsigned int dmreq_start;

        unsigned int per_bio_data_size;

        unsigned long flags;
        unsigned int key_size;
        unsigned int key_parts;      /* independent parts in key buffer */
        unsigned int key_extra_size; /* additional keys length */
        u8 key[0];
};

#define MIN_IOS        16
#define MIN_POOL_PAGES 32

static struct kmem_cache *_crypt_io_pool;

static void clone_init(struct dm_crypt_io *, struct bio *);
static void kcryptd_queue_crypt(struct dm_crypt_io *io);
static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);

/*
 * Use this to access cipher attributes that are the same for each CPU.
 */
static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc)
{
        return cc->tfms[0];
}

/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * plain64: the initial vector is the 64-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possibly other narrow block modes)
 *
 * null: the initial vector is always zero. Provides compatibility with
 *       obsolete loop_fish2 devices. Do not use for new devices.
 *
 * lmk: Compatible implementation of the block chaining mode used
 *      by the Loop-AES block device encryption system
 *      designed by Jari Ruusu. See http://loop-aes.sourceforge.net/
 *      It operates on full 512 byte sectors and uses CBC
 *      with an IV derived from the sector number, the data and
 *      optionally an extra IV seed.
 *      This means that after decryption the first block
 *      of the sector must be tweaked according to the decrypted data.
 *      Loop-AES can use three encryption schemes:
 *         version 1: plain aes-cbc mode
 *         version 2: uses a 64 multikey scheme with the lmk IV generator
 *         version 3: the same as version 2 with an additional IV seed
 *                    (it uses 65 keys, the last key is used as the IV seed)
 *
 * tcw: Compatible implementation of the block chaining mode used
 *      by the TrueCrypt device encryption system (prior to version 4.1).
 *      For more info see: http://www.truecrypt.org
 *      It operates on full 512 byte sectors and uses CBC
 *      with an IV derived from the initial key and the sector number.
 *      In addition, a whitening value is applied to every sector; the
 *      whitening is calculated from the initial key and the sector
 *      number and mixed using CRC32.
 *      Note that this encryption scheme is vulnerable to watermarking
 *      attacks and should be used only to access old compatible containers.
 *
 * plumb: unimplemented, see:
 *        http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
 */
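
/*
 * A worked example of the two simplest generators (illustrative only):
 * for sector 0x0000000123456789 and a 16-byte IV, plain stores only the
 * low 32 bits little-endian, 89 67 45 23 00 00 ... 00, while plain64
 * stores the full 64-bit value, 89 67 45 23 01 00 00 00 00 ... 00.
 * Devices larger than 2^32 sectors therefore repeat plain IVs but get
 * unique plain64 IVs.
 */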

static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
                              struct dm_crypt_request *dmreq)
{
        memset(iv, 0, cc->iv_size);
        *(__le32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);

        return 0;
}

static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
                                struct dm_crypt_request *dmreq)
{
        memset(iv, 0, cc->iv_size);
        *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);

        return 0;
}

/* Initialise ESSIV - compute salt but no local memory allocations */
static int crypt_iv_essiv_init(struct crypt_config *cc)
{
        struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
        struct hash_desc desc;
        struct scatterlist sg;
        struct crypto_cipher *essiv_tfm;
        int err;

        sg_init_one(&sg, cc->key, cc->key_size);
        desc.tfm = essiv->hash_tfm;
        desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

        err = crypto_hash_digest(&desc, &sg, cc->key_size, essiv->salt);
        if (err)
                return err;

        essiv_tfm = cc->iv_private;

        err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
                                   crypto_hash_digestsize(essiv->hash_tfm));
        if (err)
                return err;

        return 0;
}

/* Wipe salt and reset key derived from volume key */
static int crypt_iv_essiv_wipe(struct crypt_config *cc)
{
        struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
        unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm);
        struct crypto_cipher *essiv_tfm;
        int r, err = 0;

        memset(essiv->salt, 0, salt_size);

        essiv_tfm = cc->iv_private;
        r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size);
        if (r)
                err = r;

        return err;
}

/* Set up per cpu cipher state */
static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc,
                                             struct dm_target *ti,
                                             u8 *salt, unsigned saltsize)
{
        struct crypto_cipher *essiv_tfm;
        int err;

        /* Setup the essiv_tfm with the given salt */
        essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(essiv_tfm)) {
                ti->error = "Error allocating crypto tfm for ESSIV";
                return essiv_tfm;
        }

        if (crypto_cipher_blocksize(essiv_tfm) !=
            crypto_ablkcipher_ivsize(any_tfm(cc))) {
                ti->error = "Block size of ESSIV cipher does "
                            "not match IV size of block cipher";
                crypto_free_cipher(essiv_tfm);
                return ERR_PTR(-EINVAL);
        }

        err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
        if (err) {
                ti->error = "Failed to set key for ESSIV cipher";
                crypto_free_cipher(essiv_tfm);
                return ERR_PTR(err);
        }

        return essiv_tfm;
}

static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
        struct crypto_cipher *essiv_tfm;
        struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;

        crypto_free_hash(essiv->hash_tfm);
        essiv->hash_tfm = NULL;

        kzfree(essiv->salt);
        essiv->salt = NULL;

        essiv_tfm = cc->iv_private;

        if (essiv_tfm)
                crypto_free_cipher(essiv_tfm);

        cc->iv_private = NULL;
}

static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
                              const char *opts)
{
        struct crypto_cipher *essiv_tfm = NULL;
        struct crypto_hash *hash_tfm = NULL;
        u8 *salt = NULL;
        int err;

        if (!opts) {
                ti->error = "Digest algorithm missing for ESSIV mode";
                return -EINVAL;
        }

        /* Allocate hash algorithm */
        hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(hash_tfm)) {
                ti->error = "Error initializing ESSIV hash";
                err = PTR_ERR(hash_tfm);
                goto bad;
        }

        salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL);
        if (!salt) {
                ti->error = "Error kmallocing salt storage in ESSIV";
                err = -ENOMEM;
                goto bad;
        }

        cc->iv_gen_private.essiv.salt = salt;
        cc->iv_gen_private.essiv.hash_tfm = hash_tfm;

        essiv_tfm = setup_essiv_cpu(cc, ti, salt,
                                    crypto_hash_digestsize(hash_tfm));
        if (IS_ERR(essiv_tfm)) {
                crypt_iv_essiv_dtr(cc);
                return PTR_ERR(essiv_tfm);
        }
        cc->iv_private = essiv_tfm;

        return 0;

bad:
        if (hash_tfm && !IS_ERR(hash_tfm))
                crypto_free_hash(hash_tfm);
        kfree(salt);
        return err;
}

static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
                              struct dm_crypt_request *dmreq)
{
        struct crypto_cipher *essiv_tfm = cc->iv_private;

        memset(iv, 0, cc->iv_size);
        *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
        crypto_cipher_encrypt_one(essiv_tfm, iv, iv);

        return 0;
}

static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
                              const char *opts)
{
        unsigned bs = crypto_ablkcipher_blocksize(any_tfm(cc));
        int log = ilog2(bs);

        /* we need to calculate how far we must shift the sector count
         * to get the cipher block count; we use this shift in _gen */

        if (1 << log != bs) {
                ti->error = "cipher blocksize is not a power of 2";
                return -EINVAL;
        }

        if (log > 9) {
                ti->error = "cipher blocksize is > 512";
                return -EINVAL;
        }

        cc->iv_gen_private.benbi.shift = 9 - log;

        return 0;
}
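
/*
 * Worked example (illustrative): a 16-byte-block cipher gives log = 4 and
 * shift = 9 - 4 = 5, so each 512-byte sector spans 2^5 = 32 cipher blocks
 * and _gen below computes (sector << 5) + 1 as the big-endian block count.
 */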

static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
}

static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv,
                              struct dm_crypt_request *dmreq)
{
        __be64 val;

        memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */

        val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1);
        put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));

        return 0;
}

static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv,
                             struct dm_crypt_request *dmreq)
{
        memset(iv, 0, cc->iv_size);

        return 0;
}

static void crypt_iv_lmk_dtr(struct crypt_config *cc)
{
        struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

        if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm))
                crypto_free_shash(lmk->hash_tfm);
        lmk->hash_tfm = NULL;

        kzfree(lmk->seed);
        lmk->seed = NULL;
}

static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
                            const char *opts)
{
        struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

        lmk->hash_tfm = crypto_alloc_shash("md5", 0, 0);
        if (IS_ERR(lmk->hash_tfm)) {
                ti->error = "Error initializing LMK hash";
                return PTR_ERR(lmk->hash_tfm);
        }

        /* No seed in LMK version 2 */
        if (cc->key_parts == cc->tfms_count) {
                lmk->seed = NULL;
                return 0;
        }

        lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL);
        if (!lmk->seed) {
                crypt_iv_lmk_dtr(cc);
                ti->error = "Error kmallocing seed storage in LMK";
                return -ENOMEM;
        }

        return 0;
}

static int crypt_iv_lmk_init(struct crypt_config *cc)
{
        struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
        int subkey_size = cc->key_size / cc->key_parts;

        /* LMK seed is on the position of LMK_KEYS + 1 key */
        if (lmk->seed)
                memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size),
                       crypto_shash_digestsize(lmk->hash_tfm));

        return 0;
}

static int crypt_iv_lmk_wipe(struct crypt_config *cc)
{
        struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

        if (lmk->seed)
                memset(lmk->seed, 0, LMK_SEED_SIZE);

        return 0;
}

static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
                            struct dm_crypt_request *dmreq,
                            u8 *data)
{
        struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
        struct {
                struct shash_desc desc;
                char ctx[crypto_shash_descsize(lmk->hash_tfm)];
        } sdesc;
        struct md5_state md5state;
        __le32 buf[4];
        int i, r;

        sdesc.desc.tfm = lmk->hash_tfm;
        sdesc.desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

        r = crypto_shash_init(&sdesc.desc);
        if (r)
                return r;

        if (lmk->seed) {
                r = crypto_shash_update(&sdesc.desc, lmk->seed, LMK_SEED_SIZE);
                if (r)
                        return r;
        }

        /* Sector is always 512B, block size 16, add data of blocks 1-31 */
        r = crypto_shash_update(&sdesc.desc, data + 16, 16 * 31);
        if (r)
                return r;

        /* Sector is cropped to 56 bits here */
        buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF);
        buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000);
        buf[2] = cpu_to_le32(4024);
        buf[3] = 0;
        r = crypto_shash_update(&sdesc.desc, (u8 *)buf, sizeof(buf));
        if (r)
                return r;

        /* No MD5 padding here */
        r = crypto_shash_export(&sdesc.desc, &md5state);
        if (r)
                return r;

        for (i = 0; i < MD5_HASH_WORDS; i++)
                __cpu_to_le32s(&md5state.hash[i]);
        memcpy(iv, &md5state.hash, cc->iv_size);

        return 0;
}

static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
                            struct dm_crypt_request *dmreq)
{
        u8 *src;
        int r = 0;

        if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
                src = kmap_atomic(sg_page(&dmreq->sg_in));
                r = crypt_iv_lmk_one(cc, iv, dmreq, src + dmreq->sg_in.offset);
                kunmap_atomic(src);
        } else
                memset(iv, 0, cc->iv_size);

        return r;
}

static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
                             struct dm_crypt_request *dmreq)
{
        u8 *dst;
        int r;

        if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
                return 0;

        dst = kmap_atomic(sg_page(&dmreq->sg_out));
        r = crypt_iv_lmk_one(cc, iv, dmreq, dst + dmreq->sg_out.offset);

        /* Tweak the first block of plaintext sector */
        if (!r)
                crypto_xor(dst + dmreq->sg_out.offset, iv, cc->iv_size);

        kunmap_atomic(dst);
        return r;
}

static void crypt_iv_tcw_dtr(struct crypt_config *cc)
{
        struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

        kzfree(tcw->iv_seed);
        tcw->iv_seed = NULL;
        kzfree(tcw->whitening);
        tcw->whitening = NULL;

        if (tcw->crc32_tfm && !IS_ERR(tcw->crc32_tfm))
                crypto_free_shash(tcw->crc32_tfm);
        tcw->crc32_tfm = NULL;
}

static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
                            const char *opts)
{
        struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

        if (cc->key_size <= (cc->iv_size + TCW_WHITENING_SIZE)) {
                ti->error = "Wrong key size for TCW";
                return -EINVAL;
        }

        tcw->crc32_tfm = crypto_alloc_shash("crc32", 0, 0);
        if (IS_ERR(tcw->crc32_tfm)) {
                ti->error = "Error initializing CRC32 in TCW";
                return PTR_ERR(tcw->crc32_tfm);
        }

        tcw->iv_seed = kzalloc(cc->iv_size, GFP_KERNEL);
        tcw->whitening = kzalloc(TCW_WHITENING_SIZE, GFP_KERNEL);
        if (!tcw->iv_seed || !tcw->whitening) {
                crypt_iv_tcw_dtr(cc);
                ti->error = "Error allocating seed storage in TCW";
                return -ENOMEM;
        }

        return 0;
}

static int crypt_iv_tcw_init(struct crypt_config *cc)
{
        struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
        int key_offset = cc->key_size - cc->iv_size - TCW_WHITENING_SIZE;

        memcpy(tcw->iv_seed, &cc->key[key_offset], cc->iv_size);
        memcpy(tcw->whitening, &cc->key[key_offset + cc->iv_size],
               TCW_WHITENING_SIZE);

        return 0;
}

static int crypt_iv_tcw_wipe(struct crypt_config *cc)
{
        struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

        memset(tcw->iv_seed, 0, cc->iv_size);
        memset(tcw->whitening, 0, TCW_WHITENING_SIZE);

        return 0;
}

static int crypt_iv_tcw_whitening(struct crypt_config *cc,
                                  struct dm_crypt_request *dmreq,
                                  u8 *data)
{
        struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
        u64 sector = cpu_to_le64((u64)dmreq->iv_sector);
        u8 buf[TCW_WHITENING_SIZE];
        struct {
                struct shash_desc desc;
                char ctx[crypto_shash_descsize(tcw->crc32_tfm)];
        } sdesc;
        int i, r;

        /* xor whitening with sector number */
        memcpy(buf, tcw->whitening, TCW_WHITENING_SIZE);
        crypto_xor(buf, (u8 *)&sector, 8);
        crypto_xor(&buf[8], (u8 *)&sector, 8);

        /* calculate crc32 for every 32bit part and xor it */
        sdesc.desc.tfm = tcw->crc32_tfm;
        sdesc.desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
        for (i = 0; i < 4; i++) {
                r = crypto_shash_init(&sdesc.desc);
                if (r)
                        goto out;
                r = crypto_shash_update(&sdesc.desc, &buf[i * 4], 4);
                if (r)
                        goto out;
                r = crypto_shash_final(&sdesc.desc, &buf[i * 4]);
                if (r)
                        goto out;
        }
        crypto_xor(&buf[0], &buf[12], 4);
        crypto_xor(&buf[4], &buf[8], 4);

        /* apply whitening (8 bytes) to whole sector */
        for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++)
                crypto_xor(data + i * 8, buf, 8);
out:
        memset(buf, 0, sizeof(buf));
        return r;
}
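
/*
 * Summary of the math above (explanatory): buf starts as the 16-byte
 * whitening seed XORed with the little-endian sector number (once per
 * 8-byte half); each 4-byte quarter is then replaced by its CRC32, the
 * quarters are folded down to 8 bytes, and those 8 bytes are XORed
 * across all 64 8-byte words of the 512-byte sector.
 */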

static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
                            struct dm_crypt_request *dmreq)
{
        struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
        u64 sector = cpu_to_le64((u64)dmreq->iv_sector);
        u8 *src;
        int r = 0;

        /* Remove whitening from ciphertext */
        if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
                src = kmap_atomic(sg_page(&dmreq->sg_in));
                r = crypt_iv_tcw_whitening(cc, dmreq, src + dmreq->sg_in.offset);
                kunmap_atomic(src);
        }

        /* Calculate IV */
        memcpy(iv, tcw->iv_seed, cc->iv_size);
        crypto_xor(iv, (u8 *)&sector, 8);
        if (cc->iv_size > 8)
                crypto_xor(&iv[8], (u8 *)&sector, cc->iv_size - 8);

        return r;
}

static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
                             struct dm_crypt_request *dmreq)
{
        u8 *dst;
        int r;

        if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
                return 0;

        /* Apply whitening on ciphertext */
        dst = kmap_atomic(sg_page(&dmreq->sg_out));
        r = crypt_iv_tcw_whitening(cc, dmreq, dst + dmreq->sg_out.offset);
        kunmap_atomic(dst);

        return r;
}

static struct crypt_iv_operations crypt_iv_plain_ops = {
        .generator = crypt_iv_plain_gen
};

static struct crypt_iv_operations crypt_iv_plain64_ops = {
        .generator = crypt_iv_plain64_gen
};

static struct crypt_iv_operations crypt_iv_essiv_ops = {
        .ctr       = crypt_iv_essiv_ctr,
        .dtr       = crypt_iv_essiv_dtr,
        .init      = crypt_iv_essiv_init,
        .wipe      = crypt_iv_essiv_wipe,
        .generator = crypt_iv_essiv_gen
};

static struct crypt_iv_operations crypt_iv_benbi_ops = {
        .ctr       = crypt_iv_benbi_ctr,
        .dtr       = crypt_iv_benbi_dtr,
        .generator = crypt_iv_benbi_gen
};

static struct crypt_iv_operations crypt_iv_null_ops = {
        .generator = crypt_iv_null_gen
};

static struct crypt_iv_operations crypt_iv_lmk_ops = {
        .ctr       = crypt_iv_lmk_ctr,
        .dtr       = crypt_iv_lmk_dtr,
        .init      = crypt_iv_lmk_init,
        .wipe      = crypt_iv_lmk_wipe,
        .generator = crypt_iv_lmk_gen,
        .post      = crypt_iv_lmk_post
};

static struct crypt_iv_operations crypt_iv_tcw_ops = {
        .ctr       = crypt_iv_tcw_ctr,
        .dtr       = crypt_iv_tcw_dtr,
        .init      = crypt_iv_tcw_init,
        .wipe      = crypt_iv_tcw_wipe,
        .generator = crypt_iv_tcw_gen,
        .post      = crypt_iv_tcw_post
};

static void crypt_convert_init(struct crypt_config *cc,
                               struct convert_context *ctx,
                               struct bio *bio_out, struct bio *bio_in,
                               sector_t sector)
{
        ctx->bio_in = bio_in;
        ctx->bio_out = bio_out;
        if (bio_in)
                ctx->iter_in = bio_in->bi_iter;
        if (bio_out)
                ctx->iter_out = bio_out->bi_iter;
        ctx->cc_sector = sector + cc->iv_offset;
        init_completion(&ctx->restart);
}

static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
                                             struct ablkcipher_request *req)
{
        return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
}

static struct ablkcipher_request *req_of_dmreq(struct crypt_config *cc,
                                               struct dm_crypt_request *dmreq)
{
        return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start);
}

static u8 *iv_of_dmreq(struct crypt_config *cc,
                       struct dm_crypt_request *dmreq)
{
        return (u8 *)ALIGN((unsigned long)(dmreq + 1),
                           crypto_ablkcipher_alignmask(any_tfm(cc)) + 1);
}
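
/*
 * Layout sketch (illustrative numbers only): with dmreq_start = 160 and
 * an alignmask of 15, a request allocated at address A holds the
 * ablkcipher_request and tfm context in [A, A + 160), the
 * dm_crypt_request at A + 160, and iv_of_dmreq() rounds
 * A + 160 + sizeof(struct dm_crypt_request) up to the next 16-byte
 * boundary for the IV.
 */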

static int crypt_convert_block(struct crypt_config *cc,
                               struct convert_context *ctx,
                               struct ablkcipher_request *req)
{
        struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
        struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
        struct dm_crypt_request *dmreq;
        u8 *iv;
        int r;

        dmreq = dmreq_of_req(cc, req);
        iv = iv_of_dmreq(cc, dmreq);

        dmreq->iv_sector = ctx->cc_sector;
        dmreq->ctx = ctx;
        sg_init_table(&dmreq->sg_in, 1);
        sg_set_page(&dmreq->sg_in, bv_in.bv_page, 1 << SECTOR_SHIFT,
                    bv_in.bv_offset);

        sg_init_table(&dmreq->sg_out, 1);
        sg_set_page(&dmreq->sg_out, bv_out.bv_page, 1 << SECTOR_SHIFT,
                    bv_out.bv_offset);

        bio_advance_iter(ctx->bio_in, &ctx->iter_in, 1 << SECTOR_SHIFT);
        bio_advance_iter(ctx->bio_out, &ctx->iter_out, 1 << SECTOR_SHIFT);

        if (cc->iv_gen_ops) {
                r = cc->iv_gen_ops->generator(cc, iv, dmreq);
                if (r < 0)
                        return r;
        }

        ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
                                     1 << SECTOR_SHIFT, iv);

        if (bio_data_dir(ctx->bio_in) == WRITE)
                r = crypto_ablkcipher_encrypt(req);
        else
                r = crypto_ablkcipher_decrypt(req);

        if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
                r = cc->iv_gen_ops->post(cc, iv, dmreq);

        return r;
}

static void kcryptd_async_done(struct crypto_async_request *async_req,
                               int error);

static void crypt_alloc_req(struct crypt_config *cc,
                            struct convert_context *ctx)
{
        unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);

        if (!ctx->req)
                ctx->req = mempool_alloc(cc->req_pool, GFP_NOIO);

        ablkcipher_request_set_tfm(ctx->req, cc->tfms[key_index]);
        ablkcipher_request_set_callback(ctx->req,
            CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
            kcryptd_async_done, dmreq_of_req(cc, ctx->req));
}

static void crypt_free_req(struct crypt_config *cc,
                           struct ablkcipher_request *req, struct bio *base_bio)
{
        struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);

        if ((struct ablkcipher_request *)(io + 1) != req)
                mempool_free(req, cc->req_pool);
}

/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */
static int crypt_convert(struct crypt_config *cc,
                         struct convert_context *ctx)
{
        int r;

        atomic_set(&ctx->cc_pending, 1);

        while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {

                crypt_alloc_req(cc, ctx);

                atomic_inc(&ctx->cc_pending);

                r = crypt_convert_block(cc, ctx, ctx->req);

                switch (r) {
                /* async */
                case -EBUSY:
                        wait_for_completion(&ctx->restart);
                        reinit_completion(&ctx->restart);
                        /* fall through */
                case -EINPROGRESS:
                        ctx->req = NULL;
                        ctx->cc_sector++;
                        continue;

                /* sync */
                case 0:
                        atomic_dec(&ctx->cc_pending);
                        ctx->cc_sector++;
                        cond_resched();
                        continue;

                /* error */
                default:
                        atomic_dec(&ctx->cc_pending);
                        return r;
                }
        }

        return 0;
}
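
/*
 * Note on the counting scheme above (explanatory): cc_pending starts at 1
 * so that async blocks completing in-flight can never drop the count to
 * zero while the loop is still submitting more work; the caller drops the
 * final reference after crypt_convert() returns and only then treats the
 * conversion as complete.
 */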

/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations
 * May return a smaller bio when running out of pages, indicated by
 * *out_of_pages set to 1.
 */
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
                                      unsigned *out_of_pages)
{
        struct crypt_config *cc = io->cc;
        struct bio *clone;
        unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
        unsigned i, len;
        struct page *page;

        clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
        if (!clone)
                return NULL;

        clone_init(io, clone);
        *out_of_pages = 0;

        for (i = 0; i < nr_iovecs; i++) {
                page = mempool_alloc(cc->page_pool, gfp_mask);
                if (!page) {
                        *out_of_pages = 1;
                        break;
                }

                /*
                 * If additional pages cannot be allocated without waiting,
                 * return a partially-allocated bio. The caller will then try
                 * to allocate more bios while submitting this partial bio.
                 */
                gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;

                len = (size > PAGE_SIZE) ? PAGE_SIZE : size;

                if (!bio_add_page(clone, page, len, 0)) {
                        mempool_free(page, cc->page_pool);
                        break;
                }

                size -= len;
        }

        if (!clone->bi_iter.bi_size) {
                bio_put(clone);
                return NULL;
        }

        return clone;
}

static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
{
        unsigned int i;
        struct bio_vec *bv;

        bio_for_each_segment_all(bv, clone, i) {
                BUG_ON(!bv->bv_page);
                mempool_free(bv->bv_page, cc->page_pool);
                bv->bv_page = NULL;
        }
}

static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
                          struct bio *bio, sector_t sector)
{
        io->cc = cc;
        io->base_bio = bio;
        io->sector = sector;
        io->error = 0;
        io->base_io = NULL;
        io->ctx.req = NULL;
        atomic_set(&io->io_pending, 0);
}

static void crypt_inc_pending(struct dm_crypt_io *io)
{
        atomic_inc(&io->io_pending);
}

/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 * If base_io is set, wait for the last fragment to complete.
 */
static void crypt_dec_pending(struct dm_crypt_io *io)
{
        struct crypt_config *cc = io->cc;
        struct bio *base_bio = io->base_bio;
        struct dm_crypt_io *base_io = io->base_io;
        int error = io->error;

        if (!atomic_dec_and_test(&io->io_pending))
                return;

        if (io->ctx.req)
                crypt_free_req(cc, io->ctx.req, base_bio);
        if (io != dm_per_bio_data(base_bio, cc->per_bio_data_size))
                mempool_free(io, cc->io_pool);

        if (likely(!base_io))
                bio_endio(base_bio, error);
        else {
                if (error && !base_io->error)
                        base_io->error = error;
                crypt_dec_pending(base_io);
        }
}

/*
 * kcryptd/kcryptd_io:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated as otherwise the final stages could be
 * starved by new requests which can block in the first stages due
 * to memory allocation.
 *
 * The work is done per CPU, globally for all dm-crypt instances.
 * They should not depend on each other and do not block.
 */
static void crypt_endio(struct bio *clone, int error)
{
        struct dm_crypt_io *io = clone->bi_private;
        struct crypt_config *cc = io->cc;
        unsigned rw = bio_data_dir(clone);

        if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
                error = -EIO;

        /*
         * free the processed pages
         */
        if (rw == WRITE)
                crypt_free_buffer_pages(cc, clone);

        bio_put(clone);

        if (rw == READ && !error) {
                kcryptd_queue_crypt(io);
                return;
        }

        if (unlikely(error))
                io->error = error;

        crypt_dec_pending(io);
}

static void clone_init(struct dm_crypt_io *io, struct bio *clone)
{
        struct crypt_config *cc = io->cc;

        clone->bi_private = io;
        clone->bi_end_io = crypt_endio;
        clone->bi_bdev = cc->dev->bdev;
        clone->bi_rw = io->base_bio->bi_rw;
}

static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
{
        struct crypt_config *cc = io->cc;
        struct bio *base_bio = io->base_bio;
        struct bio *clone;

        /*
         * The block layer might modify the bvec array, so always
         * copy the required bvecs because we need the original
         * one in order to decrypt the whole bio data *afterwards*.
         */
        clone = bio_clone_bioset(base_bio, gfp, cc->bs);
        if (!clone)
                return 1;

        crypt_inc_pending(io);

        clone_init(io, clone);
        clone->bi_iter.bi_sector = cc->start + io->sector;

        generic_make_request(clone);
        return 0;
}

static void kcryptd_io_write(struct dm_crypt_io *io)
{
        struct bio *clone = io->ctx.bio_out;
        generic_make_request(clone);
}

static void kcryptd_io(struct work_struct *work)
{
        struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

        if (bio_data_dir(io->base_bio) == READ) {
                crypt_inc_pending(io);
                if (kcryptd_io_read(io, GFP_NOIO))
                        io->error = -ENOMEM;
                crypt_dec_pending(io);
        } else
                kcryptd_io_write(io);
}

static void kcryptd_queue_io(struct dm_crypt_io *io)
{
        struct crypt_config *cc = io->cc;

        INIT_WORK(&io->work, kcryptd_io);
        queue_work(cc->io_queue, &io->work);
}

static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
{
        struct bio *clone = io->ctx.bio_out;
        struct crypt_config *cc = io->cc;

        if (unlikely(io->error < 0)) {
                crypt_free_buffer_pages(cc, clone);
                bio_put(clone);
                crypt_dec_pending(io);
                return;
        }

        /* crypt_convert should have filled the clone bio */
        BUG_ON(io->ctx.iter_out.bi_size);

        clone->bi_iter.bi_sector = cc->start + io->sector;

        if (async)
                kcryptd_queue_io(io);
        else
                generic_make_request(clone);
}

static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
        struct crypt_config *cc = io->cc;
        struct bio *clone;
        struct dm_crypt_io *new_io;
        int crypt_finished;
        unsigned out_of_pages = 0;
        unsigned remaining = io->base_bio->bi_iter.bi_size;
        sector_t sector = io->sector;
        int r;

        /*
         * Prevent io from disappearing until this function completes.
         */
        crypt_inc_pending(io);
        crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);

        /*
         * The allocated buffers can be smaller than the whole bio,
         * so repeat the whole process until all the data can be handled.
         */
        while (remaining) {
                clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
                if (unlikely(!clone)) {
                        io->error = -ENOMEM;
                        break;
                }

                io->ctx.bio_out = clone;
                io->ctx.iter_out = clone->bi_iter;

                remaining -= clone->bi_iter.bi_size;
                sector += bio_sectors(clone);

                crypt_inc_pending(io);

                r = crypt_convert(cc, &io->ctx);
                if (r < 0)
                        io->error = -EIO;

                crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);

                /* Encryption was already finished, submit io now */
                if (crypt_finished) {
                        kcryptd_crypt_write_io_submit(io, 0);

                        /*
                         * If there was an error, do not try next fragments.
                         * For async, error is processed in async handler.
                         */
                        if (unlikely(r < 0))
                                break;

                        io->sector = sector;
                }

                /*
                 * Out of memory -> run queues
                 * But don't wait if split was due to the io size restriction
                 */
                if (unlikely(out_of_pages))
                        congestion_wait(BLK_RW_ASYNC, HZ/100);

                /*
                 * With async crypto it is unsafe to share the crypto context
                 * between fragments, so switch to a new dm_crypt_io structure.
                 */
                if (unlikely(!crypt_finished && remaining)) {
                        new_io = mempool_alloc(cc->io_pool, GFP_NOIO);
                        crypt_io_init(new_io, io->cc, io->base_bio, sector);
                        crypt_inc_pending(new_io);
                        crypt_convert_init(cc, &new_io->ctx, NULL,
                                           io->base_bio, sector);
                        new_io->ctx.iter_in = io->ctx.iter_in;

                        /*
                         * Fragments after the first use the base_io
                         * pending count.
                         */
                        if (!io->base_io)
                                new_io->base_io = io;
                        else {
                                new_io->base_io = io->base_io;
                                crypt_inc_pending(io->base_io);
                                crypt_dec_pending(io);
                        }

                        io = new_io;
                }
        }

        crypt_dec_pending(io);
}

static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
{
        crypt_dec_pending(io);
}

static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
{
        struct crypt_config *cc = io->cc;
        int r = 0;

        crypt_inc_pending(io);

        crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
                           io->sector);

        r = crypt_convert(cc, &io->ctx);
        if (r < 0)
                io->error = -EIO;

        if (atomic_dec_and_test(&io->ctx.cc_pending))
                kcryptd_crypt_read_done(io);

        crypt_dec_pending(io);
}

static void kcryptd_async_done(struct crypto_async_request *async_req,
                               int error)
{
        struct dm_crypt_request *dmreq = async_req->data;
        struct convert_context *ctx = dmreq->ctx;
        struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
        struct crypt_config *cc = io->cc;

        if (error == -EINPROGRESS) {
                complete(&ctx->restart);
                return;
        }

        if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
                error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq);

        if (error < 0)
                io->error = -EIO;

        crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);

        if (!atomic_dec_and_test(&ctx->cc_pending))
                return;

        if (bio_data_dir(io->base_bio) == READ)
                kcryptd_crypt_read_done(io);
        else
                kcryptd_crypt_write_io_submit(io, 1);
}

static void kcryptd_crypt(struct work_struct *work)
{
        struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

        if (bio_data_dir(io->base_bio) == READ)
                kcryptd_crypt_read_convert(io);
        else
                kcryptd_crypt_write_convert(io);
}

static void kcryptd_queue_crypt(struct dm_crypt_io *io)
{
        struct crypt_config *cc = io->cc;

        INIT_WORK(&io->work, kcryptd_crypt);
        queue_work(cc->crypt_queue, &io->work);
}

/*
 * Decode key from its hex representation
 */
static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
{
        char buffer[3];
        unsigned int i;

        buffer[2] = '\0';

        for (i = 0; i < size; i++) {
                buffer[0] = *hex++;
                buffer[1] = *hex++;

                if (kstrtou8(buffer, 16, &key[i]))
                        return -EINVAL;
        }

        if (*hex != '\0')
                return -EINVAL;

        return 0;
}
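
/*
 * Usage sketch (illustrative): crypt_decode_key(key, "deadbeef", 4) fills
 * key[] with { 0xde, 0xad, 0xbe, 0xef }; a non-hex character fails the
 * kstrtou8() conversion and a string longer than 2 * size fails the
 * trailing '\0' check, both returning -EINVAL.
 */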

static void crypt_free_tfms(struct crypt_config *cc)
{
        unsigned i;

        if (!cc->tfms)
                return;

        for (i = 0; i < cc->tfms_count; i++)
                if (cc->tfms[i] && !IS_ERR(cc->tfms[i])) {
                        crypto_free_ablkcipher(cc->tfms[i]);
                        cc->tfms[i] = NULL;
                }

        kfree(cc->tfms);
        cc->tfms = NULL;
}

static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
{
        unsigned i;
        int err;

        cc->tfms = kmalloc(cc->tfms_count * sizeof(struct crypto_ablkcipher *),
                           GFP_KERNEL);
        if (!cc->tfms)
                return -ENOMEM;

        for (i = 0; i < cc->tfms_count; i++) {
                cc->tfms[i] = crypto_alloc_ablkcipher(ciphermode, 0, 0);
                if (IS_ERR(cc->tfms[i])) {
                        err = PTR_ERR(cc->tfms[i]);
                        crypt_free_tfms(cc);
                        return err;
                }
        }

        return 0;
}

static int crypt_setkey_allcpus(struct crypt_config *cc)
{
        unsigned subkey_size;
        int err = 0, i, r;

        /* Ignore extra keys (which are used for IV etc) */
        subkey_size = (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count);

        for (i = 0; i < cc->tfms_count; i++) {
                r = crypto_ablkcipher_setkey(cc->tfms[i],
                                             cc->key + (i * subkey_size),
                                             subkey_size);
                if (r)
                        err = r;
        }

        return err;
}

static int crypt_set_key(struct crypt_config *cc, char *key)
{
        int r = -EINVAL;
        int key_string_len = strlen(key);

        /* The key size may not be changed. */
        if (cc->key_size != (key_string_len >> 1))
                goto out;

        /* Hyphen (which gives a key_size of zero) means there is no key. */
        if (!cc->key_size && strcmp(key, "-"))
                goto out;

        if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0)
                goto out;

        set_bit(DM_CRYPT_KEY_VALID, &cc->flags);

        r = crypt_setkey_allcpus(cc);

out:
        /* Hex key string not needed after here, so wipe it. */
        memset(key, '0', key_string_len);

        return r;
}

static int crypt_wipe_key(struct crypt_config *cc)
{
        clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
        memset(&cc->key, 0, cc->key_size * sizeof(u8));

        return crypt_setkey_allcpus(cc);
}

static void crypt_dtr(struct dm_target *ti)
{
        struct crypt_config *cc = ti->private;

        ti->private = NULL;

        if (!cc)
                return;

        if (cc->io_queue)
                destroy_workqueue(cc->io_queue);
        if (cc->crypt_queue)
                destroy_workqueue(cc->crypt_queue);

        crypt_free_tfms(cc);

        if (cc->bs)
                bioset_free(cc->bs);

        if (cc->page_pool)
                mempool_destroy(cc->page_pool);
        if (cc->req_pool)
                mempool_destroy(cc->req_pool);
        if (cc->io_pool)
                mempool_destroy(cc->io_pool);

        if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
                cc->iv_gen_ops->dtr(cc);

        if (cc->dev)
                dm_put_device(ti, cc->dev);

        kzfree(cc->cipher);
        kzfree(cc->cipher_string);

        /* Must zero key material before freeing */
        kzfree(cc);
}

static int crypt_ctr_cipher(struct dm_target *ti,
                            char *cipher_in, char *key)
{
        struct crypt_config *cc = ti->private;
        char *tmp, *cipher, *chainmode, *ivmode, *ivopts, *keycount;
        char *cipher_api = NULL;
        int ret = -EINVAL;
        char dummy;

        /* Convert to crypto api definition? */
        if (strchr(cipher_in, '(')) {
                ti->error = "Bad cipher specification";
                return -EINVAL;
        }

        cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL);
        if (!cc->cipher_string)
                goto bad_mem;

        /*
         * Legacy dm-crypt cipher specification
         * cipher[:keycount]-mode-iv:ivopts
         */
        tmp = cipher_in;
        keycount = strsep(&tmp, "-");
        cipher = strsep(&keycount, ":");

        if (!keycount)
                cc->tfms_count = 1;
        else if (sscanf(keycount, "%u%c", &cc->tfms_count, &dummy) != 1 ||
                 !is_power_of_2(cc->tfms_count)) {
                ti->error = "Bad cipher key count specification";
                return -EINVAL;
        }
        cc->key_parts = cc->tfms_count;
        cc->key_extra_size = 0;

        cc->cipher = kstrdup(cipher, GFP_KERNEL);
        if (!cc->cipher)
                goto bad_mem;

        chainmode = strsep(&tmp, "-");
        ivopts = strsep(&tmp, "-");
        ivmode = strsep(&ivopts, ":");

        if (tmp)
                DMWARN("Ignoring unexpected additional cipher options");

        /*
         * For compatibility with the original dm-crypt mapping format, if
         * only the cipher name is supplied, use cbc-plain.
         */
        if (!chainmode || (!strcmp(chainmode, "plain") && !ivmode)) {
                chainmode = "cbc";
                ivmode = "plain";
        }

        if (strcmp(chainmode, "ecb") && !ivmode) {
                ti->error = "IV mechanism required";
                return -EINVAL;
        }

        cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL);
        if (!cipher_api)
                goto bad_mem;

        ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
                       "%s(%s)", chainmode, cipher);
        if (ret < 0) {
                kfree(cipher_api);
                goto bad_mem;
        }

        /* Allocate cipher */
        ret = crypt_alloc_tfms(cc, cipher_api);
        if (ret < 0) {
                ti->error = "Error allocating crypto tfm";
                goto bad;
        }

        /* Initialize IV */
        cc->iv_size = crypto_ablkcipher_ivsize(any_tfm(cc));
        if (cc->iv_size)
                /* at least a 64 bit sector number should fit in our buffer */
                cc->iv_size = max(cc->iv_size,
                                  (unsigned int)(sizeof(u64) / sizeof(u8)));
        else if (ivmode) {
                DMWARN("Selected cipher does not support IVs");
                ivmode = NULL;
        }

        /* Choose ivmode, see comments at iv code. */
        if (ivmode == NULL)
                cc->iv_gen_ops = NULL;
        else if (strcmp(ivmode, "plain") == 0)
                cc->iv_gen_ops = &crypt_iv_plain_ops;
        else if (strcmp(ivmode, "plain64") == 0)
                cc->iv_gen_ops = &crypt_iv_plain64_ops;
        else if (strcmp(ivmode, "essiv") == 0)
                cc->iv_gen_ops = &crypt_iv_essiv_ops;
        else if (strcmp(ivmode, "benbi") == 0)
                cc->iv_gen_ops = &crypt_iv_benbi_ops;
        else if (strcmp(ivmode, "null") == 0)
                cc->iv_gen_ops = &crypt_iv_null_ops;
        else if (strcmp(ivmode, "lmk") == 0) {
                cc->iv_gen_ops = &crypt_iv_lmk_ops;
                /*
                 * Versions 2 and 3 are recognised according
                 * to the length of the provided multi-key string.
                 * If present (version 3), the last key is used as the IV seed.
                 * All keys (including the IV seed) are always the same size.
1631 | */ | |
1632 | if (cc->key_size % cc->key_parts) { | |
1633 | cc->key_parts++; | |
1634 | cc->key_extra_size = cc->key_size / cc->key_parts; | |
1635 | } | |
1636 | } else if (strcmp(ivmode, "tcw") == 0) { | |
1637 | cc->iv_gen_ops = &crypt_iv_tcw_ops; | |
1638 | cc->key_parts += 2; /* IV + whitening */ | |
1639 | cc->key_extra_size = cc->iv_size + TCW_WHITENING_SIZE; | |
1640 | } else { | |
1641 | ret = -EINVAL; | |
1642 | ti->error = "Invalid IV mode"; | |
1643 | goto bad; | |
1644 | } | |
1645 | ||
1646 | /* Initialize and set key */ | |
1647 | ret = crypt_set_key(cc, key); | |
1648 | if (ret < 0) { | |
1649 | ti->error = "Error decoding and setting key"; | |
1650 | goto bad; | |
1651 | } | |
1652 | ||
1653 | /* Allocate IV */ | |
1654 | if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) { | |
1655 | ret = cc->iv_gen_ops->ctr(cc, ti, ivopts); | |
1656 | if (ret < 0) { | |
1657 | ti->error = "Error creating IV"; | |
1658 | goto bad; | |
1659 | } | |
1660 | } | |
1661 | ||
1662 | /* Initialize IV (set keys for ESSIV etc) */ | |
1663 | if (cc->iv_gen_ops && cc->iv_gen_ops->init) { | |
1664 | ret = cc->iv_gen_ops->init(cc); | |
1665 | if (ret < 0) { | |
1666 | ti->error = "Error initialising IV"; | |
1667 | goto bad; | |
1668 | } | |
1669 | } | |
1670 | ||
1671 | ret = 0; | |
1672 | bad: | |
1673 | kfree(cipher_api); | |
1674 | return ret; | |
1675 | ||
1676 | bad_mem: | |
1677 | ti->error = "Cannot allocate cipher strings"; | |
1678 | return -ENOMEM; | |
1679 | } | |
1680 | ||
1681 | /* | |
1682 | * Construct an encryption mapping: | |
1683 | * <cipher> <key> <iv_offset> <dev_path> <start> | |
1684 | */ | |
1685 | static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |
1686 | { | |
1687 | struct crypt_config *cc; | |
1688 | unsigned int key_size, opt_params; | |
1689 | unsigned long long tmpll; | |
1690 | int ret; | |
1691 | size_t iv_size_padding; | |
1692 | struct dm_arg_set as; | |
1693 | const char *opt_string; | |
1694 | char dummy; | |
1695 | ||
1696 | static struct dm_arg _args[] = { | |
1697 | {0, 1, "Invalid number of feature args"}, | |
1698 | }; | |
1699 | ||
1700 | if (argc < 5) { | |
1701 | ti->error = "Not enough arguments"; | |
1702 | return -EINVAL; | |
1703 | } | |
1704 | ||
1705 | key_size = strlen(argv[1]) >> 1; | |

	cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
	if (!cc) {
		ti->error = "Cannot allocate encryption context";
		return -ENOMEM;
	}
	cc->key_size = key_size;

	ti->private = cc;
	ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
	if (ret < 0)
		goto bad;

	ret = -ENOMEM;
	cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
	if (!cc->io_pool) {
		ti->error = "Cannot allocate crypt io mempool";
		goto bad;
	}

	cc->dmreq_start = sizeof(struct ablkcipher_request);
	cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
	cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));

	if (crypto_ablkcipher_alignmask(any_tfm(cc)) < CRYPTO_MINALIGN) {
		/* Allocate the padding exactly */
		iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request))
				& crypto_ablkcipher_alignmask(any_tfm(cc));
	} else {
		/*
		 * If the cipher requires greater alignment than kmalloc
		 * alignment, we don't know the exact position of the
		 * initialization vector. We must assume worst case.
		 */
		iv_size_padding = crypto_ablkcipher_alignmask(any_tfm(cc));
	}
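	/*
	 * Illustrative arithmetic for the exact-padding branch: with an
	 * alignmask of 15 (16-byte alignment) and, say, dmreq_start +
	 * sizeof(struct dm_crypt_request) = 200, the expression
	 * -200 & 15 = 8 pads the IV up to the next 16-byte boundary (208).
	 */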

	cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
			sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size);
	if (!cc->req_pool) {
		ti->error = "Cannot allocate crypt request mempool";
		goto bad;
	}

	cc->per_bio_data_size = ti->per_bio_data_size =
		ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start +
		      sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size,
		      ARCH_KMALLOC_MINALIGN);
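	/*
	 * Per-bio data layout, matching the io->ctx.req assignment in
	 * crypt_map() below:
	 *
	 *	| struct dm_crypt_io | ablkcipher request + tfm context
	 *	| struct dm_crypt_request | padding | IV |
	 */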

	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
	if (!cc->page_pool) {
		ti->error = "Cannot allocate page mempool";
		goto bad;
	}

	cc->bs = bioset_create(MIN_IOS, 0);
	if (!cc->bs) {
		ti->error = "Cannot allocate crypt bioset";
		goto bad;
	}

	ret = -EINVAL;
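	/*
	 * The "%llu%c" + dummy idiom rejects trailing garbage: sscanf
	 * must convert exactly one item, so "123x" (two items) fails.
	 */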
	if (sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) {
		ti->error = "Invalid iv_offset sector";
		goto bad;
	}
	cc->iv_offset = tmpll;

	if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev)) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) {
		ti->error = "Invalid device sector";
		goto bad;
	}
	cc->start = tmpll;

	argv += 5;
	argc -= 5;

	/* Optional parameters */
	if (argc) {
		as.argc = argc;
		as.argv = argv;

		ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
		if (ret)
			goto bad;

		opt_string = dm_shift_arg(&as);

		if (opt_params == 1 && opt_string &&
		    !strcasecmp(opt_string, "allow_discards"))
			ti->num_discard_bios = 1;
		else if (opt_params) {
			ret = -EINVAL;
			ti->error = "Invalid feature arguments";
			goto bad;
		}
	}

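	/*
	 * Two queues: "kcryptd_io" only submits the underlying read bios,
	 * while "kcryptd" performs the actual encryption/decryption work
	 * and is therefore marked WQ_CPU_INTENSIVE.  Both carry
	 * WQ_MEM_RECLAIM since writeback may depend on their progress.
	 */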
	ret = -ENOMEM;
	cc->io_queue = alloc_workqueue("kcryptd_io", WQ_MEM_RECLAIM, 1);
	if (!cc->io_queue) {
		ti->error = "Couldn't create kcryptd io queue";
		goto bad;
	}

	cc->crypt_queue = alloc_workqueue("kcryptd",
					  WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
	if (!cc->crypt_queue) {
		ti->error = "Couldn't create kcryptd queue";
		goto bad;
	}

	ti->num_flush_bios = 1;
	ti->discard_zeroes_data_unsupported = true;

	return 0;

bad:
	crypt_dtr(ti);
	return ret;
}

static int crypt_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_crypt_io *io;
	struct crypt_config *cc = ti->private;

	/*
	 * If bio is REQ_FLUSH or REQ_DISCARD, just bypass crypt queues.
	 * - for REQ_FLUSH device-mapper core ensures that no IO is in-flight
	 * - for REQ_DISCARD caller must use flush if IO ordering matters
	 */
	if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) {
		bio->bi_bdev = cc->dev->bdev;
		if (bio_sectors(bio))
			bio->bi_iter.bi_sector = cc->start +
				dm_target_offset(ti, bio->bi_iter.bi_sector);
		return DM_MAPIO_REMAPPED;
	}

	io = dm_per_bio_data(bio, cc->per_bio_data_size);
	crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
	io->ctx.req = (struct ablkcipher_request *)(io + 1);
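	/*
	 * The crypto request lives directly behind struct dm_crypt_io in
	 * the per-bio data area sized via per_bio_data_size in crypt_ctr().
	 */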

	if (bio_data_dir(io->base_bio) == READ) {
		if (kcryptd_io_read(io, GFP_NOWAIT))
			kcryptd_queue_io(io);
	} else
		kcryptd_queue_crypt(io);

	return DM_MAPIO_SUBMITTED;
}

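/*
 * In the STATUSTYPE_TABLE case the construction parameters are echoed
 * back, so "dmsetup table" output mirrors the constructor line, e.g.
 * (illustrative): "aes-cbc-essiv:sha256 <hex key> 0 /dev/sdb 0", with
 * " 1 allow_discards" appended when discards were enabled.
 */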
static void crypt_status(struct dm_target *ti, status_type_t type,
			 unsigned status_flags, char *result, unsigned maxlen)
{
	struct crypt_config *cc = ti->private;
	unsigned i, sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s ", cc->cipher_string);

		if (cc->key_size > 0)
			for (i = 0; i < cc->key_size; i++)
				DMEMIT("%02x", cc->key[i]);
		else
			DMEMIT("-");

		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
		       cc->dev->name, (unsigned long long)cc->start);

		if (ti->num_discard_bios)
			DMEMIT(" 1 allow_discards");

		break;
	}
}

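/*
 * Key manipulation via the message interface below is only honoured
 * while the device is suspended; preresume then refuses to resume a
 * mapping whose key has been wiped but not set again.
 */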
static void crypt_postsuspend(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

static int crypt_preresume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
		DMERR("aborting resume - crypt key is not set.");
		return -EAGAIN;
	}

	return 0;
}

static void crypt_resume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

/* Message interface
 *	key set <key>
 *	key wipe
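 *
 * e.g. (illustrative, with the device suspended first):
 *	dmsetup message cryptdev 0 key wipe
 *	dmsetup message cryptdev 0 key set <new hex key>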
 */
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct crypt_config *cc = ti->private;
	int ret = -EINVAL;

	if (argc < 2)
		goto error;

	if (!strcasecmp(argv[0], "key")) {
		if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
			DMWARN("not suspended during key manipulation.");
			return -EINVAL;
		}
		if (argc == 3 && !strcasecmp(argv[1], "set")) {
			ret = crypt_set_key(cc, argv[2]);
			if (ret)
				return ret;
			if (cc->iv_gen_ops && cc->iv_gen_ops->init)
				ret = cc->iv_gen_ops->init(cc);
			return ret;
		}
		if (argc == 2 && !strcasecmp(argv[1], "wipe")) {
			if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
				ret = cc->iv_gen_ops->wipe(cc);
				if (ret)
					return ret;
			}
			return crypt_wipe_key(cc);
		}
	}

error:
	DMWARN("unrecognised message received.");
	return -EINVAL;
}

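/*
 * Delegate bvec merge decisions to the underlying device's
 * merge_bvec_fn, after remapping the sector into that device's
 * address space; without one, any size up to max_size is acceptable.
 */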
static int crypt_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
		       struct bio_vec *biovec, int max_size)
{
	struct crypt_config *cc = ti->private;
	struct request_queue *q = bdev_get_queue(cc->dev->bdev);

	if (!q->merge_bvec_fn)
		return max_size;

	bvm->bi_bdev = cc->dev->bdev;
	bvm->bi_sector = cc->start + dm_target_offset(ti, bvm->bi_sector);

	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}

static int crypt_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	struct crypt_config *cc = ti->private;

	return fn(ti, cc->dev, cc->start, ti->len, data);
}

static struct target_type crypt_target = {
	.name   = "crypt",
	.version = {1, 13, 0},
	.module = THIS_MODULE,
	.ctr    = crypt_ctr,
	.dtr    = crypt_dtr,
	.map    = crypt_map,
	.status = crypt_status,
	.postsuspend = crypt_postsuspend,
	.preresume = crypt_preresume,
	.resume = crypt_resume,
	.message = crypt_message,
	.merge  = crypt_merge,
	.iterate_devices = crypt_iterate_devices,
};

static int __init dm_crypt_init(void)
{
	int r;

	_crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0);
	if (!_crypt_io_pool)
		return -ENOMEM;

	r = dm_register_target(&crypt_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		kmem_cache_destroy(_crypt_io_pool);
	}

	return r;
}

static void __exit dm_crypt_exit(void)
{
	dm_unregister_target(&crypt_target);
	kmem_cache_destroy(_crypt_io_pool);
}

module_init(dm_crypt_init);
module_exit(dm_crypt_exit);

MODULE_AUTHOR("Jana Saout <jana@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");