// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Google LLC
 */

/*
 * Refer to Documentation/block/inline-encryption.rst for detailed explanation.
 */

#define pr_fmt(fmt) "blk-crypto-fallback: " fmt

#include <crypto/skcipher.h>
#include <linux/blk-cgroup.h>
#include <linux/blk-crypto.h>
#include <linux/blkdev.h>
#include <linux/crypto.h>
#include <linux/keyslot-manager.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/random.h>

#include "blk-crypto-internal.h"

static unsigned int num_prealloc_bounce_pg = 32;
module_param(num_prealloc_bounce_pg, uint, 0);
MODULE_PARM_DESC(num_prealloc_bounce_pg,
                 "Number of preallocated bounce pages for the blk-crypto crypto API fallback");

static unsigned int blk_crypto_num_keyslots = 100;
module_param_named(num_keyslots, blk_crypto_num_keyslots, uint, 0);
MODULE_PARM_DESC(num_keyslots,
                 "Number of keyslots for the blk-crypto crypto API fallback");

static unsigned int num_prealloc_fallback_crypt_ctxs = 128;
module_param(num_prealloc_fallback_crypt_ctxs, uint, 0);
MODULE_PARM_DESC(num_prealloc_fallback_crypt_ctxs,
                 "Number of preallocated bio fallback crypto contexts for blk-crypto to use during crypto API fallback");

struct bio_fallback_crypt_ctx {
        struct bio_crypt_ctx crypt_ctx;
        /*
         * Copy of the bvec_iter when this bio was submitted.
         * We only want to en/decrypt the part of the bio as described by the
         * bvec_iter upon submission because bio might be split before being
         * resubmitted.
         */
        struct bvec_iter crypt_iter;
        union {
                struct {
                        struct work_struct work;
                        struct bio *bio;
                };
                struct {
                        void *bi_private_orig;
                        bio_end_io_t *bi_end_io_orig;
                };
        };
};

static struct kmem_cache *bio_fallback_crypt_ctx_cache;
static mempool_t *bio_fallback_crypt_ctx_pool;

/*
 * Allocating a crypto tfm during I/O can deadlock, so we have to preallocate
 * all of a mode's tfms when that mode starts being used. Since each mode may
 * need all the keyslots at some point, each mode needs its own tfm for each
 * keyslot; thus, a keyslot may contain tfms for multiple modes. However, to
 * match the behavior of real inline encryption hardware (which only supports a
 * single encryption context per keyslot), we only allow one tfm per keyslot to
 * be used at a time - the rest of the unused tfms have their keys cleared.
 */
static DEFINE_MUTEX(tfms_init_lock);
static bool tfms_inited[BLK_ENCRYPTION_MODE_MAX];

static struct blk_crypto_keyslot {
        enum blk_crypto_mode_num crypto_mode;
        struct crypto_skcipher *tfms[BLK_ENCRYPTION_MODE_MAX];
} *blk_crypto_keyslots;

static struct blk_keyslot_manager blk_crypto_ksm;
static struct workqueue_struct *blk_crypto_wq;
static mempool_t *blk_crypto_bounce_page_pool;

/*
 * This is the key we set when evicting a keyslot. This *should* be the all 0's
 * key, but AES-XTS rejects that key, so we use some random bytes instead.
 */
static u8 blank_key[BLK_CRYPTO_MAX_KEY_SIZE];
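
/* Wipe the key programmed into a fallback keyslot and mark the slot unused. */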
static void blk_crypto_evict_keyslot(unsigned int slot)
{
        struct blk_crypto_keyslot *slotp = &blk_crypto_keyslots[slot];
        enum blk_crypto_mode_num crypto_mode = slotp->crypto_mode;
        int err;

        WARN_ON(slotp->crypto_mode == BLK_ENCRYPTION_MODE_INVALID);

        /* Clear the key in the skcipher */
        err = crypto_skcipher_setkey(slotp->tfms[crypto_mode], blank_key,
                                     blk_crypto_modes[crypto_mode].keysize);
        WARN_ON(err);
        slotp->crypto_mode = BLK_ENCRYPTION_MODE_INVALID;
}
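
/*
 * Program a key into a fallback keyslot: evict any key for a different mode
 * first, then set the new raw key on that mode's preallocated skcipher tfm.
 */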
static int blk_crypto_keyslot_program(struct blk_keyslot_manager *ksm,
                                      const struct blk_crypto_key *key,
                                      unsigned int slot)
{
        struct blk_crypto_keyslot *slotp = &blk_crypto_keyslots[slot];
        const enum blk_crypto_mode_num crypto_mode =
                                        key->crypto_cfg.crypto_mode;
        int err;

        if (crypto_mode != slotp->crypto_mode &&
            slotp->crypto_mode != BLK_ENCRYPTION_MODE_INVALID)
                blk_crypto_evict_keyslot(slot);

        slotp->crypto_mode = crypto_mode;
        err = crypto_skcipher_setkey(slotp->tfms[crypto_mode], key->raw,
                                     key->size);
        if (err) {
                blk_crypto_evict_keyslot(slot);
                return err;
        }
        return 0;
}

static int blk_crypto_keyslot_evict(struct blk_keyslot_manager *ksm,
                                    const struct blk_crypto_key *key,
                                    unsigned int slot)
{
        blk_crypto_evict_keyslot(slot);
        return 0;
}

/*
 * The crypto API fallback KSM ops - only used for a bio when it specifies a
 * blk_crypto_key that was not supported by the device's inline encryption
 * hardware.
 */
static const struct blk_ksm_ll_ops blk_crypto_ksm_ll_ops = {
        .keyslot_program        = blk_crypto_keyslot_program,
        .keyslot_evict          = blk_crypto_keyslot_evict,
};
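
/*
 * Completion callback for the bounce bio used for fallback encryption: free
 * the bounce pages, copy the I/O status to the original bio, and end it.
 */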
static void blk_crypto_fallback_encrypt_endio(struct bio *enc_bio)
{
        struct bio *src_bio = enc_bio->bi_private;
        int i;

        for (i = 0; i < enc_bio->bi_vcnt; i++)
                mempool_free(enc_bio->bi_io_vec[i].bv_page,
                             blk_crypto_bounce_page_pool);

        src_bio->bi_status = enc_bio->bi_status;

        bio_put(enc_bio);
        bio_endio(src_bio);
}
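
/*
 * Shallow-clone a bio for use as the encryption bounce bio: copy the fields
 * and bvecs needed for submission and inherit the blkcg association.
 */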
static struct bio *blk_crypto_clone_bio(struct bio *bio_src)
{
        struct bvec_iter iter;
        struct bio_vec bv;
        struct bio *bio;

        bio = bio_alloc_bioset(GFP_NOIO, bio_segments(bio_src), NULL);
        if (!bio)
                return NULL;
        bio->bi_disk = bio_src->bi_disk;
        bio->bi_opf = bio_src->bi_opf;
        bio->bi_ioprio = bio_src->bi_ioprio;
        bio->bi_write_hint = bio_src->bi_write_hint;
        bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
        bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;

        bio_for_each_segment(bv, bio_src, iter)
                bio->bi_io_vec[bio->bi_vcnt++] = bv;

        bio_clone_blkg_association(bio, bio_src);
        blkcg_bio_issue_init(bio);

        return bio;
}
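
/*
 * Allocate a synchronous skcipher_request on the tfm currently programmed
 * into the given fallback keyslot. Returns false on allocation failure.
 */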
static bool blk_crypto_alloc_cipher_req(struct blk_ksm_keyslot *slot,
                                        struct skcipher_request **ciph_req_ret,
                                        struct crypto_wait *wait)
{
        struct skcipher_request *ciph_req;
        const struct blk_crypto_keyslot *slotp;
        int keyslot_idx = blk_ksm_get_slot_idx(slot);

        slotp = &blk_crypto_keyslots[keyslot_idx];
        ciph_req = skcipher_request_alloc(slotp->tfms[slotp->crypto_mode],
                                          GFP_NOIO);
        if (!ciph_req)
                return false;

        skcipher_request_set_callback(ciph_req,
                                      CRYPTO_TFM_REQ_MAY_BACKLOG |
                                      CRYPTO_TFM_REQ_MAY_SLEEP,
                                      crypto_req_done, wait);
        *ciph_req_ret = ciph_req;

        return true;
}
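
/*
 * The encryption bounce bio can hold at most BIO_MAX_PAGES bvecs. If the
 * source bio is larger, split off a prefix that fits and resubmit the rest.
 */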
static bool blk_crypto_split_bio_if_needed(struct bio **bio_ptr)
{
        struct bio *bio = *bio_ptr;
        unsigned int i = 0;
        unsigned int num_sectors = 0;
        struct bio_vec bv;
        struct bvec_iter iter;

        bio_for_each_segment(bv, bio, iter) {
                num_sectors += bv.bv_len >> SECTOR_SHIFT;
                if (++i == BIO_MAX_PAGES)
                        break;
        }
        if (num_sectors < bio_sectors(bio)) {
                struct bio *split_bio;

                split_bio = bio_split(bio, num_sectors, GFP_NOIO, NULL);
                if (!split_bio) {
                        bio->bi_status = BLK_STS_RESOURCE;
                        return false;
                }
                bio_chain(split_bio, bio);
                submit_bio_noacct(bio);
                *bio_ptr = split_bio;
        }

        return true;
}

union blk_crypto_iv {
        __le64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
        u8 bytes[BLK_CRYPTO_MAX_IV_SIZE];
};
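
/* Convert a data unit number (DUN) array into the little-endian IV format. */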
static void blk_crypto_dun_to_iv(const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
                                 union blk_crypto_iv *iv)
{
        int i;

        for (i = 0; i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++)
                iv->dun[i] = cpu_to_le64(dun[i]);
}

/*
 * The crypto API fallback's encryption routine.
 * Allocate a bounce bio for encryption, encrypt the input bio using the
 * crypto API, and replace *bio_ptr with the bounce bio. May split the input
 * bio if it's too large. Returns true on success. Returns false and sets
 * bio->bi_status on error.
 */
static bool blk_crypto_fallback_encrypt_bio(struct bio **bio_ptr)
{
        struct bio *src_bio, *enc_bio;
        struct bio_crypt_ctx *bc;
        struct blk_ksm_keyslot *slot;
        int data_unit_size;
        struct skcipher_request *ciph_req = NULL;
        DECLARE_CRYPTO_WAIT(wait);
        u64 curr_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
        struct scatterlist src, dst;
        union blk_crypto_iv iv;
        unsigned int i, j;
        bool ret = false;
        blk_status_t blk_st;

        /* Split the bio if it's too big for single page bvec */
        if (!blk_crypto_split_bio_if_needed(bio_ptr))
                return false;

        src_bio = *bio_ptr;
        bc = src_bio->bi_crypt_context;
        data_unit_size = bc->bc_key->crypto_cfg.data_unit_size;

        /* Allocate bounce bio for encryption */
        enc_bio = blk_crypto_clone_bio(src_bio);
        if (!enc_bio) {
                src_bio->bi_status = BLK_STS_RESOURCE;
                return false;
        }

        /*
         * Use the crypto API fallback keyslot manager to get a crypto_skcipher
         * for the algorithm and key specified for this bio.
         */
        blk_st = blk_ksm_get_slot_for_key(&blk_crypto_ksm, bc->bc_key, &slot);
        if (blk_st != BLK_STS_OK) {
                src_bio->bi_status = blk_st;
                goto out_put_enc_bio;
        }

        /* and then allocate an skcipher_request for it */
        if (!blk_crypto_alloc_cipher_req(slot, &ciph_req, &wait)) {
                src_bio->bi_status = BLK_STS_RESOURCE;
                goto out_release_keyslot;
        }

        memcpy(curr_dun, bc->bc_dun, sizeof(curr_dun));
        sg_init_table(&src, 1);
        sg_init_table(&dst, 1);

        skcipher_request_set_crypt(ciph_req, &src, &dst, data_unit_size,
                                   iv.bytes);

        /* Encrypt each page in the bounce bio */
        for (i = 0; i < enc_bio->bi_vcnt; i++) {
                struct bio_vec *enc_bvec = &enc_bio->bi_io_vec[i];
                struct page *plaintext_page = enc_bvec->bv_page;
                struct page *ciphertext_page =
                        mempool_alloc(blk_crypto_bounce_page_pool, GFP_NOIO);

                enc_bvec->bv_page = ciphertext_page;

                if (!ciphertext_page) {
                        src_bio->bi_status = BLK_STS_RESOURCE;
                        goto out_free_bounce_pages;
                }

                sg_set_page(&src, plaintext_page, data_unit_size,
                            enc_bvec->bv_offset);
                sg_set_page(&dst, ciphertext_page, data_unit_size,
                            enc_bvec->bv_offset);

                /* Encrypt each data unit in this page */
                for (j = 0; j < enc_bvec->bv_len; j += data_unit_size) {
                        blk_crypto_dun_to_iv(curr_dun, &iv);
                        if (crypto_wait_req(crypto_skcipher_encrypt(ciph_req),
                                            &wait)) {
                                i++;
                                src_bio->bi_status = BLK_STS_IOERR;
                                goto out_free_bounce_pages;
                        }
                        bio_crypt_dun_increment(curr_dun, 1);
                        src.offset += data_unit_size;
                        dst.offset += data_unit_size;
                }
        }

        enc_bio->bi_private = src_bio;
        enc_bio->bi_end_io = blk_crypto_fallback_encrypt_endio;
        *bio_ptr = enc_bio;
        ret = true;

        enc_bio = NULL;
        goto out_free_ciph_req;

out_free_bounce_pages:
        while (i > 0)
                mempool_free(enc_bio->bi_io_vec[--i].bv_page,
                             blk_crypto_bounce_page_pool);
out_free_ciph_req:
        skcipher_request_free(ciph_req);
out_release_keyslot:
        blk_ksm_put_slot(slot);
out_put_enc_bio:
        if (enc_bio)
                bio_put(enc_bio);

        return ret;
}

/*
 * The crypto API fallback's main decryption routine.
 * Decrypts input bio in place, and calls bio_endio on the bio.
 */
static void blk_crypto_fallback_decrypt_bio(struct work_struct *work)
{
        struct bio_fallback_crypt_ctx *f_ctx =
                container_of(work, struct bio_fallback_crypt_ctx, work);
        struct bio *bio = f_ctx->bio;
        struct bio_crypt_ctx *bc = &f_ctx->crypt_ctx;
        struct blk_ksm_keyslot *slot;
        struct skcipher_request *ciph_req = NULL;
        DECLARE_CRYPTO_WAIT(wait);
        u64 curr_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
        union blk_crypto_iv iv;
        struct scatterlist sg;
        struct bio_vec bv;
        struct bvec_iter iter;
        const int data_unit_size = bc->bc_key->crypto_cfg.data_unit_size;
        unsigned int i;
        blk_status_t blk_st;

        /*
         * Use the crypto API fallback keyslot manager to get a crypto_skcipher
         * for the algorithm and key specified for this bio.
         */
        blk_st = blk_ksm_get_slot_for_key(&blk_crypto_ksm, bc->bc_key, &slot);
        if (blk_st != BLK_STS_OK) {
                bio->bi_status = blk_st;
                goto out_no_keyslot;
        }

        /* and then allocate an skcipher_request for it */
        if (!blk_crypto_alloc_cipher_req(slot, &ciph_req, &wait)) {
                bio->bi_status = BLK_STS_RESOURCE;
                goto out;
        }

        memcpy(curr_dun, bc->bc_dun, sizeof(curr_dun));
        sg_init_table(&sg, 1);
        skcipher_request_set_crypt(ciph_req, &sg, &sg, data_unit_size,
                                   iv.bytes);

        /* Decrypt each segment in the bio */
        __bio_for_each_segment(bv, bio, iter, f_ctx->crypt_iter) {
                struct page *page = bv.bv_page;

                sg_set_page(&sg, page, data_unit_size, bv.bv_offset);

                /* Decrypt each data unit in the segment */
                for (i = 0; i < bv.bv_len; i += data_unit_size) {
                        blk_crypto_dun_to_iv(curr_dun, &iv);
                        if (crypto_wait_req(crypto_skcipher_decrypt(ciph_req),
                                            &wait)) {
                                bio->bi_status = BLK_STS_IOERR;
                                goto out;
                        }
                        bio_crypt_dun_increment(curr_dun, 1);
                        sg.offset += data_unit_size;
                }
        }

out:
        skcipher_request_free(ciph_req);
        blk_ksm_put_slot(slot);
out_no_keyslot:
        mempool_free(f_ctx, bio_fallback_crypt_ctx_pool);
        bio_endio(bio);
}

/**
 * blk_crypto_fallback_decrypt_endio - queue bio for fallback decryption
 *
 * @bio: the bio to queue
 *
 * Restore bi_private and bi_end_io, and queue the bio for decryption into a
 * workqueue, since this function will be called from an atomic context.
 */
static void blk_crypto_fallback_decrypt_endio(struct bio *bio)
{
        struct bio_fallback_crypt_ctx *f_ctx = bio->bi_private;

        bio->bi_private = f_ctx->bi_private_orig;
        bio->bi_end_io = f_ctx->bi_end_io_orig;

        /* If there was an IO error, don't queue for decrypt. */
        if (bio->bi_status) {
                mempool_free(f_ctx, bio_fallback_crypt_ctx_pool);
                bio_endio(bio);
                return;
        }

        INIT_WORK(&f_ctx->work, blk_crypto_fallback_decrypt_bio);
        f_ctx->bio = bio;
        queue_work(blk_crypto_wq, &f_ctx->work);
}

/**
 * blk_crypto_fallback_bio_prep - Prepare a bio to use fallback en/decryption
 *
 * @bio_ptr: pointer to the bio to prepare
 *
 * If bio is doing a WRITE operation, this splits the bio into two parts if it's
 * too big (see blk_crypto_split_bio_if_needed). It then allocates a bounce bio
 * for the first part, encrypts it, and updates *bio_ptr to point to the bounce
 * bio.
 *
 * For a READ operation, we mark the bio for decryption by using bi_private and
 * bi_end_io.
 *
 * In either case, this function will make the bio look like a regular bio (i.e.
 * as if no encryption context was ever specified) for the purposes of the rest
 * of the stack except for blk-integrity (blk-integrity and blk-crypto are not
 * currently supported together).
 *
 * Return: true on success. Sets bio->bi_status and returns false on error.
 */
bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr)
{
        struct bio *bio = *bio_ptr;
        struct bio_crypt_ctx *bc = bio->bi_crypt_context;
        struct bio_fallback_crypt_ctx *f_ctx;

        if (WARN_ON_ONCE(!tfms_inited[bc->bc_key->crypto_cfg.crypto_mode])) {
                /* User didn't call blk_crypto_start_using_key() first */
                bio->bi_status = BLK_STS_IOERR;
                return false;
        }

        if (!blk_ksm_crypto_cfg_supported(&blk_crypto_ksm,
                                          &bc->bc_key->crypto_cfg)) {
                bio->bi_status = BLK_STS_NOTSUPP;
                return false;
        }

        if (bio_data_dir(bio) == WRITE)
                return blk_crypto_fallback_encrypt_bio(bio_ptr);

        /*
         * bio READ case: Set up a f_ctx in the bio's bi_private and set the
         * bi_end_io appropriately to trigger decryption when the bio is ended.
         */
        f_ctx = mempool_alloc(bio_fallback_crypt_ctx_pool, GFP_NOIO);
        f_ctx->crypt_ctx = *bc;
        f_ctx->crypt_iter = bio->bi_iter;
        f_ctx->bi_private_orig = bio->bi_private;
        f_ctx->bi_end_io_orig = bio->bi_end_io;
        bio->bi_private = (void *)f_ctx;
        bio->bi_end_io = blk_crypto_fallback_decrypt_endio;
        bio_crypt_free_ctx(bio);

        return true;
}
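
/* Evict a key from the crypto API fallback's keyslot manager. */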
int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
{
        return blk_ksm_evict_key(&blk_crypto_ksm, key);
}

static bool blk_crypto_fallback_inited;
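
/*
 * One-time setup of the fallback resources: the keyslot manager, the
 * decryption workqueue, the keyslot array, and the bounce page and
 * crypt context mempools.
 */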
static int blk_crypto_fallback_init(void)
{
        int i;
        int err;

        if (blk_crypto_fallback_inited)
                return 0;

        prandom_bytes(blank_key, BLK_CRYPTO_MAX_KEY_SIZE);

        err = blk_ksm_init(&blk_crypto_ksm, blk_crypto_num_keyslots);
        if (err)
                goto out;
        err = -ENOMEM;

        blk_crypto_ksm.ksm_ll_ops = blk_crypto_ksm_ll_ops;
        blk_crypto_ksm.max_dun_bytes_supported = BLK_CRYPTO_MAX_IV_SIZE;

        /* All blk-crypto modes have a crypto API fallback. */
        for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++)
                blk_crypto_ksm.crypto_modes_supported[i] = 0xFFFFFFFF;
        blk_crypto_ksm.crypto_modes_supported[BLK_ENCRYPTION_MODE_INVALID] = 0;

        blk_crypto_wq = alloc_workqueue("blk_crypto_wq",
                                        WQ_UNBOUND | WQ_HIGHPRI |
                                        WQ_MEM_RECLAIM, num_online_cpus());
        if (!blk_crypto_wq)
                goto fail_free_ksm;

        blk_crypto_keyslots = kcalloc(blk_crypto_num_keyslots,
                                      sizeof(blk_crypto_keyslots[0]),
                                      GFP_KERNEL);
        if (!blk_crypto_keyslots)
                goto fail_free_wq;

        blk_crypto_bounce_page_pool =
                mempool_create_page_pool(num_prealloc_bounce_pg, 0);
        if (!blk_crypto_bounce_page_pool)
                goto fail_free_keyslots;

        bio_fallback_crypt_ctx_cache = KMEM_CACHE(bio_fallback_crypt_ctx, 0);
        if (!bio_fallback_crypt_ctx_cache)
                goto fail_free_bounce_page_pool;

        bio_fallback_crypt_ctx_pool =
                mempool_create_slab_pool(num_prealloc_fallback_crypt_ctxs,
                                         bio_fallback_crypt_ctx_cache);
        if (!bio_fallback_crypt_ctx_pool)
                goto fail_free_crypt_ctx_cache;

        blk_crypto_fallback_inited = true;

        return 0;
fail_free_crypt_ctx_cache:
        kmem_cache_destroy(bio_fallback_crypt_ctx_cache);
fail_free_bounce_page_pool:
        mempool_destroy(blk_crypto_bounce_page_pool);
fail_free_keyslots:
        kfree(blk_crypto_keyslots);
fail_free_wq:
        destroy_workqueue(blk_crypto_wq);
fail_free_ksm:
        blk_ksm_destroy(&blk_crypto_ksm);
out:
        return err;
}

/*
 * Prepare blk-crypto-fallback for the specified crypto mode.
 * Returns -ENOPKG if the needed crypto API support is missing.
 */
int blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num)
{
        const char *cipher_str = blk_crypto_modes[mode_num].cipher_str;
        struct blk_crypto_keyslot *slotp;
        unsigned int i;
        int err = 0;

        /*
         * Ensure that updates to blk_crypto_keyslots[i].tfms[mode_num]
         * for each i are visible before we try to access them.
         */
        if (likely(smp_load_acquire(&tfms_inited[mode_num])))
                return 0;

        mutex_lock(&tfms_init_lock);
        if (tfms_inited[mode_num])
                goto out;

        err = blk_crypto_fallback_init();
        if (err)
                goto out;

        for (i = 0; i < blk_crypto_num_keyslots; i++) {
                slotp = &blk_crypto_keyslots[i];
                slotp->tfms[mode_num] = crypto_alloc_skcipher(cipher_str, 0, 0);
                if (IS_ERR(slotp->tfms[mode_num])) {
                        err = PTR_ERR(slotp->tfms[mode_num]);
                        if (err == -ENOENT) {
                                pr_warn_once("Missing crypto API support for \"%s\"\n",
                                             cipher_str);
                                err = -ENOPKG;
                        }
                        slotp->tfms[mode_num] = NULL;
                        goto out_free_tfms;
                }

                crypto_skcipher_set_flags(slotp->tfms[mode_num],
                                          CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
        }

        /*
         * Ensure that updates to blk_crypto_keyslots[i].tfms[mode_num]
         * for each i are visible before we set tfms_inited[mode_num].
         */
        smp_store_release(&tfms_inited[mode_num], true);
        goto out;

out_free_tfms:
        for (i = 0; i < blk_crypto_num_keyslots; i++) {
                slotp = &blk_crypto_keyslots[i];
                crypto_free_skcipher(slotp->tfms[mode_num]);
                slotp->tfms[mode_num] = NULL;
        }
out:
        mutex_unlock(&tfms_init_lock);
        return err;
}