// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Google LLC
 */

/*
 * Refer to Documentation/block/inline-encryption.rst for detailed explanation.
 */

#define pr_fmt(fmt) "blk-crypto: " fmt

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/keyslot-manager.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "blk-crypto-internal.h"
const struct blk_crypto_mode blk_crypto_modes[] = {
	[BLK_ENCRYPTION_MODE_AES_256_XTS] = {
		.cipher_str = "xts(aes)",
		.keysize = 64,
		.ivsize = 16,
	},
	[BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV] = {
		.cipher_str = "essiv(cbc(aes),sha256)",
		.keysize = 16,
		.ivsize = 16,
	},
	[BLK_ENCRYPTION_MODE_ADIANTUM] = {
		.cipher_str = "adiantum(xchacha12,aes)",
		.keysize = 32,
		.ivsize = 32,
	},
};
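
/*
 * Note on the sizes above (illustrative): AES-256-XTS has .keysize == 64
 * because XTS takes two concatenated AES-256 keys, one for the data and one
 * for the tweak. A minimal lookup sketch:
 *
 *	const struct blk_crypto_mode *m =
 *		&blk_crypto_modes[BLK_ENCRYPTION_MODE_AES_256_XTS];
 *	// m->keysize == 64 bytes, m->ivsize == 16 bytes
 */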

/*
 * This number needs to be at least (the number of threads doing IO
 * concurrently) * (maximum recursive depth of a bio), so that we don't
 * deadlock on crypt_ctx allocations. The default is chosen to be the same
 * as the default number of post read contexts in both EXT4 and F2FS.
 */
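/*
 * Worked example of the sizing rule above (the numbers are assumptions for
 * illustration, not kernel defaults): with 32 threads doing IO concurrently
 * and a maximum bio recursion depth of 4, at least 32 * 4 = 128 crypt_ctxs
 * must be preallocated to rule out deadlock.
 */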
static int num_prealloc_crypt_ctxs = 128;

module_param(num_prealloc_crypt_ctxs, int, 0444);
MODULE_PARM_DESC(num_prealloc_crypt_ctxs,
		 "Number of bio crypto contexts to preallocate");

static struct kmem_cache *bio_crypt_ctx_cache;
static mempool_t *bio_crypt_ctx_pool;
static int __init bio_crypt_ctx_init(void)
{
	size_t i;

	bio_crypt_ctx_cache = KMEM_CACHE(bio_crypt_ctx, 0);
	if (!bio_crypt_ctx_cache)
		goto out_no_mem;

	bio_crypt_ctx_pool = mempool_create_slab_pool(num_prealloc_crypt_ctxs,
						      bio_crypt_ctx_cache);
	if (!bio_crypt_ctx_pool)
		goto out_no_mem;

	/* This is assumed in various places. */
	BUILD_BUG_ON(BLK_ENCRYPTION_MODE_INVALID != 0);

	/* Sanity check that no algorithm exceeds the defined limits. */
	for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++) {
		BUG_ON(blk_crypto_modes[i].keysize > BLK_CRYPTO_MAX_KEY_SIZE);
		BUG_ON(blk_crypto_modes[i].ivsize > BLK_CRYPTO_MAX_IV_SIZE);
	}

	return 0;
out_no_mem:
	panic("Failed to allocate mem for bio crypt ctxs\n");
}
subsys_initcall(bio_crypt_ctx_init);

void bio_crypt_set_ctx(struct bio *bio, const struct blk_crypto_key *key,
		       const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE], gfp_t gfp_mask)
{
	struct bio_crypt_ctx *bc = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);

	bc->bc_key = key;
	memcpy(bc->bc_dun, dun, sizeof(bc->bc_dun));

	bio->bi_crypt_context = bc;
}
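
/*
 * Usage sketch (an assumption, not a caller in this file): a filesystem that
 * has already initialized @key could attach it to a bio like:
 *
 *	u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE] = { first_data_unit_index };
 *
 *	bio_crypt_set_ctx(bio, key, dun, GFP_NOIO);
 *
 * Note that the mempool_alloc() result is used unchecked above, so the
 * caller's gfp_mask must allow the allocation to block until a context is
 * available (i.e. include __GFP_DIRECT_RECLAIM, as GFP_NOIO does).
 */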

void __bio_crypt_free_ctx(struct bio *bio)
{
	mempool_free(bio->bi_crypt_context, bio_crypt_ctx_pool);
	bio->bi_crypt_context = NULL;
}

int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask)
{
	dst->bi_crypt_context = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
	if (!dst->bi_crypt_context)
		return -ENOMEM;
	*dst->bi_crypt_context = *src->bi_crypt_context;
	return 0;
}
EXPORT_SYMBOL_GPL(__bio_crypt_clone);

/* Increments @dun by @inc, treating @dun as a multi-limb integer. */
void bio_crypt_dun_increment(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
			     unsigned int inc)
{
	int i;

	for (i = 0; inc && i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) {
		dun[i] += inc;
		/*
		 * If the addition in this limb overflowed, then we need to
		 * carry 1 into the next limb. Else the carry is 0.
		 */
		if (dun[i] < inc)
			inc = 1;
		else
			inc = 0;
	}
}
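
/*
 * Worked example: with 64-bit limbs, incrementing the DUN
 * { 0xFFFFFFFFFFFFFFFF, 0, 0, 0 } by 1 overflows limb 0, so a carry of 1
 * propagates into limb 1, yielding { 0, 1, 0, 0 }.
 */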

void __bio_crypt_advance(struct bio *bio, unsigned int bytes)
{
	struct bio_crypt_ctx *bc = bio->bi_crypt_context;

	bio_crypt_dun_increment(bc->bc_dun,
				bytes >> bc->bc_key->data_unit_size_bits);
}

/*
 * Returns true if @bc->bc_dun plus @bytes converted to data units is equal to
 * @next_dun, treating the DUNs as multi-limb integers.
 */
bool bio_crypt_dun_is_contiguous(const struct bio_crypt_ctx *bc,
				 unsigned int bytes,
				 const u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE])
{
	int i;
	unsigned int carry = bytes >> bc->bc_key->data_unit_size_bits;

	for (i = 0; i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) {
		if (bc->bc_dun[i] + carry != next_dun[i])
			return false;
		/*
		 * If the addition in this limb overflowed, then we need to
		 * carry 1 into the next limb. Else the carry is 0.
		 */
		if ((bc->bc_dun[i] + carry) < carry)
			carry = 1;
		else
			carry = 0;
	}

	/* If the DUN wrapped through 0, don't treat it as contiguous. */
	return carry == 0;
}
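
/*
 * Worked example: with 4096-byte data units (data_unit_size_bits == 12), a
 * context with bc_dun == { 5, 0, 0, 0 } covering bytes == 8192 (two data
 * units) is contiguous with next_dun == { 7, 0, 0, 0 }, since
 * 5 + 8192/4096 == 7 with no carry out of limb 0.
 */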

/*
 * Checks that two bio crypt contexts are compatible - i.e. that
 * they are mergeable except for data_unit_num continuity.
 */
static bool bio_crypt_ctx_compatible(struct bio_crypt_ctx *bc1,
				     struct bio_crypt_ctx *bc2)
{
	if (!bc1)
		return !bc2;

	return bc2 && bc1->bc_key == bc2->bc_key;
}

bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio)
{
	return bio_crypt_ctx_compatible(rq->crypt_ctx, bio->bi_crypt_context);
}

/*
 * Checks that two bio crypt contexts are compatible, and also
 * that their data_unit_nums are continuous (and can hence be merged)
 * in the order @bc1 followed by @bc2.
 */
bool bio_crypt_ctx_mergeable(struct bio_crypt_ctx *bc1, unsigned int bc1_bytes,
			     struct bio_crypt_ctx *bc2)
{
	if (!bio_crypt_ctx_compatible(bc1, bc2))
		return false;

	return !bc1 || bio_crypt_dun_is_contiguous(bc1, bc1_bytes, bc2->bc_dun);
}
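
/*
 * Example (illustrative): two bios encrypted with the same bc_key are
 * mergeable when the first covers bc1_bytes of data and the second's DUN
 * picks up exactly where the first's leaves off; a pair of unencrypted bios
 * (both contexts NULL) is likewise mergeable.
 */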

/* Check that all I/O segments are data unit aligned. */
static bool bio_crypt_check_alignment(struct bio *bio)
{
	const unsigned int data_unit_size =
		bio->bi_crypt_context->bc_key->crypto_cfg.data_unit_size;
	struct bvec_iter iter;
	struct bio_vec bv;

	bio_for_each_segment(bv, bio, iter) {
		if (!IS_ALIGNED(bv.bv_len | bv.bv_offset, data_unit_size))
			return false;
	}

	return true;
}
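
/*
 * Example (illustrative): with a 4096-byte data unit size, a segment with
 * bv_len == 8192 and bv_offset == 0 passes, while one with bv_len == 4096
 * and bv_offset == 512 fails; IS_ALIGNED() checks bv_len | bv_offset against
 * the data unit size in a single test.
 */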

blk_status_t __blk_crypto_init_request(struct request *rq)
{
	return blk_ksm_get_slot_for_key(rq->q->ksm, rq->crypt_ctx->bc_key,
					&rq->crypt_keyslot);
}

/**
 * __blk_crypto_free_request - Uninitialize the crypto fields of a request.
 *
 * @rq: The request whose crypto fields to uninitialize.
 *
 * Completely uninitializes the crypto fields of a request. If a keyslot has
 * been programmed into some inline encryption hardware, that keyslot is
 * released. The rq->crypt_ctx is also freed.
 */
void __blk_crypto_free_request(struct request *rq)
{
	blk_ksm_put_slot(rq->crypt_keyslot);
	mempool_free(rq->crypt_ctx, bio_crypt_ctx_pool);
	blk_crypto_rq_set_defaults(rq);
}

/**
 * __blk_crypto_bio_prep - Prepare bio for inline encryption
 *
 * @bio_ptr: pointer to original bio pointer
 *
 * If the bio crypt context provided for the bio is supported by the underlying
 * device's inline encryption hardware, do nothing.
 *
 * Otherwise, try to perform en/decryption for this bio by falling back to the
 * kernel crypto API. When the crypto API fallback is used for encryption,
 * blk-crypto may choose to split the bio into two: the first one continues to
 * be processed while the second one is resubmitted via submit_bio_noacct. A
 * bounce bio will be allocated to encrypt the contents of the first one, and
 * *bio_ptr will be updated to point to this bounce bio.
 *
 * Caller must ensure that the bio has a bio_crypt_ctx.
 *
 * Return: true on success; false on error (and bio->bi_status will be set
 *	   appropriately, and bio_endio() will have been called so bio
 *	   submission should abort).
 */
bool __blk_crypto_bio_prep(struct bio **bio_ptr)
{
	struct bio *bio = *bio_ptr;
	const struct blk_crypto_key *bc_key = bio->bi_crypt_context->bc_key;

	/* Error if bio has no data. */
	if (WARN_ON_ONCE(!bio_has_data(bio))) {
		bio->bi_status = BLK_STS_IOERR;
		goto fail;
	}

	if (!bio_crypt_check_alignment(bio)) {
		bio->bi_status = BLK_STS_IOERR;
		goto fail;
	}

	/*
	 * Success if device supports the encryption context, or if we
	 * succeeded in falling back to the crypto API.
	 */
	if (blk_ksm_crypto_cfg_supported(bio->bi_disk->queue->ksm,
					 &bc_key->crypto_cfg))
		return true;

	if (blk_crypto_fallback_bio_prep(bio_ptr))
		return true;
fail:
	bio_endio(*bio_ptr);
	return false;
}
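
/*
 * Caller-side sketch (an assumption about the submit path, kept generic on
 * purpose): submission code is expected to bail out without touching the bio
 * again when this returns false, e.g.:
 *
 *	if (bio_has_crypt_ctx(bio) && !__blk_crypto_bio_prep(&bio))
 *		return;		// bio_endio() was already called
 */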

/**
 * __blk_crypto_rq_bio_prep - Prepare a request's crypt_ctx when its first bio
 *			      is inserted
 *
 * @rq: The request to prepare
 * @bio: The first bio being inserted into the request
 * @gfp_mask: gfp mask
 */
void __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
			      gfp_t gfp_mask)
{
	if (!rq->crypt_ctx)
		rq->crypt_ctx = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
	*rq->crypt_ctx = *bio->bi_crypt_context;
}

/**
 * blk_crypto_init_key() - Prepare a key for use with blk-crypto
 * @blk_key: Pointer to the blk_crypto_key to initialize.
 * @raw_key: Pointer to the raw key. Must be the correct length for the chosen
 *	     @crypto_mode; see blk_crypto_modes[].
 * @crypto_mode: identifier for the encryption algorithm to use
 * @dun_bytes: number of bytes that will be used to specify the DUN when this
 *	       key is used
 * @data_unit_size: the data unit size to use for en/decryption
 *
 * Return: 0 on success, -errno on failure. The caller is responsible for
 *	   zeroizing both blk_key and raw_key when done with them.
 */
int blk_crypto_init_key(struct blk_crypto_key *blk_key, const u8 *raw_key,
			enum blk_crypto_mode_num crypto_mode,
			unsigned int dun_bytes,
			unsigned int data_unit_size)
{
	const struct blk_crypto_mode *mode;

	memset(blk_key, 0, sizeof(*blk_key));

	if (crypto_mode >= ARRAY_SIZE(blk_crypto_modes))
		return -EINVAL;

	mode = &blk_crypto_modes[crypto_mode];
	if (mode->keysize == 0)
		return -EINVAL;

	if (dun_bytes == 0 || dun_bytes > BLK_CRYPTO_MAX_IV_SIZE)
		return -EINVAL;

	if (!is_power_of_2(data_unit_size))
		return -EINVAL;

	blk_key->crypto_cfg.crypto_mode = crypto_mode;
	blk_key->crypto_cfg.dun_bytes = dun_bytes;
	blk_key->crypto_cfg.data_unit_size = data_unit_size;
	blk_key->data_unit_size_bits = ilog2(data_unit_size);
	blk_key->size = mode->keysize;
	memcpy(blk_key->raw, raw_key, mode->keysize);

	return 0;
}
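
/*
 * Usage sketch, guarded out since it's illustrative only: the mode, DUN
 * width, and data unit size below are assumptions a caller might choose,
 * not requirements of this API.
 */
#if 0
static int example_init_xts_key(struct blk_crypto_key *key, const u8 raw[64])
{
	/* AES-256-XTS, 8-byte DUNs, 4096-byte crypto data units */
	return blk_crypto_init_key(key, raw, BLK_ENCRYPTION_MODE_AES_256_XTS,
				   8, 4096);
}
#endif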

/*
 * Check if bios with @cfg can be en/decrypted by blk-crypto (i.e. either the
 * request queue it's submitted to supports inline crypto, or the
 * blk-crypto-fallback is enabled and supports the cfg).
 */
bool blk_crypto_config_supported(struct request_queue *q,
				 const struct blk_crypto_config *cfg)
{
	return IS_ENABLED(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) ||
	       blk_ksm_crypto_cfg_supported(q->ksm, cfg);
}

/**
 * blk_crypto_start_using_key() - Start using a blk_crypto_key on a device
 * @key: A key to use on the device
 * @q: the request queue for the device
 *
 * Upper layers must call this function to ensure that either the hardware
 * supports the key's crypto settings, or the crypto API fallback has
 * transforms for the needed mode allocated and ready to go. This function
 * may allocate an skcipher, and *should not* be called from the data path,
 * since that might cause a deadlock.
 *
 * Return: 0 on success; -ENOPKG if the hardware doesn't support the key and
 *	   blk-crypto-fallback is either disabled or the needed algorithm
 *	   is disabled in the crypto API; or another -errno code.
 */
int blk_crypto_start_using_key(const struct blk_crypto_key *key,
			       struct request_queue *q)
{
	if (blk_ksm_crypto_cfg_supported(q->ksm, &key->crypto_cfg))
		return 0;
	return blk_crypto_fallback_start_using_mode(key->crypto_cfg.crypto_mode);
}

/**
 * blk_crypto_evict_key() - Evict a key from any inline encryption hardware
 *			    it may have been programmed into
 * @q: The request queue whose associated inline encryption hardware this key
 *     might have been programmed into
 * @key: The key to evict
 *
 * Upper layers (filesystems) must call this function to ensure that a key is
 * evicted from any hardware that it might have been programmed into. The key
 * must not be in use by any in-flight IO when this function is called.
 *
 * Return: 0 on success or if the key is not present in the q's ksm; -errno on
 *	   error.
 */
int blk_crypto_evict_key(struct request_queue *q,
			 const struct blk_crypto_key *key)
{
	if (blk_ksm_crypto_cfg_supported(q->ksm, &key->crypto_cfg))
		return blk_ksm_evict_key(q->ksm, key);

	/*
	 * If the request queue's associated inline encryption hardware didn't
	 * have support for the key, then the key might have been programmed
	 * into the fallback keyslot manager, so try to evict from there.
	 */
	return blk_crypto_fallback_evict_key(key);
}
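
/*
 * End-to-end lifecycle sketch, guarded out since it's illustrative only. It
 * strings together the public entry points above; the queue and key handling
 * around it are assumptions, not code from this file.
 */
#if 0
static void example_key_lifecycle(struct request_queue *q,
				  struct blk_crypto_key *key)
{
	if (blk_crypto_start_using_key(key, q))
		return;		/* neither hardware nor fallback can help */

	/*
	 * ... submit encrypted IO, attaching the key to bios with
	 * bio_crypt_set_ctx() ...
	 */

	blk_crypto_evict_key(q, key);	/* after all IO has completed */
}
#endif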