// SPDX-License-Identifier: GPL-2.0
/*
 * Inline encryption support for fscrypt
 *
 * Copyright 2019 Google LLC
 */

/*
 * With "inline encryption", the block layer handles the decryption/encryption
 * as part of the bio, instead of the filesystem doing the crypto itself via
 * the crypto API.  See Documentation/block/inline-encryption.rst.  fscrypt
 * still provides the key and IV to use.
 */
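
/*
 * Overview: fscrypt_select_encryption_impl() decides at key setup time
 * whether a file will use inline encryption.  fscrypt_prepare_inline_crypt_key()
 * and fscrypt_destroy_inline_crypt_key() manage the file's blk-crypto key.
 * fscrypt_set_bio_crypt_ctx*() and fscrypt_mergeable_bio*() are the helpers
 * that filesystems call while building bios for encrypted file contents.
 */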
14 | ||
15 | #include <linux/blk-crypto.h> | |
16 | #include <linux/blkdev.h> | |
17 | #include <linux/buffer_head.h> | |
18 | #include <linux/sched/mm.h> | |
453431a5 | 19 | #include <linux/slab.h> |
5fee3609 ST |
20 | |
21 | #include "fscrypt_private.h" | |
22 | ||
23 | struct fscrypt_blk_crypto_key { | |
24 | struct blk_crypto_key base; | |
25 | int num_devs; | |
26 | struct request_queue *devs[]; | |
27 | }; | |
28 | ||
static int fscrypt_get_num_devices(struct super_block *sb)
{
	if (sb->s_cop->get_num_devices)
		return sb->s_cop->get_num_devices(sb);
	return 1;
}

static void fscrypt_get_devices(struct super_block *sb, int num_devs,
				struct request_queue **devs)
{
	if (num_devs == 1)
		devs[0] = bdev_get_queue(sb->s_bdev);
	else
		sb->s_cop->get_devices(sb, devs);
}

static unsigned int fscrypt_get_dun_bytes(const struct fscrypt_info *ci)
{
	struct super_block *sb = ci->ci_inode->i_sb;
	unsigned int flags = fscrypt_policy_flags(&ci->ci_policy);
	int ino_bits = 64, lblk_bits = 64;

	if (flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY)
		return offsetofend(union fscrypt_iv, nonce);

	if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64)
		return sizeof(__le64);

	if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32)
		return sizeof(__le32);

	/* Default case: IVs are just the file logical block number */
	if (sb->s_cop->get_ino_and_lblk_bits)
		sb->s_cop->get_ino_and_lblk_bits(sb, &ino_bits, &lblk_bits);
	return DIV_ROUND_UP(lblk_bits, 8);
}

/* Enable inline encryption for this file if supported. */
int fscrypt_select_encryption_impl(struct fscrypt_info *ci)
{
	const struct inode *inode = ci->ci_inode;
	struct super_block *sb = inode->i_sb;
	struct blk_crypto_config crypto_cfg;
	int num_devs;
	struct request_queue **devs;
	int i;

	/* The file must need contents encryption, not filenames encryption */
	if (!fscrypt_needs_contents_encryption(inode))
		return 0;

	/* The crypto mode must have a blk-crypto counterpart */
	if (ci->ci_mode->blk_crypto_mode == BLK_ENCRYPTION_MODE_INVALID)
		return 0;

	/* The filesystem must be mounted with -o inlinecrypt */
	if (!(sb->s_flags & SB_INLINECRYPT))
		return 0;

	/*
	 * When a page contains multiple logically contiguous filesystem blocks,
	 * some filesystem code only calls fscrypt_mergeable_bio() for the first
	 * block in the page.  This is fine for most of fscrypt's IV generation
	 * strategies, where contiguous blocks imply contiguous IVs.  But it
	 * doesn't work with IV_INO_LBLK_32.  For now, simply exclude
	 * IV_INO_LBLK_32 with blocksize != PAGE_SIZE from inline encryption.
	 */
	if ((fscrypt_policy_flags(&ci->ci_policy) &
	     FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) &&
	    sb->s_blocksize != PAGE_SIZE)
		return 0;

	/*
	 * On all the filesystem's devices, blk-crypto must support the crypto
	 * configuration that the file would use.
	 */
	crypto_cfg.crypto_mode = ci->ci_mode->blk_crypto_mode;
	crypto_cfg.data_unit_size = sb->s_blocksize;
	crypto_cfg.dun_bytes = fscrypt_get_dun_bytes(ci);
	num_devs = fscrypt_get_num_devices(sb);
	devs = kmalloc_array(num_devs, sizeof(*devs), GFP_NOFS);
	if (!devs)
		return -ENOMEM;
	fscrypt_get_devices(sb, num_devs, devs);

	for (i = 0; i < num_devs; i++) {
		if (!blk_crypto_config_supported(devs[i], &crypto_cfg))
			goto out_free_devs;
	}

	ci->ci_inlinecrypt = true;
out_free_devs:
	kfree(devs);

	return 0;
}

int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key,
				     const u8 *raw_key,
				     const struct fscrypt_info *ci)
{
	const struct inode *inode = ci->ci_inode;
	struct super_block *sb = inode->i_sb;
	enum blk_crypto_mode_num crypto_mode = ci->ci_mode->blk_crypto_mode;
	int num_devs = fscrypt_get_num_devices(sb);
	int queue_refs = 0;
	struct fscrypt_blk_crypto_key *blk_key;
	int err;
	int i;
	unsigned int flags;

	blk_key = kzalloc(struct_size(blk_key, devs, num_devs), GFP_NOFS);
	if (!blk_key)
		return -ENOMEM;

	blk_key->num_devs = num_devs;
	fscrypt_get_devices(sb, num_devs, blk_key->devs);

	err = blk_crypto_init_key(&blk_key->base, raw_key, crypto_mode,
				  fscrypt_get_dun_bytes(ci), sb->s_blocksize);
	if (err) {
		fscrypt_err(inode, "error %d initializing blk-crypto key", err);
		goto fail;
	}

	/*
	 * We have to start using blk-crypto on all the filesystem's devices.
	 * We also have to save all the request_queue's for later so that the
	 * key can be evicted from them.  This is needed because some keys
	 * aren't destroyed until after the filesystem has already been
	 * unmounted (namely, the per-mode keys in struct fscrypt_master_key).
	 */
	for (i = 0; i < num_devs; i++) {
		if (!blk_get_queue(blk_key->devs[i])) {
			fscrypt_err(inode, "couldn't get request_queue");
			err = -EAGAIN;
			goto fail;
		}
		queue_refs++;

		flags = memalloc_nofs_save();
		err = blk_crypto_start_using_key(&blk_key->base,
						 blk_key->devs[i]);
		memalloc_nofs_restore(flags);
		if (err) {
			fscrypt_err(inode,
				    "error %d starting to use blk-crypto", err);
			goto fail;
		}
	}
	/*
	 * Pairs with the smp_load_acquire() in fscrypt_is_key_prepared().
	 * I.e., here we publish ->blk_key with a RELEASE barrier so that
	 * concurrent tasks can ACQUIRE it.  Note that this concurrency is only
	 * possible for per-mode keys, not for per-file keys.
	 */
	smp_store_release(&prep_key->blk_key, blk_key);
	return 0;

fail:
	for (i = 0; i < queue_refs; i++)
		blk_put_queue(blk_key->devs[i]);
	kfree_sensitive(blk_key);
	return err;
}

void fscrypt_destroy_inline_crypt_key(struct fscrypt_prepared_key *prep_key)
{
	struct fscrypt_blk_crypto_key *blk_key = prep_key->blk_key;
	int i;

	if (blk_key) {
		for (i = 0; i < blk_key->num_devs; i++) {
			blk_crypto_evict_key(blk_key->devs[i], &blk_key->base);
			blk_put_queue(blk_key->devs[i]);
		}
		kfree_sensitive(blk_key);
	}
}

bool __fscrypt_inode_uses_inline_crypto(const struct inode *inode)
{
	return inode->i_crypt_info->ci_inlinecrypt;
}
EXPORT_SYMBOL_GPL(__fscrypt_inode_uses_inline_crypto);

static void fscrypt_generate_dun(const struct fscrypt_info *ci, u64 lblk_num,
				 u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE])
{
	union fscrypt_iv iv;
	int i;

	fscrypt_generate_iv(&iv, lblk_num, ci);

	BUILD_BUG_ON(FSCRYPT_MAX_IV_SIZE > BLK_CRYPTO_MAX_IV_SIZE);
	memset(dun, 0, BLK_CRYPTO_MAX_IV_SIZE);
	for (i = 0; i < ci->ci_mode->ivsize / sizeof(dun[0]); i++)
		dun[i] = le64_to_cpu(iv.dun[i]);
}

/**
 * fscrypt_set_bio_crypt_ctx() - prepare a file contents bio for inline crypto
 * @bio: a bio which will eventually be submitted to the file
 * @inode: the file's inode
 * @first_lblk: the first file logical block number in the I/O
 * @gfp_mask: memory allocation flags - these must be a waiting mask so that
 *	      bio_crypt_set_ctx can't fail.
 *
 * If the contents of the file should be encrypted (or decrypted) with inline
 * encryption, then assign the appropriate encryption context to the bio.
 *
 * Normally the bio should be newly allocated (i.e. no pages added yet), as
 * otherwise fscrypt_mergeable_bio() won't work as intended.
 *
 * The encryption context will be freed automatically when the bio is freed.
 */
void fscrypt_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
			       u64 first_lblk, gfp_t gfp_mask)
{
	const struct fscrypt_info *ci;
	u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE];

	if (!fscrypt_inode_uses_inline_crypto(inode))
		return;
	ci = inode->i_crypt_info;

	fscrypt_generate_dun(ci, first_lblk, dun);
	bio_crypt_set_ctx(bio, &ci->ci_enc_key.blk_key->base, dun, gfp_mask);
}
EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx);
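
/*
 * Illustrative sketch (not part of this file): a filesystem building a
 * contents bio would typically pair fscrypt_set_bio_crypt_ctx() with
 * fscrypt_mergeable_bio(), along these lines:
 *
 *	bio = bio_alloc(GFP_NOIO, nr_vecs);
 *	fscrypt_set_bio_crypt_ctx(bio, inode, lblk, GFP_NOIO);
 *	while (<more blocks to add>) {
 *		if (!fscrypt_mergeable_bio(bio, inode, next_lblk))
 *			break;	(submit this bio and start a new one)
 *		<add the next block's page(s) to the bio>
 *	}
 */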
259 | ||
260 | /* Extract the inode and logical block number from a buffer_head. */ | |
261 | static bool bh_get_inode_and_lblk_num(const struct buffer_head *bh, | |
262 | const struct inode **inode_ret, | |
263 | u64 *lblk_num_ret) | |
264 | { | |
265 | struct page *page = bh->b_page; | |
266 | const struct address_space *mapping; | |
267 | const struct inode *inode; | |
268 | ||
269 | /* | |
270 | * The ext4 journal (jbd2) can submit a buffer_head it directly created | |
271 | * for a non-pagecache page. fscrypt doesn't care about these. | |
272 | */ | |
273 | mapping = page_mapping(page); | |
274 | if (!mapping) | |
275 | return false; | |
276 | inode = mapping->host; | |
277 | ||
278 | *inode_ret = inode; | |
279 | *lblk_num_ret = ((u64)page->index << (PAGE_SHIFT - inode->i_blkbits)) + | |
280 | (bh_offset(bh) >> inode->i_blkbits); | |
281 | return true; | |
282 | } | |
283 | ||
/**
 * fscrypt_set_bio_crypt_ctx_bh() - prepare a file contents bio for inline
 *				    crypto
 * @bio: a bio which will eventually be submitted to the file
 * @first_bh: the first buffer_head for which I/O will be submitted
 * @gfp_mask: memory allocation flags
 *
 * Same as fscrypt_set_bio_crypt_ctx(), except this takes a buffer_head instead
 * of an inode and block number directly.
 */
void fscrypt_set_bio_crypt_ctx_bh(struct bio *bio,
				  const struct buffer_head *first_bh,
				  gfp_t gfp_mask)
{
	const struct inode *inode;
	u64 first_lblk;

	if (bh_get_inode_and_lblk_num(first_bh, &inode, &first_lblk))
		fscrypt_set_bio_crypt_ctx(bio, inode, first_lblk, gfp_mask);
}
EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx_bh);
305 | ||
306 | /** | |
307 | * fscrypt_mergeable_bio() - test whether data can be added to a bio | |
308 | * @bio: the bio being built up | |
309 | * @inode: the inode for the next part of the I/O | |
310 | * @next_lblk: the next file logical block number in the I/O | |
311 | * | |
312 | * When building a bio which may contain data which should undergo inline | |
313 | * encryption (or decryption) via fscrypt, filesystems should call this function | |
314 | * to ensure that the resulting bio contains only contiguous data unit numbers. | |
315 | * This will return false if the next part of the I/O cannot be merged with the | |
316 | * bio because either the encryption key would be different or the encryption | |
317 | * data unit numbers would be discontiguous. | |
318 | * | |
319 | * fscrypt_set_bio_crypt_ctx() must have already been called on the bio. | |
320 | * | |
321 | * Return: true iff the I/O is mergeable | |
322 | */ | |
323 | bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode, | |
324 | u64 next_lblk) | |
325 | { | |
326 | const struct bio_crypt_ctx *bc = bio->bi_crypt_context; | |
327 | u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE]; | |
328 | ||
329 | if (!!bc != fscrypt_inode_uses_inline_crypto(inode)) | |
330 | return false; | |
331 | if (!bc) | |
332 | return true; | |
333 | ||
334 | /* | |
335 | * Comparing the key pointers is good enough, as all I/O for each key | |
336 | * uses the same pointer. I.e., there's currently no need to support | |
337 | * merging requests where the keys are the same but the pointers differ. | |
338 | */ | |
339 | if (bc->bc_key != &inode->i_crypt_info->ci_enc_key.blk_key->base) | |
340 | return false; | |
341 | ||
342 | fscrypt_generate_dun(inode->i_crypt_info, next_lblk, next_dun); | |
343 | return bio_crypt_dun_is_contiguous(bc, bio->bi_iter.bi_size, next_dun); | |
344 | } | |
345 | EXPORT_SYMBOL_GPL(fscrypt_mergeable_bio); | |
346 | ||
347 | /** | |
348 | * fscrypt_mergeable_bio_bh() - test whether data can be added to a bio | |
349 | * @bio: the bio being built up | |
350 | * @next_bh: the next buffer_head for which I/O will be submitted | |
351 | * | |
352 | * Same as fscrypt_mergeable_bio(), except this takes a buffer_head instead of | |
353 | * an inode and block number directly. | |
354 | * | |
355 | * Return: true iff the I/O is mergeable | |
356 | */ | |
357 | bool fscrypt_mergeable_bio_bh(struct bio *bio, | |
358 | const struct buffer_head *next_bh) | |
359 | { | |
360 | const struct inode *inode; | |
361 | u64 next_lblk; | |
362 | ||
363 | if (!bh_get_inode_and_lblk_num(next_bh, &inode, &next_lblk)) | |
364 | return !bio->bi_crypt_context; | |
365 | ||
366 | return fscrypt_mergeable_bio(bio, inode, next_lblk); | |
367 | } | |
368 | EXPORT_SYMBOL_GPL(fscrypt_mergeable_bio_bh); |