/*
 * linux/fs/ext4/crypto.c
 *
 * Copyright (C) 2015, Google, Inc.
 *
 * This contains encryption functions for ext4
 *
 * Written by Michael Halcrow, 2014.
 *
 * Filename encryption additions
 *	Uday Savagaonkar, 2014
 * Encryption policy handling additions
 *	Ildar Muslukhov, 2014
 *
 * This has not yet undergone a rigorous security audit.
 *
 * The usage of AES-XTS should conform to recommendations in NIST
 * Special Publication 800-38E and IEEE P1619/D16.
 */

#include <crypto/hash.h>
#include <crypto/sha.h>
#include <keys/user-type.h>
#include <keys/encrypted-type.h>
#include <linux/crypto.h>
#include <linux/ecryptfs.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/key.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <linux/spinlock_types.h>

#include "ext4_extents.h"
#include "xattr.h"

/* Encryption added and removed here! (L: */

static unsigned int num_prealloc_crypto_pages = 32;
static unsigned int num_prealloc_crypto_ctxs = 128;

module_param(num_prealloc_crypto_pages, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_pages,
		 "Number of crypto pages to preallocate");
module_param(num_prealloc_crypto_ctxs, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
		 "Number of crypto contexts to preallocate");

static mempool_t *ext4_bounce_page_pool;

static LIST_HEAD(ext4_free_crypto_ctxs);
static DEFINE_SPINLOCK(ext4_crypto_ctx_lock);

static struct kmem_cache *ext4_crypto_ctx_cachep;
struct kmem_cache *ext4_crypt_info_cachep;

/**
 * ext4_release_crypto_ctx() - Releases an encryption context
 * @ctx: The encryption context to release.
 *
 * If the encryption context was allocated from the pre-allocated pool, returns
 * it to that pool. Else, frees it.
 *
 * If there's a bounce page in the context, this frees that.
 */
void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx)
{
	unsigned long flags;

	if (ctx->flags & EXT4_WRITE_PATH_FL && ctx->w.bounce_page)
		mempool_free(ctx->w.bounce_page, ext4_bounce_page_pool);
	ctx->w.bounce_page = NULL;
	ctx->w.control_page = NULL;
	if (ctx->flags & EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL) {
		kmem_cache_free(ext4_crypto_ctx_cachep, ctx);
	} else {
		spin_lock_irqsave(&ext4_crypto_ctx_lock, flags);
		list_add(&ctx->free_list, &ext4_free_crypto_ctxs);
		spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags);
	}
}

/**
 * ext4_get_crypto_ctx() - Gets an encryption context
 * @inode: The inode for which we are doing the crypto
 *
 * Allocates and initializes an encryption context.
 *
 * Return: An allocated and initialized encryption context on success; an
 * ERR_PTR() value otherwise.
 */
struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode)
{
	struct ext4_crypto_ctx *ctx = NULL;
	int res = 0;
	unsigned long flags;
	struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;

	if (ci == NULL)
		return ERR_PTR(-ENOKEY);

	/*
	 * We first try getting the ctx from a free list because in
	 * the common case the ctx will have an allocated and
	 * initialized crypto tfm, so it's probably a worthwhile
	 * optimization. For the bounce page, we first try getting it
	 * from the kernel allocator because that's just about as fast
	 * as getting it from a list and because a cache of free pages
	 * should generally be a "last resort" option for a filesystem
	 * to be able to do its job.
	 */
	spin_lock_irqsave(&ext4_crypto_ctx_lock, flags);
	ctx = list_first_entry_or_null(&ext4_free_crypto_ctxs,
				       struct ext4_crypto_ctx, free_list);
	if (ctx)
		list_del(&ctx->free_list);
	spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags);
	if (!ctx) {
		ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, GFP_NOFS);
		if (!ctx) {
			res = -ENOMEM;
			goto out;
		}
		ctx->flags |= EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL;
	} else {
		ctx->flags &= ~EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL;
	}
	ctx->flags &= ~EXT4_WRITE_PATH_FL;

out:
	if (res) {
		if (!IS_ERR_OR_NULL(ctx))
			ext4_release_crypto_ctx(ctx);
		ctx = ERR_PTR(res);
	}
	return ctx;
}
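
/*
 * Illustrative sketch (not built): how a caller pairs ext4_get_crypto_ctx()
 * with ext4_release_crypto_ctx().  The function name is hypothetical; the
 * error handling mirrors the real callers further down in this file.
 */
#if 0
static int example_with_crypto_ctx(struct inode *inode)
{
	struct ext4_crypto_ctx *ctx;

	ctx = ext4_get_crypto_ctx(inode);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);	/* e.g. -ENOKEY or -ENOMEM */

	/* ... use ctx for one encryption or decryption operation ... */

	ext4_release_crypto_ctx(ctx);
	return 0;
}
#endif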

struct workqueue_struct *ext4_read_workqueue;
static DEFINE_MUTEX(crypto_init);

/**
 * ext4_exit_crypto() - Shutdown the ext4 encryption system
 */
void ext4_exit_crypto(void)
{
	struct ext4_crypto_ctx *pos, *n;

	list_for_each_entry_safe(pos, n, &ext4_free_crypto_ctxs, free_list)
		kmem_cache_free(ext4_crypto_ctx_cachep, pos);
	INIT_LIST_HEAD(&ext4_free_crypto_ctxs);
	if (ext4_bounce_page_pool)
		mempool_destroy(ext4_bounce_page_pool);
	ext4_bounce_page_pool = NULL;
	if (ext4_read_workqueue)
		destroy_workqueue(ext4_read_workqueue);
	ext4_read_workqueue = NULL;
	if (ext4_crypto_ctx_cachep)
		kmem_cache_destroy(ext4_crypto_ctx_cachep);
	ext4_crypto_ctx_cachep = NULL;
	if (ext4_crypt_info_cachep)
		kmem_cache_destroy(ext4_crypt_info_cachep);
	ext4_crypt_info_cachep = NULL;
}

/**
 * ext4_init_crypto() - Set up for ext4 encryption.
 *
 * We only call this when we start accessing encrypted files, since it
 * results in memory getting allocated that wouldn't otherwise be used.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int ext4_init_crypto(void)
{
	int i, res = -ENOMEM;

	mutex_lock(&crypto_init);
	if (ext4_read_workqueue)
		goto already_initialized;
	ext4_read_workqueue = alloc_workqueue("ext4_crypto", WQ_HIGHPRI, 0);
	if (!ext4_read_workqueue)
		goto fail;

	ext4_crypto_ctx_cachep = KMEM_CACHE(ext4_crypto_ctx,
					    SLAB_RECLAIM_ACCOUNT);
	if (!ext4_crypto_ctx_cachep)
		goto fail;

	ext4_crypt_info_cachep = KMEM_CACHE(ext4_crypt_info,
					    SLAB_RECLAIM_ACCOUNT);
	if (!ext4_crypt_info_cachep)
		goto fail;

	for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
		struct ext4_crypto_ctx *ctx;

		ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, GFP_NOFS);
		if (!ctx) {
			res = -ENOMEM;
			goto fail;
		}
		list_add(&ctx->free_list, &ext4_free_crypto_ctxs);
	}

	ext4_bounce_page_pool =
		mempool_create_page_pool(num_prealloc_crypto_pages, 0);
	if (!ext4_bounce_page_pool) {
		res = -ENOMEM;
		goto fail;
	}
already_initialized:
	mutex_unlock(&crypto_init);
	return 0;
fail:
	ext4_exit_crypto();
	mutex_unlock(&crypto_init);
	return res;
}
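
/*
 * Illustrative sketch (not built): ext4_init_crypto() is invoked lazily,
 * typically while setting up the key material for the first encrypted
 * inode, and is safe to call repeatedly because of the crypto_init mutex.
 * The caller shown here is hypothetical.
 */
#if 0
static int example_setup_encryption(struct inode *inode)
{
	int res = ext4_init_crypto();

	if (res)
		return res;	/* typically -ENOMEM */
	/* ... proceed to look up keys and populate i_crypt_info ... */
	return 0;
}
#endif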

/**
 * ext4_restore_control_page() - Unlocks a ciphertext page, frees its context
 * @data_page: The ciphertext (bounce) page previously returned by
 *             ext4_encrypt().
 *
 * Detaches the encryption context stored in the page's private field,
 * unlocks the page, and releases the context, which also returns the
 * bounce page to its pool.
 */
void ext4_restore_control_page(struct page *data_page)
{
	struct ext4_crypto_ctx *ctx =
		(struct ext4_crypto_ctx *)page_private(data_page);

	set_page_private(data_page, (unsigned long)NULL);
	ClearPagePrivate(data_page);
	unlock_page(data_page);
	ext4_release_crypto_ctx(ctx);
}

/**
 * ext4_crypt_complete() - The completion callback for page encryption
 * @req: The asynchronous encryption request context
 * @res: The result of the encryption operation
 */
static void ext4_crypt_complete(struct crypto_async_request *req, int res)
{
	struct ext4_completion_result *ecr = req->data;

	if (res == -EINPROGRESS)
		return;
	ecr->res = res;
	complete(&ecr->completion);
}

typedef enum {
	EXT4_DECRYPT = 0,
	EXT4_ENCRYPT,
} ext4_direction_t;

static int ext4_page_crypto(struct inode *inode,
			    ext4_direction_t rw,
			    pgoff_t index,
			    struct page *src_page,
			    struct page *dest_page)
{
	u8 xts_tweak[EXT4_XTS_TWEAK_SIZE];
	struct ablkcipher_request *req = NULL;
	DECLARE_EXT4_COMPLETION_RESULT(ecr);
	struct scatterlist dst, src;
	struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;
	struct crypto_ablkcipher *tfm = ci->ci_ctfm;
	int res = 0;

	req = ablkcipher_request_alloc(tfm, GFP_NOFS);
	if (!req) {
		printk_ratelimited(KERN_ERR
				   "%s: ablkcipher_request_alloc() failed\n",
				   __func__);
		return -ENOMEM;
	}
	ablkcipher_request_set_callback(
		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		ext4_crypt_complete, &ecr);

	/* The XTS tweak is the page index, zero-padded to the tweak size. */
	BUILD_BUG_ON(EXT4_XTS_TWEAK_SIZE < sizeof(index));
	memcpy(xts_tweak, &index, sizeof(index));
	memset(&xts_tweak[sizeof(index)], 0,
	       EXT4_XTS_TWEAK_SIZE - sizeof(index));

	sg_init_table(&dst, 1);
	sg_set_page(&dst, dest_page, PAGE_CACHE_SIZE, 0);
	sg_init_table(&src, 1);
	sg_set_page(&src, src_page, PAGE_CACHE_SIZE, 0);
	ablkcipher_request_set_crypt(req, &src, &dst, PAGE_CACHE_SIZE,
				     xts_tweak);
	if (rw == EXT4_DECRYPT)
		res = crypto_ablkcipher_decrypt(req);
	else
		res = crypto_ablkcipher_encrypt(req);
	if (res == -EINPROGRESS || res == -EBUSY) {
		wait_for_completion(&ecr.completion);
		res = ecr.res;
	}
	ablkcipher_request_free(req);
	if (res) {
		printk_ratelimited(
			KERN_ERR
			"%s: crypto_ablkcipher_%scrypt() returned %d\n",
			__func__, (rw == EXT4_DECRYPT) ? "de" : "en", res);
		return res;
	}
	return 0;
}

/*
 * Grabs a bounce page from the mempool and flags the context as a write
 * path so that ext4_release_crypto_ctx() returns the page to the pool.
 */
static struct page *alloc_bounce_page(struct ext4_crypto_ctx *ctx)
{
	ctx->w.bounce_page = mempool_alloc(ext4_bounce_page_pool, GFP_NOWAIT);
	if (ctx->w.bounce_page == NULL)
		return ERR_PTR(-ENOMEM);
	ctx->flags |= EXT4_WRITE_PATH_FL;
	return ctx->w.bounce_page;
}

/**
 * ext4_encrypt() - Encrypts a page
 * @inode:          The inode for which the encryption should take place
 * @plaintext_page: The page to encrypt. Must be locked.
 *
 * Allocates a ciphertext page and encrypts plaintext_page into it using the
 * inode's encryption context.
 *
 * Called on the page write path.  The caller must call
 * ext4_restore_control_page() on the returned ciphertext page to
 * release the bounce buffer and the encryption context.
 *
 * Return: An allocated page with the encrypted content on success. Else, an
 * ERR_PTR() value.
 */
struct page *ext4_encrypt(struct inode *inode,
			  struct page *plaintext_page)
{
	struct ext4_crypto_ctx *ctx;
	struct page *ciphertext_page = NULL;
	int err;

	BUG_ON(!PageLocked(plaintext_page));

	ctx = ext4_get_crypto_ctx(inode);
	if (IS_ERR(ctx))
		return (struct page *) ctx;

	/* The encryption operation will require a bounce page. */
	ciphertext_page = alloc_bounce_page(ctx);
	if (IS_ERR(ciphertext_page))
		goto errout;
	ctx->w.control_page = plaintext_page;
	err = ext4_page_crypto(inode, EXT4_ENCRYPT, plaintext_page->index,
			       plaintext_page, ciphertext_page);
	if (err) {
		ciphertext_page = ERR_PTR(err);
	errout:
		ext4_release_crypto_ctx(ctx);
		return ciphertext_page;
	}
	SetPagePrivate(ciphertext_page);
	set_page_private(ciphertext_page, (unsigned long)ctx);
	lock_page(ciphertext_page);
	return ciphertext_page;
}
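
/*
 * Illustrative sketch (not built): the write path encrypts into a bounce
 * page, submits the bounce page for block I/O, and then calls
 * ext4_restore_control_page() to unlock the page and release the context.
 * The function name and the I/O step are hypothetical placeholders.
 */
#if 0
static int example_write_encrypted_page(struct inode *inode,
					struct page *page)
{
	struct page *ciphertext_page;

	ciphertext_page = ext4_encrypt(inode, page);
	if (IS_ERR(ciphertext_page))
		return PTR_ERR(ciphertext_page);

	/* ... submit ciphertext_page for writeback I/O and wait ... */

	ext4_restore_control_page(ciphertext_page);
	return 0;
}
#endif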

/**
 * ext4_decrypt() - Decrypts a page in-place
 * @page: The page to decrypt. Must be locked.
 *
 * Decrypts the page in-place using the inode's encryption context.
 *
 * Called from the read completion callback.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int ext4_decrypt(struct page *page)
{
	BUG_ON(!PageLocked(page));

	return ext4_page_crypto(page->mapping->host,
				EXT4_DECRYPT, page->index, page, page);
}
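
/*
 * Illustrative sketch (not built): a read-completion work item, queued on
 * ext4_read_workqueue, decrypts each page in place before marking it up
 * to date.  The function name is hypothetical.
 */
#if 0
static void example_read_complete(struct page *page)
{
	if (ext4_decrypt(page))
		SetPageError(page);
	else
		SetPageUptodate(page);
	unlock_page(page);
}
#endif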

int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex)
{
	struct ext4_crypto_ctx *ctx;
	struct page *ciphertext_page = NULL;
	struct bio *bio;
	ext4_lblk_t lblk = le32_to_cpu(ex->ee_block);
	ext4_fsblk_t pblk = ext4_ext_pblock(ex);
	unsigned int len = ext4_ext_get_actual_len(ex);
	int ret, err = 0;

#if 0
	ext4_msg(inode->i_sb, KERN_CRIT,
		 "ext4_encrypted_zeroout ino %lu lblk %u len %u",
		 (unsigned long) inode->i_ino, lblk, len);
#endif

	BUG_ON(inode->i_sb->s_blocksize != PAGE_CACHE_SIZE);

	ctx = ext4_get_crypto_ctx(inode);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ciphertext_page = alloc_bounce_page(ctx);
	if (IS_ERR(ciphertext_page)) {
		err = PTR_ERR(ciphertext_page);
		goto errout;
	}

	while (len--) {
		err = ext4_page_crypto(inode, EXT4_ENCRYPT, lblk,
				       ZERO_PAGE(0), ciphertext_page);
		if (err)
			goto errout;

		bio = bio_alloc(GFP_KERNEL, 1);
		if (!bio) {
			err = -ENOMEM;
			goto errout;
		}
		bio->bi_bdev = inode->i_sb->s_bdev;
		bio->bi_iter.bi_sector =
			pblk << (inode->i_sb->s_blocksize_bits - 9);
		ret = bio_add_page(bio, ciphertext_page,
				   inode->i_sb->s_blocksize, 0);
		if (ret != inode->i_sb->s_blocksize) {
			/* should never happen! */
			ext4_msg(inode->i_sb, KERN_ERR,
				 "bio_add_page failed: %d", ret);
			WARN_ON(1);
			bio_put(bio);
			err = -EIO;
			goto errout;
		}
		err = submit_bio_wait(WRITE, bio);
		if ((err == 0) && bio->bi_error)
			err = -EIO;
		bio_put(bio);
		if (err)
			goto errout;
		lblk++; pblk++;
	}
	err = 0;
errout:
	ext4_release_crypto_ctx(ctx);
	return err;
}

bool ext4_valid_contents_enc_mode(uint32_t mode)
{
	return (mode == EXT4_ENCRYPTION_MODE_AES_256_XTS);
}

/**
 * ext4_validate_encryption_key_size() - Validate the encryption key size
 * @mode: The key mode.
 * @size: The key size to validate.
 *
 * Return: The validated key size for @mode. Zero if invalid.
 */
uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size)
{
	if (size == ext4_encryption_key_size(mode))
		return size;
	return 0;
}
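
/*
 * Illustrative sketch (not built): key setup validates the mode and the
 * key size together before using the raw key.  The function name is
 * hypothetical.
 */
#if 0
static int example_check_key(uint32_t mode, uint32_t size)
{
	if (!ext4_valid_contents_enc_mode(mode))
		return -EINVAL;
	if (!ext4_validate_encryption_key_size(mode, size))
		return -EINVAL;
	return 0;
}
#endif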