/*
 * Copyright (C) 2005,2006,2007,2008 IBM Corporation
 *
 * Authors:
 * Mimi Zohar <zohar@us.ibm.com>
 * Kylene Hall <kjhall@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, version 2 of the License.
 *
 * File: ima_crypto.c
 *	Calculates md5/sha1 file hash, template hash, boot-aggregate hash
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/ratelimit.h>
#include <linux/file.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <crypto/hash.h>
#include <crypto/hash_info.h>
#include "ima.h"

struct ahash_completion {
	struct completion completion;
	int err;
};

/* minimum file size for ahash use */
static unsigned long ima_ahash_minsize;
module_param_named(ahash_minsize, ima_ahash_minsize, ulong, 0644);
MODULE_PARM_DESC(ahash_minsize, "Minimum file size for ahash use");

/* default is 0 - 1 page. */
static int ima_maxorder;
static unsigned int ima_bufsize = PAGE_SIZE;

static int param_set_bufsize(const char *val, const struct kernel_param *kp)
{
	unsigned long long size;
	int order;

	size = memparse(val, NULL);
	order = get_order(size);
	if (order >= MAX_ORDER)
		return -EINVAL;
	ima_maxorder = order;
	ima_bufsize = PAGE_SIZE << order;
	return 0;
}
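
/*
 * Illustrative examples of the rounding above (assuming 4 KiB pages):
 * memparse() accepts plain byte counts as well as K/M/G suffixes, and
 * get_order() rounds the request up to a power-of-two number of pages,
 * so "3000" and "4K" both yield order 0 (ima_bufsize = 4096), while
 * "5000" or "8K" yield order 1 (ima_bufsize = 8192).
 */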

static struct kernel_param_ops param_ops_bufsize = {
	.set = param_set_bufsize,
	.get = param_get_uint,
};
#define param_check_bufsize(name, p) __param_check(name, p, unsigned int)

module_param_named(ahash_bufsize, ima_bufsize, bufsize, 0644);
MODULE_PARM_DESC(ahash_bufsize, "Maximum ahash buffer size");
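
/*
 * Since IMA is normally built in, these parameters are typically given on
 * the kernel command line, e.g. (illustrative values only):
 *	ima.ahash_minsize=262144 ima.ahash_bufsize=32768
 * With 0644 permissions they are also exposed under
 * /sys/module/ima/parameters/ at runtime.
 */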

static struct crypto_shash *ima_shash_tfm;
static struct crypto_ahash *ima_ahash_tfm;

/**
 * ima_kernel_read - read file content
 *
 * This is a function for reading file content instead of kernel_read().
 * It does not perform locking checks, to ensure it cannot be blocked.
 * It does not perform security checks, because they are irrelevant for IMA.
 *
 */
static int ima_kernel_read(struct file *file, loff_t offset,
			   char *addr, unsigned long count)
{
	mm_segment_t old_fs;
	char __user *buf = addr;
	ssize_t ret = -EINVAL;

	if (!(file->f_mode & FMODE_READ))
		return -EBADF;

	old_fs = get_fs();
	set_fs(get_ds());
	if (file->f_op->read)
		ret = file->f_op->read(file, buf, count, &offset);
	else if (file->f_op->aio_read)
		ret = do_sync_read(file, buf, count, &offset);
	else if (file->f_op->read_iter)
		ret = new_sync_read(file, buf, count, &offset);
	set_fs(old_fs);
	return ret;
}

int __init ima_init_crypto(void)
{
	long rc;

	ima_shash_tfm = crypto_alloc_shash(hash_algo_name[ima_hash_algo], 0, 0);
	if (IS_ERR(ima_shash_tfm)) {
		rc = PTR_ERR(ima_shash_tfm);
		pr_err("Can not allocate %s (reason: %ld)\n",
		       hash_algo_name[ima_hash_algo], rc);
		return rc;
	}
	return 0;
}

static struct crypto_shash *ima_alloc_tfm(enum hash_algo algo)
{
	struct crypto_shash *tfm = ima_shash_tfm;
	int rc;

	if (algo < 0 || algo >= HASH_ALGO__LAST)
		algo = ima_hash_algo;

	if (algo != ima_hash_algo) {
		tfm = crypto_alloc_shash(hash_algo_name[algo], 0, 0);
		if (IS_ERR(tfm)) {
			rc = PTR_ERR(tfm);
			pr_err("Can not allocate %s (reason: %d)\n",
			       hash_algo_name[algo], rc);
		}
	}
	return tfm;
}

static void ima_free_tfm(struct crypto_shash *tfm)
{
	if (tfm != ima_shash_tfm)
		crypto_free_shash(tfm);
}

/**
 * ima_alloc_pages() - Allocate contiguous pages.
 * @max_size: Maximum amount of memory to allocate.
 * @allocated_size: Returned size of actual allocation.
 * @last_warn: Should the final, order-zero allocation warn or not.
 *
 * Tries to do opportunistic memory allocation, first trying to allocate
 * max_size amount of memory and then retrying with smaller orders until
 * order zero is reached. Allocation is attempted without generating
 * allocation warnings unless last_warn is set; last_warn affects only the
 * final, order-zero allocation.
 *
 * By default, ima_maxorder is 0 and this is equivalent to kmalloc(GFP_KERNEL)
 *
 * Return pointer to allocated memory, or NULL on failure.
 */
static void *ima_alloc_pages(loff_t max_size, size_t *allocated_size,
			     int last_warn)
{
	void *ptr;
	int order = ima_maxorder;
	gfp_t gfp_mask = __GFP_WAIT | __GFP_NOWARN | __GFP_NORETRY;

	if (order)
		order = min(get_order(max_size), order);

	for (; order; order--) {
		ptr = (void *)__get_free_pages(gfp_mask, order);
		if (ptr) {
			*allocated_size = PAGE_SIZE << order;
			return ptr;
		}
	}

	/* order is zero - one page */

	gfp_mask = GFP_KERNEL;

	if (!last_warn)
		gfp_mask |= __GFP_NOWARN;

	ptr = (void *)__get_free_pages(gfp_mask, 0);
	if (ptr) {
		*allocated_size = PAGE_SIZE;
		return ptr;
	}

	*allocated_size = 0;
	return NULL;
}
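
/*
 * Worked example of the fallback above (illustrative, assuming 4 KiB pages
 * and ima_maxorder = 2, i.e. ahash_bufsize = 16K): for a 40 KiB file the
 * loop first tries an order-2 (16 KiB) allocation, then order 1 (8 KiB),
 * and finally falls through to the single order-0 (4 KiB) attempt, which
 * may generate an allocation warning if last_warn is set.
 */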

/**
 * ima_free_pages() - Free pages allocated by ima_alloc_pages().
 * @ptr: Pointer to allocated pages.
 * @size: Size of allocated buffer.
 */
static void ima_free_pages(void *ptr, size_t size)
{
	if (!ptr)
		return;
	free_pages((unsigned long)ptr, get_order(size));
}

static struct crypto_ahash *ima_alloc_atfm(enum hash_algo algo)
{
	struct crypto_ahash *tfm = ima_ahash_tfm;
	int rc;

	if (algo < 0 || algo >= HASH_ALGO__LAST)
		algo = ima_hash_algo;

	if (algo != ima_hash_algo || !tfm) {
		tfm = crypto_alloc_ahash(hash_algo_name[algo], 0, 0);
		if (!IS_ERR(tfm)) {
			if (algo == ima_hash_algo)
				ima_ahash_tfm = tfm;
		} else {
			rc = PTR_ERR(tfm);
			pr_err("Can not allocate %s (reason: %d)\n",
			       hash_algo_name[algo], rc);
		}
	}
	return tfm;
}

static void ima_free_atfm(struct crypto_ahash *tfm)
{
	if (tfm != ima_ahash_tfm)
		crypto_free_ahash(tfm);
}

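/*
 * Async hash requests may complete in the caller's context or later from
 * the driver: crypto_ahash_init/update/final() can return -EINPROGRESS or
 * -EBUSY instead of a final status. ahash_complete() is the request
 * callback that records the result and signals the completion, and
 * ahash_wait() turns that asynchronous pattern back into a synchronous
 * return code for the callers below.
 */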
static void ahash_complete(struct crypto_async_request *req, int err)
{
	struct ahash_completion *res = req->data;

	if (err == -EINPROGRESS)
		return;
	res->err = err;
	complete(&res->completion);
}

static int ahash_wait(int err, struct ahash_completion *res)
{
	switch (err) {
	case 0:
		break;
	case -EINPROGRESS:
	case -EBUSY:
		wait_for_completion(&res->completion);
		reinit_completion(&res->completion);
		err = res->err;
		/* fall through */
	default:
		pr_crit_ratelimited("ahash calculation failed: err: %d\n", err);
	}

	return err;
}

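/*
 * Compute the file hash with the asynchronous hash API. The file is read
 * in chunks of up to rbuf_size bytes; when a second buffer was successfully
 * allocated, the loop ping-pongs between rbuf[0] and rbuf[1] so that the
 * next ima_kernel_read() can proceed while the previous
 * crypto_ahash_update() request is still in flight.
 */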
static int ima_calc_file_hash_atfm(struct file *file,
				   struct ima_digest_data *hash,
				   struct crypto_ahash *tfm)
{
	loff_t i_size, offset;
	char *rbuf[2] = { NULL, };
	int rc, read = 0, rbuf_len, active = 0, ahash_rc = 0;
	struct ahash_request *req;
	struct scatterlist sg[1];
	struct ahash_completion res;
	size_t rbuf_size[2];

	hash->length = crypto_ahash_digestsize(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	init_completion(&res.completion);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				   CRYPTO_TFM_REQ_MAY_SLEEP,
				   ahash_complete, &res);

	rc = ahash_wait(crypto_ahash_init(req), &res);
	if (rc)
		goto out1;

	i_size = i_size_read(file_inode(file));

	if (i_size == 0)
		goto out2;

	/*
	 * Try to allocate maximum size of memory.
	 * Fail if even a single page cannot be allocated.
	 */
	rbuf[0] = ima_alloc_pages(i_size, &rbuf_size[0], 1);
	if (!rbuf[0]) {
		rc = -ENOMEM;
		goto out1;
	}

	/* Only allocate one buffer if that is enough. */
	if (i_size > rbuf_size[0]) {
		/*
		 * Try to allocate secondary buffer. If that fails, fall back
		 * to using single buffering. Use previous memory allocation
		 * size as baseline for possible allocation size.
		 */
		rbuf[1] = ima_alloc_pages(i_size - rbuf_size[0],
					  &rbuf_size[1], 0);
	}

	if (!(file->f_mode & FMODE_READ)) {
		file->f_mode |= FMODE_READ;
		read = 1;
	}

	for (offset = 0; offset < i_size; offset += rbuf_len) {
		if (!rbuf[1] && offset) {
			/* Not using two buffers, and it is not the first
			 * read/request, wait for the completion of the
			 * previous ahash_update() request.
			 */
			rc = ahash_wait(ahash_rc, &res);
			if (rc)
				goto out3;
		}
		/* read buffer */
		rbuf_len = min_t(loff_t, i_size - offset, rbuf_size[active]);
		rc = ima_kernel_read(file, offset, rbuf[active], rbuf_len);
		if (rc != rbuf_len)
			goto out3;

		if (rbuf[1] && offset) {
			/* Using two buffers, and it is not the first
			 * read/request, wait for the completion of the
			 * previous ahash_update() request.
			 */
			rc = ahash_wait(ahash_rc, &res);
			if (rc)
				goto out3;
		}

		sg_init_one(&sg[0], rbuf[active], rbuf_len);
		ahash_request_set_crypt(req, sg, NULL, rbuf_len);

		ahash_rc = crypto_ahash_update(req);

		if (rbuf[1])
			active = !active; /* swap buffers, if we use two */
	}
	/* wait for the last update request to complete */
	rc = ahash_wait(ahash_rc, &res);
out3:
	if (read)
		file->f_mode &= ~FMODE_READ;
	ima_free_pages(rbuf[0], rbuf_size[0]);
	ima_free_pages(rbuf[1], rbuf_size[1]);
out2:
	if (!rc) {
		ahash_request_set_crypt(req, NULL, hash->digest, 0);
		rc = ahash_wait(crypto_ahash_final(req), &res);
	}
out1:
	ahash_request_free(req);
	return rc;
}

static int ima_calc_file_ahash(struct file *file, struct ima_digest_data *hash)
{
	struct crypto_ahash *tfm;
	int rc;

	tfm = ima_alloc_atfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = ima_calc_file_hash_atfm(file, hash, tfm);

	ima_free_atfm(tfm);

	return rc;
}

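/*
 * Synchronous counterpart of ima_calc_file_hash_atfm(): read the file one
 * PAGE_SIZE chunk at a time and feed each chunk to crypto_shash_update().
 */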
static int ima_calc_file_hash_tfm(struct file *file,
				  struct ima_digest_data *hash,
				  struct crypto_shash *tfm)
{
	loff_t i_size, offset = 0;
	char *rbuf;
	int rc, read = 0;
	struct {
		struct shash_desc shash;
		char ctx[crypto_shash_descsize(tfm)];
	} desc;

	desc.shash.tfm = tfm;
	desc.shash.flags = 0;

	hash->length = crypto_shash_digestsize(tfm);

	rc = crypto_shash_init(&desc.shash);
	if (rc != 0)
		return rc;

	i_size = i_size_read(file_inode(file));

	if (i_size == 0)
		goto out;

	rbuf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!rbuf)
		return -ENOMEM;

	if (!(file->f_mode & FMODE_READ)) {
		file->f_mode |= FMODE_READ;
		read = 1;
	}

	while (offset < i_size) {
		int rbuf_len;

		rbuf_len = ima_kernel_read(file, offset, rbuf, PAGE_SIZE);
		if (rbuf_len < 0) {
			rc = rbuf_len;
			break;
		}
		if (rbuf_len == 0)
			break;
		offset += rbuf_len;

		rc = crypto_shash_update(&desc.shash, rbuf, rbuf_len);
		if (rc)
			break;
	}
	if (read)
		file->f_mode &= ~FMODE_READ;
	kfree(rbuf);
out:
	if (!rc)
		rc = crypto_shash_final(&desc.shash, hash->digest);
	return rc;
}

static int ima_calc_file_shash(struct file *file, struct ima_digest_data *hash)
{
	struct crypto_shash *tfm;
	int rc;

	tfm = ima_alloc_tfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = ima_calc_file_hash_tfm(file, hash, tfm);

	ima_free_tfm(tfm);

	return rc;
}

/*
 * ima_calc_file_hash - calculate file hash
 *
 * Asynchronous hash (ahash) allows using HW acceleration for calculating
 * a hash. ahash performance varies for different data sizes on different
 * crypto accelerators. shash performance might be better for smaller files.
 * The 'ima.ahash_minsize' module parameter allows specifying the minimum
 * file size for which ahash should be used on a given system.
 *
 * If the ima.ahash_minsize parameter is not specified, this function uses
 * shash for the hash calculation. If ahash fails, it falls back to using
 * shash.
 */
int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash)
{
	loff_t i_size;
	int rc;

	i_size = i_size_read(file_inode(file));

	if (ima_ahash_minsize && i_size >= ima_ahash_minsize) {
		rc = ima_calc_file_ahash(file, hash);
		if (!rc)
			return 0;
	}

	return ima_calc_file_shash(file, hash);
}
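
/*
 * Sketch of a typical caller (illustrative only; the real callers live
 * elsewhere in IMA): the digest buffer follows the ima_digest_data header
 * and hash->algo selects the algorithm before the call.
 *
 *	struct {
 *		struct ima_digest_data hdr;
 *		char digest[IMA_MAX_DIGEST_SIZE];
 *	} hash;
 *
 *	hash.hdr.algo = ima_hash_algo;
 *	rc = ima_calc_file_hash(file, &hash.hdr);
 */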

/*
 * Calculate the hash of template data
 */
static int ima_calc_field_array_hash_tfm(struct ima_field_data *field_data,
					 struct ima_template_desc *td,
					 int num_fields,
					 struct ima_digest_data *hash,
					 struct crypto_shash *tfm)
{
	struct {
		struct shash_desc shash;
		char ctx[crypto_shash_descsize(tfm)];
	} desc;
	int rc, i;

	desc.shash.tfm = tfm;
	desc.shash.flags = 0;

	hash->length = crypto_shash_digestsize(tfm);

	rc = crypto_shash_init(&desc.shash);
	if (rc != 0)
		return rc;

	for (i = 0; i < num_fields; i++) {
		u8 buffer[IMA_EVENT_NAME_LEN_MAX + 1] = { 0 };
		u8 *data_to_hash = field_data[i].data;
		u32 datalen = field_data[i].len;

		if (strcmp(td->name, IMA_TEMPLATE_IMA_NAME) != 0) {
			rc = crypto_shash_update(&desc.shash,
						 (const u8 *) &field_data[i].len,
						 sizeof(field_data[i].len));
			if (rc)
				break;
		} else if (strcmp(td->fields[i]->field_id, "n") == 0) {
			memcpy(buffer, data_to_hash, datalen);
			data_to_hash = buffer;
			datalen = IMA_EVENT_NAME_LEN_MAX + 1;
		}
		rc = crypto_shash_update(&desc.shash, data_to_hash, datalen);
		if (rc)
			break;
	}

	if (!rc)
		rc = crypto_shash_final(&desc.shash, hash->digest);

	return rc;
}

int ima_calc_field_array_hash(struct ima_field_data *field_data,
			      struct ima_template_desc *desc, int num_fields,
			      struct ima_digest_data *hash)
{
	struct crypto_shash *tfm;
	int rc;

	tfm = ima_alloc_tfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = ima_calc_field_array_hash_tfm(field_data, desc, num_fields,
					   hash, tfm);

	ima_free_tfm(tfm);

	return rc;
}

static void __init ima_pcrread(int idx, u8 *pcr)
{
	if (!ima_used_chip)
		return;

	if (tpm_pcr_read(TPM_ANY_NUM, idx, pcr) != 0)
		pr_err("Error Communicating to TPM chip\n");
}

/*
 * Calculate the boot aggregate hash
 */
static int __init ima_calc_boot_aggregate_tfm(char *digest,
					      struct crypto_shash *tfm)
{
	u8 pcr_i[TPM_DIGEST_SIZE];
	int rc, i;
	struct {
		struct shash_desc shash;
		char ctx[crypto_shash_descsize(tfm)];
	} desc;

	desc.shash.tfm = tfm;
	desc.shash.flags = 0;

	rc = crypto_shash_init(&desc.shash);
	if (rc != 0)
		return rc;

	/* cumulative sha1 over tpm registers 0-7 */
	for (i = TPM_PCR0; i < TPM_PCR8; i++) {
		ima_pcrread(i, pcr_i);
		/* now accumulate with current aggregate */
		rc = crypto_shash_update(&desc.shash, pcr_i, TPM_DIGEST_SIZE);
	}
	if (!rc)
		crypto_shash_final(&desc.shash, digest);
	return rc;
}

int __init ima_calc_boot_aggregate(struct ima_digest_data *hash)
{
	struct crypto_shash *tfm;
	int rc;

	tfm = ima_alloc_tfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	hash->length = crypto_shash_digestsize(tfm);
	rc = ima_calc_boot_aggregate_tfm(hash->digest, tfm);

	ima_free_tfm(tfm);

	return rc;
}