crypto/echainiv.c
/*
 * echainiv: Encrypted Chain IV Generator
 *
 * This generator generates an IV based on a sequence number by xoring it
 * with a salt and then encrypting it with the same key as used to encrypt
 * the plain text. This algorithm requires that the block size be equal
 * to the IV size. It is mainly useful for CBC.
 *
 * This generator can only be used by algorithms where authentication
 * is performed after encryption (i.e., authenc).
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

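/*
 * Conceptually, for a child transform such as authenc(hmac(sha1),cbc(aes))
 * (an illustrative choice, not mandated by this file), the construction
 * described above derives each IV from the 64-bit sequence number the
 * caller places in req->iv, roughly:
 *
 *	IV = E_K(seqno ^ salt)
 *
 * where the salt is drawn from the default RNG the first time the transform
 * encrypts and E_K is the child cipher under the ordinary data key. The
 * code below keeps the chained IV state in a per-CPU array that is read to
 * seed each request and written back when the request completes.
 */
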
#include <crypto/internal/aead.h>
#include <crypto/null.h>
#include <crypto/rng.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/string.h>

#define MAX_IV_SIZE 16

struct echainiv_request_ctx {
	struct scatterlist src[2];
	struct scatterlist dst[2];
	struct scatterlist ivbuf[2];
	struct scatterlist *ivsg;
	struct aead_givcrypt_request subreq;
};

struct echainiv_ctx {
	struct crypto_aead *child;
	spinlock_t lock;
	struct crypto_blkcipher *null;
	u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
};

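/*
 * Per-CPU IV state: read to seed the IV of the next encryption and written
 * back from the sub-request's IV buffer once that encryption has finished.
 */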
static DEFINE_PER_CPU(u32 [MAX_IV_SIZE / sizeof(u32)], echainiv_iv);

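/* Key and authentication-tag size handling is delegated to the child. */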
static int echainiv_setkey(struct crypto_aead *tfm,
			   const u8 *key, unsigned int keylen)
{
	struct echainiv_ctx *ctx = crypto_aead_ctx(tfm);

	return crypto_aead_setkey(ctx->child, key, keylen);
}

static int echainiv_setauthsize(struct crypto_aead *tfm,
				unsigned int authsize)
{
	struct echainiv_ctx *ctx = crypto_aead_ctx(tfm);

	return crypto_aead_setauthsize(ctx->child, authsize);
}

/* We don't care if we get preempted and read/write IVs from the next CPU. */
static void echainiv_read_iv(u8 *dst, unsigned size)
{
	u32 *a = (u32 *)dst;
	u32 __percpu *b = echainiv_iv;

	for (; size >= 4; size -= 4) {
		*a++ = this_cpu_read(*b);
		b++;
	}
}

static void echainiv_write_iv(const u8 *src, unsigned size)
{
	const u32 *a = (const u32 *)src;
	u32 __percpu *b = echainiv_iv;

	for (; size >= 4; size -= 4) {
		this_cpu_write(*b, *a);
		a++;
		b++;
	}
}

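/*
 * Completion handling for the old-style (givcrypt) path: once the child has
 * generated the IV into the bounce buffer, copy it into the destination
 * scatterlist at the IV position and release the buffer.
 */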
static void echainiv_encrypt_compat_complete2(struct aead_request *req,
					      int err)
{
	struct echainiv_request_ctx *rctx = aead_request_ctx(req);
	struct aead_givcrypt_request *subreq = &rctx->subreq;
	struct crypto_aead *geniv;

	if (err == -EINPROGRESS)
		return;

	if (err)
		goto out;

	geniv = crypto_aead_reqtfm(req);
	scatterwalk_map_and_copy(subreq->giv, rctx->ivsg, 0,
				 crypto_aead_ivsize(geniv), 1);

out:
	kzfree(subreq->giv);
}

static void echainiv_encrypt_compat_complete(
	struct crypto_async_request *base, int err)
{
	struct aead_request *req = base->data;

	echainiv_encrypt_compat_complete2(req, err);
	aead_request_complete(req, err);
}

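/*
 * Completion handling for the new-style path: record the sub-request's IV
 * in the per-CPU state and, if an aligned copy of the IV was allocated,
 * propagate it back to the caller's buffer before freeing it.
 */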
static void echainiv_encrypt_complete2(struct aead_request *req, int err)
{
	struct aead_request *subreq = aead_request_ctx(req);
	struct crypto_aead *geniv;
	unsigned int ivsize;

	if (err == -EINPROGRESS)
		return;

	if (err)
		goto out;

	geniv = crypto_aead_reqtfm(req);
	ivsize = crypto_aead_ivsize(geniv);

	echainiv_write_iv(subreq->iv, ivsize);

	if (req->iv != subreq->iv)
		memcpy(req->iv, subreq->iv, ivsize);

out:
	if (req->iv != subreq->iv)
		kzfree(subreq->iv);
}

static void echainiv_encrypt_complete(struct crypto_async_request *base,
				      int err)
{
	struct aead_request *req = base->data;

	echainiv_encrypt_complete2(req, err);
	aead_request_complete(req, err);
}

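/*
 * Encryption via an old-style child that still implements givencrypt: hand
 * the sequence number to the child's IV generator and let it place the IV
 * either directly into the destination page or into a bounce buffer that
 * the completion handler copies over.
 */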
static int echainiv_encrypt_compat(struct aead_request *req)
{
	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
	struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
	struct echainiv_request_ctx *rctx = aead_request_ctx(req);
	struct aead_givcrypt_request *subreq = &rctx->subreq;
	unsigned int ivsize = crypto_aead_ivsize(geniv);
	crypto_completion_t compl;
	void *data;
	u8 *info;
	__be64 seq;
	int err;

	if (req->cryptlen < ivsize)
		return -EINVAL;

	compl = req->base.complete;
	data = req->base.data;

	rctx->ivsg = scatterwalk_ffwd(rctx->ivbuf, req->dst, req->assoclen);
	info = PageHighMem(sg_page(rctx->ivsg)) ? NULL : sg_virt(rctx->ivsg);

	if (!info) {
		info = kmalloc(ivsize, req->base.flags &
				       CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
								  GFP_ATOMIC);
		if (!info)
			return -ENOMEM;

		compl = echainiv_encrypt_compat_complete;
		data = req;
	}

	memcpy(&seq, req->iv + ivsize - sizeof(seq), sizeof(seq));

	aead_givcrypt_set_tfm(subreq, ctx->child);
	/* Use compl/data so a bounce-buffer IV gets copied back and freed. */
	aead_givcrypt_set_callback(subreq, req->base.flags, compl, data);
	aead_givcrypt_set_crypt(subreq,
				scatterwalk_ffwd(rctx->src, req->src,
						 req->assoclen + ivsize),
				scatterwalk_ffwd(rctx->dst, rctx->ivsg,
						 ivsize),
				req->cryptlen - ivsize, req->iv);
	aead_givcrypt_set_assoc(subreq, req->src, req->assoclen);
	aead_givcrypt_set_giv(subreq, info, be64_to_cpu(seq));

	err = crypto_aead_givencrypt(subreq);
	if (unlikely(PageHighMem(sg_page(rctx->ivsg))))
		echainiv_encrypt_compat_complete2(req, err);
	return err;
}

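/*
 * Encryption via a new-style child: copy the associated data and plain text
 * into the destination with the null cipher if src and dst differ, write the
 * salted sequence number into the destination's IV slot, then encrypt the
 * remainder using the chained per-CPU value as the working IV.
 */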
static int echainiv_encrypt(struct aead_request *req)
{
	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
	struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
	struct aead_request *subreq = aead_request_ctx(req);
	crypto_completion_t compl;
	void *data;
	u8 *info;
	unsigned int ivsize = crypto_aead_ivsize(geniv);
	int err;

	if (req->cryptlen < ivsize)
		return -EINVAL;

	aead_request_set_tfm(subreq, ctx->child);

	compl = echainiv_encrypt_complete;
	data = req;
	info = req->iv;

	if (req->src != req->dst) {
		struct blkcipher_desc desc = {
			.tfm = ctx->null,
		};

		err = crypto_blkcipher_encrypt(&desc, req->dst, req->src,
					       req->assoclen + req->cryptlen);
		if (err)
			return err;
	}

	if (unlikely(!IS_ALIGNED((unsigned long)info,
				 crypto_aead_alignmask(geniv) + 1))) {
		info = kmalloc(ivsize, req->base.flags &
				       CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
								  GFP_ATOMIC);
		if (!info)
			return -ENOMEM;

		memcpy(info, req->iv, ivsize);
	}

	aead_request_set_callback(subreq, req->base.flags, compl, data);
	aead_request_set_crypt(subreq, req->dst, req->dst,
			       req->cryptlen - ivsize, info);
	aead_request_set_ad(subreq, req->assoclen + ivsize);

	crypto_xor(info, ctx->salt, ivsize);
	scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1);
	echainiv_read_iv(info, ivsize);

	err = crypto_aead_encrypt(subreq);
	echainiv_encrypt_complete2(req, err);
	return err;
}

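/*
 * Decryption via an old-style child: skip over the received IV, present the
 * data beyond it to the child and pass the IV itself, read from the source
 * scatterlist, as the working IV.
 */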
static int echainiv_decrypt_compat(struct aead_request *req)
{
	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
	struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
	struct echainiv_request_ctx *rctx = aead_request_ctx(req);
	struct aead_request *subreq = &rctx->subreq.areq;
	crypto_completion_t compl;
	void *data;
	unsigned int ivsize = crypto_aead_ivsize(geniv);

	if (req->cryptlen < ivsize + crypto_aead_authsize(geniv))
		return -EINVAL;

	aead_request_set_tfm(subreq, ctx->child);

	compl = req->base.complete;
	data = req->base.data;

	aead_request_set_callback(subreq, req->base.flags, compl, data);
	aead_request_set_crypt(subreq,
			       scatterwalk_ffwd(rctx->src, req->src,
						req->assoclen + ivsize),
			       scatterwalk_ffwd(rctx->dst, req->dst,
						req->assoclen + ivsize),
			       req->cryptlen - ivsize, req->iv);
	aead_request_set_assoc(subreq, req->src, req->assoclen);

	scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0);

	return crypto_aead_decrypt(subreq);
}

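/*
 * Decryption via a new-style child: count the received IV as part of the
 * associated data, use it as the working IV, and mirror it into the
 * destination when decrypting out of place.
 */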
static int echainiv_decrypt(struct aead_request *req)
{
	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
	struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
	struct aead_request *subreq = aead_request_ctx(req);
	crypto_completion_t compl;
	void *data;
	unsigned int ivsize = crypto_aead_ivsize(geniv);

	if (req->cryptlen < ivsize + crypto_aead_authsize(geniv))
		return -EINVAL;

	aead_request_set_tfm(subreq, ctx->child);

	compl = req->base.complete;
	data = req->base.data;

	aead_request_set_callback(subreq, req->base.flags, compl, data);
	aead_request_set_crypt(subreq, req->src, req->dst,
			       req->cryptlen - ivsize, req->iv);
	aead_request_set_ad(subreq, req->assoclen + ivsize);

	scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0);
	if (req->src != req->dst)
		scatterwalk_map_and_copy(req->iv, req->dst,
					 req->assoclen, ivsize, 1);

	return crypto_aead_decrypt(subreq);
}

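/*
 * The *_first handlers run only on the very first encryption with a given
 * transform: under the context lock they generate the salt from the default
 * RNG and then switch the encrypt entry point to the regular handler.
 */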
static int echainiv_encrypt_compat_first(struct aead_request *req)
{
	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
	struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
	int err = 0;

	spin_lock_bh(&ctx->lock);
	if (geniv->encrypt != echainiv_encrypt_compat_first)
		goto unlock;

	geniv->encrypt = echainiv_encrypt_compat;
	err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
				   crypto_aead_ivsize(geniv));

unlock:
	spin_unlock_bh(&ctx->lock);

	if (err)
		return err;

	return echainiv_encrypt_compat(req);
}

static int echainiv_encrypt_first(struct aead_request *req)
{
	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
	struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
	int err = 0;

	spin_lock_bh(&ctx->lock);
	if (geniv->encrypt != echainiv_encrypt_first)
		goto unlock;

	geniv->encrypt = echainiv_encrypt;
	err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
				   crypto_aead_ivsize(geniv));

unlock:
	spin_unlock_bh(&ctx->lock);

	if (err)
		return err;

	return echainiv_encrypt(req);
}

static int echainiv_compat_init(struct crypto_tfm *tfm)
{
	struct crypto_aead *geniv = __crypto_aead_cast(tfm);
	struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
	int err;

	spin_lock_init(&ctx->lock);

	crypto_aead_set_reqsize(geniv, sizeof(struct echainiv_request_ctx));

	err = aead_geniv_init(tfm);

	ctx->child = geniv->child;
	geniv->child = geniv;

	return err;
}

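/*
 * Instance initialisation for the new-style path additionally grabs the
 * default null skcipher, which echainiv_encrypt() uses to copy the
 * associated data and plain text from source to destination.
 */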
static int echainiv_init(struct crypto_tfm *tfm)
{
	struct crypto_aead *geniv = __crypto_aead_cast(tfm);
	struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
	int err;

	spin_lock_init(&ctx->lock);

	crypto_aead_set_reqsize(geniv, sizeof(struct aead_request));

	ctx->null = crypto_get_default_null_skcipher();
	err = PTR_ERR(ctx->null);
	if (IS_ERR(ctx->null))
		goto out;

	err = aead_geniv_init(tfm);
	if (err)
		goto drop_null;

	ctx->child = geniv->child;
	geniv->child = geniv;

out:
	return err;

drop_null:
	crypto_put_default_null_skcipher();
	goto out;
}

static void echainiv_compat_exit(struct crypto_tfm *tfm)
{
	struct echainiv_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_aead(ctx->child);
}

static void echainiv_exit(struct crypto_tfm *tfm)
{
	struct echainiv_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_aead(ctx->child);
	crypto_put_default_null_skcipher();
}

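/*
 * Template instantiation: build a generic geniv instance around the child
 * AEAD, restrict the IV size to a multiple of 32 bits between 8 bytes and
 * MAX_IV_SIZE, and pick the compat entry points when the child still uses
 * the old AEAD interface (cra_aead.encrypt is set).
 */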
static int echainiv_aead_create(struct crypto_template *tmpl,
				struct rtattr **tb)
{
	struct aead_instance *inst;
	struct crypto_aead_spawn *spawn;
	struct aead_alg *alg;
	int err;

	inst = aead_geniv_alloc(tmpl, tb, 0, 0);

	if (IS_ERR(inst))
		return PTR_ERR(inst);

	err = -EINVAL;
	if (inst->alg.ivsize < sizeof(u64) ||
	    inst->alg.ivsize & (sizeof(u32) - 1) ||
	    inst->alg.ivsize > MAX_IV_SIZE)
		goto free_inst;

	spawn = aead_instance_ctx(inst);
	alg = crypto_spawn_aead_alg(spawn);

	inst->alg.setkey = echainiv_setkey;
	inst->alg.setauthsize = echainiv_setauthsize;
	inst->alg.encrypt = echainiv_encrypt_first;
	inst->alg.decrypt = echainiv_decrypt;

	inst->alg.base.cra_init = echainiv_init;
	inst->alg.base.cra_exit = echainiv_exit;

	inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
	inst->alg.base.cra_ctxsize = sizeof(struct echainiv_ctx);
	inst->alg.base.cra_ctxsize += inst->alg.base.cra_aead.ivsize;

	if (alg->base.cra_aead.encrypt) {
		inst->alg.encrypt = echainiv_encrypt_compat_first;
		inst->alg.decrypt = echainiv_decrypt_compat;

		inst->alg.base.cra_init = echainiv_compat_init;
		inst->alg.base.cra_exit = echainiv_compat_exit;
	}

	err = aead_register_instance(tmpl, inst);
	if (err)
		goto free_inst;

out:
	return err;

free_inst:
	aead_geniv_free(inst);
	goto out;
}

static int echainiv_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	int err;

	err = crypto_get_default_rng();
	if (err)
		goto out;

	err = echainiv_aead_create(tmpl, tb);
	if (err)
		goto put_rng;

out:
	return err;

put_rng:
	crypto_put_default_rng();
	goto out;
}

static void echainiv_free(struct crypto_instance *inst)
{
	aead_geniv_free(aead_instance(inst));
	crypto_put_default_rng();
}

static struct crypto_template echainiv_tmpl = {
	.name = "echainiv",
	.create = echainiv_create,
	.free = echainiv_free,
	.module = THIS_MODULE,
};

static int __init echainiv_module_init(void)
{
	return crypto_register_template(&echainiv_tmpl);
}

static void __exit echainiv_module_exit(void)
{
	crypto_unregister_template(&echainiv_tmpl);
}

module_init(echainiv_module_init);
module_exit(echainiv_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Encrypted Chain IV Generator");
MODULE_ALIAS_CRYPTO("echainiv");