/*
 * seqiv: Sequence Number IV Generator
 *
 * This generator generates an IV based on a sequence number by xoring it
 * with a salt.  This algorithm is mainly useful for CTR and similar modes.
 *
 * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

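/*
 * Worked example (illustrative only, assuming an 8-byte IV): with salt
 * 0x0011223344556677 and sequence number 5, the generated IV is
 *
 *      IV = salt ^ cpu_to_be64(5) = 0x0011223344556672
 *
 * so IVs stay unique until the 64-bit sequence number wraps.  This is
 * exactly the uniqueness (not unpredictability) that counter-style
 * modes such as CTR and GCM require.
 */
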
#include <crypto/internal/geniv.h>
#include <crypto/internal/skcipher.h>
#include <crypto/null.h>
#include <crypto/rng.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>

struct seqniv_request_ctx {
        struct scatterlist dst[2];
        struct aead_request subreq;
};

struct seqiv_ctx {
        spinlock_t lock;
        u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
};

struct seqiv_aead_ctx {
        /* aead_geniv_ctx must be the first element */
        struct aead_geniv_ctx geniv;
        struct crypto_blkcipher *null;
        u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
};

static void seqiv_free(struct crypto_instance *inst);

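/*
 * Completion handlers for the bounce-buffer slow path: when the
 * caller's IV buffer is misaligned, the request runs with a kmalloc'd
 * copy of the IV.  On completion the IV is copied back and the
 * temporary buffer is freed.  -EINPROGRESS only signals that the
 * request was queued, so the buffer must be kept alive in that case.
 */
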
static void seqiv_complete2(struct skcipher_givcrypt_request *req, int err)
{
        struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
        struct crypto_ablkcipher *geniv;

        if (err == -EINPROGRESS)
                return;

        if (err)
                goto out;

        geniv = skcipher_givcrypt_reqtfm(req);
        memcpy(req->creq.info, subreq->info, crypto_ablkcipher_ivsize(geniv));

out:
        kfree(subreq->info);
}

static void seqiv_complete(struct crypto_async_request *base, int err)
{
        struct skcipher_givcrypt_request *req = base->data;

        seqiv_complete2(req, err);
        skcipher_givcrypt_complete(req, err);
}

static void seqiv_aead_complete2(struct aead_givcrypt_request *req, int err)
{
        struct aead_request *subreq = aead_givcrypt_reqctx(req);
        struct crypto_aead *geniv;

        if (err == -EINPROGRESS)
                return;

        if (err)
                goto out;

        geniv = aead_givcrypt_reqtfm(req);
        memcpy(req->areq.iv, subreq->iv, crypto_aead_ivsize(geniv));

out:
        kfree(subreq->iv);
}

static void seqiv_aead_complete(struct crypto_async_request *base, int err)
{
        struct aead_givcrypt_request *req = base->data;

        seqiv_aead_complete2(req, err);
        aead_givcrypt_complete(req, err);
}

static void seqiv_aead_encrypt_complete2(struct aead_request *req, int err)
{
        struct aead_request *subreq = aead_request_ctx(req);
        struct crypto_aead *geniv;

        if (err == -EINPROGRESS)
                return;

        if (err)
                goto out;

        geniv = crypto_aead_reqtfm(req);
        memcpy(req->iv, subreq->iv, crypto_aead_ivsize(geniv));

out:
        kzfree(subreq->iv);
}

static void seqiv_aead_encrypt_complete(struct crypto_async_request *base,
                                        int err)
{
        struct aead_request *req = base->data;

        seqiv_aead_encrypt_complete2(req, err);
        aead_request_complete(req, err);
}

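/*
 * seqniv completion handlers.  The IV size is fixed at 8 bytes and the
 * ESP associated data is capped at 12 bytes (ESN), so a 20-byte stack
 * buffer is enough to restore the original AD/IV layout in req->dst.
 */
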
static void seqniv_aead_encrypt_complete2(struct aead_request *req, int err)
{
        unsigned int ivsize = 8;
        u8 data[20];

        if (err == -EINPROGRESS)
                return;

        /* Swap IV and ESP header back to correct order. */
        scatterwalk_map_and_copy(data, req->dst, 0, req->assoclen + ivsize, 0);
        scatterwalk_map_and_copy(data + ivsize, req->dst, 0, req->assoclen, 1);
        scatterwalk_map_and_copy(data, req->dst, req->assoclen, ivsize, 1);
}

static void seqniv_aead_encrypt_complete(struct crypto_async_request *base,
                                         int err)
{
        struct aead_request *req = base->data;

        seqniv_aead_encrypt_complete2(req, err);
        aead_request_complete(req, err);
}

static void seqniv_aead_decrypt_complete2(struct aead_request *req, int err)
{
        u8 data[4];

        if (err == -EINPROGRESS)
                return;

        /* Move ESP header back to correct location. */
        scatterwalk_map_and_copy(data, req->dst, 16, req->assoclen - 8, 0);
        scatterwalk_map_and_copy(data, req->dst, 8, req->assoclen - 8, 1);
}

static void seqniv_aead_decrypt_complete(struct crypto_async_request *base,
                                         int err)
{
        struct aead_request *req = base->data;

        seqniv_aead_decrypt_complete2(req, err);
        aead_request_complete(req, err);
}

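/*
 * Derive the IV for a sequence number: place the big-endian sequence
 * number in the low-order bytes of @info, zero-pad any leading bytes
 * when ivsize exceeds 8, then XOR the result with the per-tfm salt.
 */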
static void seqiv_geniv(struct seqiv_ctx *ctx, u8 *info, u64 seq,
                        unsigned int ivsize)
{
        unsigned int len = ivsize;

        if (ivsize > sizeof(u64)) {
                memset(info, 0, ivsize - sizeof(u64));
                len = sizeof(u64);
        }
        seq = cpu_to_be64(seq);
        memcpy(info + ivsize - len, &seq, len);
        crypto_xor(info, ctx->salt, ivsize);
}

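/*
 * givencrypt for the old skcipher interface: derive the IV from
 * req->seq, return it to the caller via req->giv, and run the inner
 * encryption.  Misaligned IV buffers take the bounce-buffer path
 * handled by seqiv_complete() above.
 */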
static int seqiv_givencrypt(struct skcipher_givcrypt_request *req)
{
        struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
        struct seqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
        struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
        crypto_completion_t compl;
        void *data;
        u8 *info;
        unsigned int ivsize;
        int err;

        ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));

        compl = req->creq.base.complete;
        data = req->creq.base.data;
        info = req->creq.info;

        ivsize = crypto_ablkcipher_ivsize(geniv);

        if (unlikely(!IS_ALIGNED((unsigned long)info,
                                 crypto_ablkcipher_alignmask(geniv) + 1))) {
                info = kmalloc(ivsize, req->creq.base.flags &
                                       CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
                                                                  GFP_ATOMIC);
                if (!info)
                        return -ENOMEM;

                compl = seqiv_complete;
                data = req;
        }

        ablkcipher_request_set_callback(subreq, req->creq.base.flags, compl,
                                        data);
        ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst,
                                     req->creq.nbytes, info);

        seqiv_geniv(ctx, info, req->seq, ivsize);
        memcpy(req->giv, info, ivsize);

        err = crypto_ablkcipher_encrypt(subreq);
        if (unlikely(info != req->creq.info))
                seqiv_complete2(req, err);
        return err;
}

static int seqiv_aead_givencrypt(struct aead_givcrypt_request *req)
{
        struct crypto_aead *geniv = aead_givcrypt_reqtfm(req);
        struct seqiv_ctx *ctx = crypto_aead_ctx(geniv);
        struct aead_request *areq = &req->areq;
        struct aead_request *subreq = aead_givcrypt_reqctx(req);
        crypto_completion_t compl;
        void *data;
        u8 *info;
        unsigned int ivsize;
        int err;

        aead_request_set_tfm(subreq, aead_geniv_base(geniv));

        compl = areq->base.complete;
        data = areq->base.data;
        info = areq->iv;

        ivsize = crypto_aead_ivsize(geniv);

        if (unlikely(!IS_ALIGNED((unsigned long)info,
                                 crypto_aead_alignmask(geniv) + 1))) {
                info = kmalloc(ivsize, areq->base.flags &
                                       CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
                                                                  GFP_ATOMIC);
                if (!info)
                        return -ENOMEM;

                compl = seqiv_aead_complete;
                data = req;
        }

        aead_request_set_callback(subreq, areq->base.flags, compl, data);
        aead_request_set_crypt(subreq, areq->src, areq->dst, areq->cryptlen,
                               info);
        aead_request_set_assoc(subreq, areq->assoc, areq->assoclen);

        seqiv_geniv(ctx, info, req->seq, ivsize);
        memcpy(req->giv, info, ivsize);

        err = crypto_aead_encrypt(subreq);
        if (unlikely(info != areq->iv))
                seqiv_aead_complete2(req, err);
        return err;
}

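/*
 * seqniv encryption: unlike plain seqiv, the salted IV is kept out of
 * the data layout that the inner algorithm sees.  The IV and the ESP
 * header are swapped in req->dst for ICV generation and swapped back
 * on completion.
 */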
static int seqniv_aead_encrypt(struct aead_request *req)
{
        struct crypto_aead *geniv = crypto_aead_reqtfm(req);
        struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv);
        struct seqniv_request_ctx *rctx = aead_request_ctx(req);
        struct aead_request *subreq = &rctx->subreq;
        struct scatterlist *dst;
        crypto_completion_t compl;
        void *data;
        unsigned int ivsize = 8;
        u8 buf[20] __attribute__ ((aligned(__alignof__(u32))));
        int err;

        if (req->cryptlen < ivsize)
                return -EINVAL;

        /* ESP AD is at most 12 bytes (ESN). */
        if (req->assoclen > 12)
                return -EINVAL;

        aead_request_set_tfm(subreq, ctx->geniv.child);

        compl = seqniv_aead_encrypt_complete;
        data = req;

        if (req->src != req->dst) {
                struct blkcipher_desc desc = {
                        .tfm = ctx->null,
                };

                err = crypto_blkcipher_encrypt(&desc, req->dst, req->src,
                                               req->assoclen + req->cryptlen);
                if (err)
                        return err;
        }

        dst = scatterwalk_ffwd(rctx->dst, req->dst, ivsize);

        aead_request_set_callback(subreq, req->base.flags, compl, data);
        aead_request_set_crypt(subreq, dst, dst,
                               req->cryptlen - ivsize, req->iv);
        aead_request_set_ad(subreq, req->assoclen);

        memcpy(buf, req->iv, ivsize);
        crypto_xor(buf, ctx->salt, ivsize);
        memcpy(req->iv, buf, ivsize);

        /* Swap order of IV and ESP AD for ICV generation. */
        scatterwalk_map_and_copy(buf + ivsize, req->dst, 0, req->assoclen, 0);
        scatterwalk_map_and_copy(buf, req->dst, 0, req->assoclen + ivsize, 1);

        err = crypto_aead_encrypt(subreq);
        seqniv_aead_encrypt_complete2(req, err);
        return err;
}

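/*
 * seqiv encryption for the new AEAD interface: if src and dst differ,
 * the associated data and plaintext are first copied across with the
 * null cipher.  The salted IV is then written at offset assoclen in
 * dst and authenticated along with the AD (assoclen + ivsize).
 */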
static int seqiv_aead_encrypt(struct aead_request *req)
{
        struct crypto_aead *geniv = crypto_aead_reqtfm(req);
        struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv);
        struct aead_request *subreq = aead_request_ctx(req);
        crypto_completion_t compl;
        void *data;
        u8 *info;
        unsigned int ivsize = 8;
        int err;

        if (req->cryptlen < ivsize)
                return -EINVAL;

        aead_request_set_tfm(subreq, ctx->geniv.child);

        compl = req->base.complete;
        data = req->base.data;
        info = req->iv;

        if (req->src != req->dst) {
                struct blkcipher_desc desc = {
                        .tfm = ctx->null,
                };

                err = crypto_blkcipher_encrypt(&desc, req->dst, req->src,
                                               req->assoclen + req->cryptlen);
                if (err)
                        return err;
        }

        if (unlikely(!IS_ALIGNED((unsigned long)info,
                                 crypto_aead_alignmask(geniv) + 1))) {
                info = kmalloc(ivsize, req->base.flags &
                                       CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
                                                                  GFP_ATOMIC);
                if (!info)
                        return -ENOMEM;

                memcpy(info, req->iv, ivsize);
                compl = seqiv_aead_encrypt_complete;
                data = req;
        }

        aead_request_set_callback(subreq, req->base.flags, compl, data);
        aead_request_set_crypt(subreq, req->dst, req->dst,
                               req->cryptlen - ivsize, info);
        aead_request_set_ad(subreq, req->assoclen + ivsize);

        crypto_xor(info, ctx->salt, ivsize);
        scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1);

        err = crypto_aead_encrypt(subreq);
        if (unlikely(info != req->iv))
                seqiv_aead_encrypt_complete2(req, err);
        return err;
}

static int seqniv_aead_decrypt(struct aead_request *req)
{
        struct crypto_aead *geniv = crypto_aead_reqtfm(req);
        struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv);
        struct seqniv_request_ctx *rctx = aead_request_ctx(req);
        struct aead_request *subreq = &rctx->subreq;
        struct scatterlist *dst;
        crypto_completion_t compl;
        void *data;
        unsigned int ivsize = 8;
        u8 buf[20];
        int err;

        if (req->cryptlen < ivsize + crypto_aead_authsize(geniv))
                return -EINVAL;

        aead_request_set_tfm(subreq, ctx->geniv.child);

        compl = req->base.complete;
        data = req->base.data;

        if (req->assoclen > 12)
                return -EINVAL;
        else if (req->assoclen > 8) {
                compl = seqniv_aead_decrypt_complete;
                data = req;
        }

        if (req->src != req->dst) {
                struct blkcipher_desc desc = {
                        .tfm = ctx->null,
                };

                err = crypto_blkcipher_encrypt(&desc, req->dst, req->src,
                                               req->assoclen + req->cryptlen);
                if (err)
                        return err;
        }

        /* Move ESP AD forward for ICV generation. */
        scatterwalk_map_and_copy(buf, req->dst, 0, req->assoclen + ivsize, 0);
        memcpy(req->iv, buf + req->assoclen, ivsize);
        scatterwalk_map_and_copy(buf, req->dst, ivsize, req->assoclen, 1);

        dst = scatterwalk_ffwd(rctx->dst, req->dst, ivsize);

        aead_request_set_callback(subreq, req->base.flags, compl, data);
        aead_request_set_crypt(subreq, dst, dst,
                               req->cryptlen - ivsize, req->iv);
        aead_request_set_ad(subreq, req->assoclen);

        err = crypto_aead_decrypt(subreq);
        if (req->assoclen > 8)
                seqniv_aead_decrypt_complete2(req, err);
        return err;
}

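/*
 * seqiv decryption: read the transmitted IV from the source buffer at
 * offset assoclen, hand it to the inner algorithm unchanged, and
 * authenticate it as part of the AD, mirroring the encrypt side.
 */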
static int seqiv_aead_decrypt(struct aead_request *req)
{
        struct crypto_aead *geniv = crypto_aead_reqtfm(req);
        struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv);
        struct aead_request *subreq = aead_request_ctx(req);
        crypto_completion_t compl;
        void *data;
        unsigned int ivsize = 8;

        if (req->cryptlen < ivsize + crypto_aead_authsize(geniv))
                return -EINVAL;

        aead_request_set_tfm(subreq, ctx->geniv.child);

        compl = req->base.complete;
        data = req->base.data;

        aead_request_set_callback(subreq, req->base.flags, compl, data);
        aead_request_set_crypt(subreq, req->src, req->dst,
                               req->cryptlen - ivsize, req->iv);
        aead_request_set_ad(subreq, req->assoclen + ivsize);

        scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0);
        if (req->src != req->dst)
                scatterwalk_map_and_copy(req->iv, req->dst,
                                         req->assoclen, ivsize, 1);

        return crypto_aead_decrypt(subreq);
}

static int seqiv_init(struct crypto_tfm *tfm)
{
        struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm);
        struct seqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
        int err;

        spin_lock_init(&ctx->lock);

        tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request);

        err = 0;
        if (!crypto_get_default_rng()) {
                crypto_ablkcipher_crt(geniv)->givencrypt = seqiv_givencrypt;
                err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
                                           crypto_ablkcipher_ivsize(geniv));
                crypto_put_default_rng();
        }

        return err ?: skcipher_geniv_init(tfm);
}

static int seqiv_old_aead_init(struct crypto_tfm *tfm)
{
        struct crypto_aead *geniv = __crypto_aead_cast(tfm);
        struct seqiv_ctx *ctx = crypto_aead_ctx(geniv);
        int err;

        spin_lock_init(&ctx->lock);

        crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
                                sizeof(struct aead_request));

        err = 0;
        if (!crypto_get_default_rng()) {
                geniv->givencrypt = seqiv_aead_givencrypt;
                err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
                                           crypto_aead_ivsize(geniv));
                crypto_put_default_rng();
        }

        return err ?: aead_geniv_init(tfm);
}

static int seqiv_aead_init_common(struct crypto_tfm *tfm, unsigned int reqsize)
{
        struct crypto_aead *geniv = __crypto_aead_cast(tfm);
        struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv);
        int err;

        spin_lock_init(&ctx->geniv.lock);

        /*
         * Use the caller-supplied request context size: seqniv needs
         * room for a struct seqniv_request_ctx, not just a bare
         * struct aead_request.
         */
        crypto_aead_set_reqsize(geniv, reqsize);

        err = crypto_get_default_rng();
        if (err)
                goto out;

        err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
                                   crypto_aead_ivsize(geniv));
        crypto_put_default_rng();
        if (err)
                goto out;

        ctx->null = crypto_get_default_null_skcipher();
        err = PTR_ERR(ctx->null);
        if (IS_ERR(ctx->null))
                goto out;

        err = aead_geniv_init(tfm);
        if (err)
                goto drop_null;

        ctx->geniv.child = geniv->child;
        geniv->child = geniv;

out:
        return err;

drop_null:
        crypto_put_default_null_skcipher();
        goto out;
}

static int seqiv_aead_init(struct crypto_tfm *tfm)
{
        return seqiv_aead_init_common(tfm, sizeof(struct aead_request));
}

static int seqniv_aead_init(struct crypto_tfm *tfm)
{
        return seqiv_aead_init_common(tfm, sizeof(struct seqniv_request_ctx));
}

static void seqiv_aead_exit(struct crypto_tfm *tfm)
{
        struct seqiv_aead_ctx *ctx = crypto_tfm_ctx(tfm);

        crypto_free_aead(ctx->geniv.child);
        crypto_put_default_null_skcipher();
}

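/*
 * Instance construction.  A seqiv instance wraps an underlying
 * algorithm, e.g. "seqiv(rfc4106(gcm(aes)))" as used by IPsec ESP.
 * Old-style algorithms keep the givencrypt interface; new-style AEADs
 * get the explicit encrypt/decrypt wrappers above.
 */
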
static int seqiv_ablkcipher_create(struct crypto_template *tmpl,
                                   struct rtattr **tb)
{
        struct crypto_instance *inst;
        int err;

        inst = skcipher_geniv_alloc(tmpl, tb, 0, 0);

        if (IS_ERR(inst))
                return PTR_ERR(inst);

        err = -EINVAL;
        if (inst->alg.cra_ablkcipher.ivsize < sizeof(u64))
                goto free_inst;

        inst->alg.cra_init = seqiv_init;
        inst->alg.cra_exit = skcipher_geniv_exit;

        inst->alg.cra_ctxsize += inst->alg.cra_ablkcipher.ivsize;
        inst->alg.cra_ctxsize += sizeof(struct seqiv_ctx);

        inst->alg.cra_alignmask |= __alignof__(u32) - 1;

        err = crypto_register_instance(tmpl, inst);
        if (err)
                goto free_inst;

out:
        return err;

free_inst:
        skcipher_geniv_free(inst);
        goto out;
}

static int seqiv_old_aead_create(struct crypto_template *tmpl,
                                 struct aead_instance *aead)
{
        struct crypto_instance *inst = aead_crypto_instance(aead);
        int err = -EINVAL;

        if (inst->alg.cra_aead.ivsize < sizeof(u64))
                goto free_inst;

        inst->alg.cra_init = seqiv_old_aead_init;
        inst->alg.cra_exit = aead_geniv_exit;

        inst->alg.cra_ctxsize = inst->alg.cra_aead.ivsize;
        inst->alg.cra_ctxsize += sizeof(struct seqiv_ctx);

        err = crypto_register_instance(tmpl, inst);
        if (err)
                goto free_inst;

out:
        return err;

free_inst:
        aead_geniv_free(aead);
        goto out;
}

static int seqiv_aead_create(struct crypto_template *tmpl, struct rtattr **tb)
{
        struct aead_instance *inst;
        struct crypto_aead_spawn *spawn;
        struct aead_alg *alg;
        int err;

        inst = aead_geniv_alloc(tmpl, tb, 0, 0);

        if (IS_ERR(inst))
                return PTR_ERR(inst);

        inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;

        if (inst->alg.base.cra_aead.encrypt)
                return seqiv_old_aead_create(tmpl, inst);

        spawn = aead_instance_ctx(inst);
        alg = crypto_spawn_aead_alg(spawn);

        if (alg->base.cra_aead.encrypt)
                goto done;

        err = -EINVAL;
        if (inst->alg.ivsize != sizeof(u64))
                goto free_inst;

        inst->alg.encrypt = seqiv_aead_encrypt;
        inst->alg.decrypt = seqiv_aead_decrypt;

        inst->alg.base.cra_init = seqiv_aead_init;
        inst->alg.base.cra_exit = seqiv_aead_exit;

        inst->alg.base.cra_ctxsize = sizeof(struct seqiv_aead_ctx);
        inst->alg.base.cra_ctxsize += inst->alg.base.cra_aead.ivsize;

done:
        err = aead_register_instance(tmpl, inst);
        if (err)
                goto free_inst;

out:
        return err;

free_inst:
        aead_geniv_free(inst);
        goto out;
}

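/*
 * "seqiv" template entry point: dispatch on the requested algorithm
 * type, treating anything that is not an AEAD as an ablkcipher.
 */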
static int seqiv_create(struct crypto_template *tmpl, struct rtattr **tb)
{
        struct crypto_attr_type *algt;
        int err;

        algt = crypto_get_attr_type(tb);
        if (IS_ERR(algt))
                return PTR_ERR(algt);

        if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK)
                err = seqiv_ablkcipher_create(tmpl, tb);
        else
                err = seqiv_aead_create(tmpl, tb);

        return err;
}

static int seqniv_create(struct crypto_template *tmpl, struct rtattr **tb)
{
        struct aead_instance *inst;
        struct crypto_aead_spawn *spawn;
        struct aead_alg *alg;
        int err;

        inst = aead_geniv_alloc(tmpl, tb, 0, 0);
        err = PTR_ERR(inst);
        if (IS_ERR(inst))
                goto out;

        spawn = aead_instance_ctx(inst);
        alg = crypto_spawn_aead_alg(spawn);

        if (alg->base.cra_aead.encrypt)
                goto done;

        err = -EINVAL;
        if (inst->alg.ivsize != sizeof(u64))
                goto free_inst;

        inst->alg.encrypt = seqniv_aead_encrypt;
        inst->alg.decrypt = seqniv_aead_decrypt;

        inst->alg.base.cra_init = seqniv_aead_init;
        inst->alg.base.cra_exit = seqiv_aead_exit;

        inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
        inst->alg.base.cra_ctxsize = sizeof(struct seqiv_aead_ctx);
        inst->alg.base.cra_ctxsize += inst->alg.ivsize;

done:
        err = aead_register_instance(tmpl, inst);
        if (err)
                goto free_inst;

out:
        return err;

free_inst:
        aead_geniv_free(inst);
        goto out;
}

static void seqiv_free(struct crypto_instance *inst)
{
        if ((inst->alg.cra_flags ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK)
                skcipher_geniv_free(inst);
        else
                aead_geniv_free(aead_instance(inst));
}

static struct crypto_template seqiv_tmpl = {
        .name = "seqiv",
        .create = seqiv_create,
        .free = seqiv_free,
        .module = THIS_MODULE,
};

static struct crypto_template seqniv_tmpl = {
        .name = "seqniv",
        .create = seqniv_create,
        .free = seqiv_free,
        .module = THIS_MODULE,
};

static int __init seqiv_module_init(void)
{
        int err;

        err = crypto_register_template(&seqiv_tmpl);
        if (err)
                goto out;

        err = crypto_register_template(&seqniv_tmpl);
        if (err)
                goto out_undo_seqiv;

out:
        return err;

out_undo_seqiv:
        crypto_unregister_template(&seqiv_tmpl);
        goto out;
}

static void __exit seqiv_module_exit(void)
{
        crypto_unregister_template(&seqniv_tmpl);
        crypto_unregister_template(&seqiv_tmpl);
}

797
798 module_init(seqiv_module_init);
799 module_exit(seqiv_module_exit);
800
801 MODULE_LICENSE("GPL");
802 MODULE_DESCRIPTION("Sequence Number IV Generator");
803 MODULE_ALIAS_CRYPTO("seqiv");
804 MODULE_ALIAS_CRYPTO("seqniv");