/* net/ipv6/esp6.c */
/*
 * Copyright (C)2002 USAGI/WIDE Project
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors
 *
 *	Mitsuru KANDA @USAGI       : IPv6 Support
 *	Kazunori MIYAZAWA @USAGI   :
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *
 *	This file is derived from net/ipv4/esp.c
 */

#define pr_fmt(fmt) "IPv6: " fmt

#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/ip6_route.h>
#include <net/icmp.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <linux/icmpv6.h>

#include <linux/highmem.h>

struct esp_skb_cb {
        struct xfrm_skb_cb xfrm;
        void *tmp;
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))

static u32 esp6_get_mtu(struct xfrm_state *x, int mtu);

/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the upper 32 bits of the sequence number
 * are placed at the front, if present, followed by the IV, the request
 * and finally the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqhilen)
{
        unsigned int len;

        len = seqhilen;

        len += crypto_aead_ivsize(aead);

        if (len) {
                len += crypto_aead_alignmask(aead) &
                       ~(crypto_tfm_ctx_alignment() - 1);
                len = ALIGN(len, crypto_tfm_ctx_alignment());
        }

        len += sizeof(struct aead_request) + crypto_aead_reqsize(aead);
        len = ALIGN(len, __alignof__(struct scatterlist));

        len += sizeof(struct scatterlist) * nfrags;

        return kmalloc(len, GFP_ATOMIC);
}
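
/*
 * Layout of the temporary buffer returned above (a sketch; the exact
 * gaps depend on the algorithm's alignmask and request size):
 *
 *	tmp --> [ __be32 seqhi        ]  only if XFRM_STATE_ESN is set
 *	        [ u8 iv[ivsize]       ]  aligned to alignmask + 1
 *	        [ struct aead_request ]  aligned to crypto_tfm_ctx_alignment()
 *	        [ per-request context ]  crypto_aead_reqsize() bytes
 *	        [ struct scatterlist  ]  nfrags entries
 *
 * The esp_tmp_*() helpers below recompute these offsets on demand
 * rather than storing them.
 */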

static inline __be32 *esp_tmp_seqhi(void *tmp)
{
        return PTR_ALIGN((__be32 *)tmp, __alignof__(__be32));
}

static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
{
        return crypto_aead_ivsize(aead) ?
               PTR_ALIGN((u8 *)tmp + seqhilen,
                         crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
}

static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
        struct aead_request *req;

        req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
                                crypto_tfm_ctx_alignment());
        aead_request_set_tfm(req, aead);
        return req;
}

static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
                                             struct aead_request *req)
{
        return (void *)ALIGN((unsigned long)(req + 1) +
                             crypto_aead_reqsize(aead),
                             __alignof__(struct scatterlist));
}

static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
{
        struct crypto_aead *aead = x->data;
        int seqhilen = 0;
        u8 *iv;
        struct aead_request *req;
        struct scatterlist *sg;

        if (x->props.flags & XFRM_STATE_ESN)
                seqhilen += sizeof(__be32);

        iv = esp_tmp_iv(aead, tmp, seqhilen);
        req = esp_tmp_req(aead, iv);

        /* Unref skb_frag_pages in the src scatterlist if necessary.
         * Skip the first sg which comes from skb->data.
         */
        if (req->src != req->dst)
                for (sg = sg_next(req->src); sg; sg = sg_next(sg))
                        put_page(sg_page(sg));
}

static void esp_output_done(struct crypto_async_request *base, int err)
{
        struct sk_buff *skb = base->data;
        void *tmp;
        struct dst_entry *dst = skb_dst(skb);
        struct xfrm_state *x = dst->xfrm;

        tmp = ESP_SKB_CB(skb)->tmp;
        esp_ssg_unref(x, tmp);
        kfree(tmp);
        xfrm_output_resume(skb, err);
}

/* Move ESP header back into place. */
static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
{
        struct ip_esp_hdr *esph = (void *)(skb->data + offset);
        void *tmp = ESP_SKB_CB(skb)->tmp;
        __be32 *seqhi = esp_tmp_seqhi(tmp);

        esph->seq_no = esph->spi;
        esph->spi = *seqhi;
}

static void esp_output_restore_header(struct sk_buff *skb)
{
        esp_restore_header(skb, skb_transport_offset(skb) - sizeof(__be32));
}
static struct ip_esp_hdr *esp_output_set_esn(struct sk_buff *skb,
                                             struct xfrm_state *x,
                                             struct ip_esp_hdr *esph,
                                             __be32 *seqhi)
{
        /* For ESN we move the header forward by 4 bytes to
         * accommodate the high bits.  We will move it back after
         * encryption.
         */
        if ((x->props.flags & XFRM_STATE_ESN)) {
                struct xfrm_offload *xo = xfrm_offload(skb);

                esph = (void *)(skb_transport_header(skb) - sizeof(__be32));
                *seqhi = esph->spi;
                if (xo)
                        esph->seq_no = htonl(xo->seq.hi);
                else
                        esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
        }

        esph->spi = x->id.spi;

        return esph;
}
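
/*
 * A sketch of the ESN shuffle above (byte offsets are relative to the
 * real ESP header at skb_transport_header(); fields are 4 bytes wide):
 *
 *	normal:                    [  0: spi  ][ 4: seq.lo ]
 *	during crypto:  [ -4: spi ][ 0: seq.hi][ 4: seq.lo ]
 *
 * The four bytes originally in front of the ESP header are parked in
 * *seqhi so that esp_output_restore_header() can put them back once the
 * AEAD transform has authenticated spi || seq.hi || seq.lo.
 */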

static void esp_output_done_esn(struct crypto_async_request *base, int err)
{
        struct sk_buff *skb = base->data;

        esp_output_restore_header(skb);
        esp_output_done(base, err);
}

static void esp_output_fill_trailer(u8 *tail, int tfclen, int plen, __u8 proto)
{
        /* Fill padding... */
        if (tfclen) {
                memset(tail, 0, tfclen);
                tail += tfclen;
        }
        do {
                int i;
                for (i = 0; i < plen - 2; i++)
                        tail[i] = i + 1;
        } while (0);
        tail[plen - 2] = plen - 2;
        tail[plen - 1] = proto;
}
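
/*
 * Worked example (illustrative, not from the original source): with
 * tfclen = 0, plen = 6 and proto = IPPROTO_TCP (6), the bytes written
 * above form the RFC 4303 self-describing pad followed by the
 * pad-length and next-header fields:
 *
 *	tail[]: 01 02 03 04 | 04 | 06
 *	        padding       len  proto
 */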

int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
        u8 *tail;
        u8 *vaddr;
        int nfrags;
        struct page *page;
        struct sk_buff *trailer;
        int tailen = esp->tailen;

        if (!skb_cloned(skb)) {
                if (tailen <= skb_tailroom(skb)) {
                        nfrags = 1;
                        trailer = skb;
                        tail = skb_tail_pointer(trailer);

                        goto skip_cow;
                } else if ((skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)
                           && !skb_has_frag_list(skb)) {
                        int allocsize;
                        struct sock *sk = skb->sk;
                        struct page_frag *pfrag = &x->xfrag;

                        esp->inplace = false;

                        allocsize = ALIGN(tailen, L1_CACHE_BYTES);

                        spin_lock_bh(&x->lock);

                        if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
                                spin_unlock_bh(&x->lock);
                                goto cow;
                        }

                        page = pfrag->page;
                        get_page(page);

                        vaddr = kmap_atomic(page);

                        tail = vaddr + pfrag->offset;

                        esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);

                        kunmap_atomic(vaddr);

                        nfrags = skb_shinfo(skb)->nr_frags;

                        __skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
                                             tailen);
                        skb_shinfo(skb)->nr_frags = ++nfrags;

                        pfrag->offset = pfrag->offset + allocsize;

                        spin_unlock_bh(&x->lock);

                        nfrags++;

                        skb->len += tailen;
                        skb->data_len += tailen;
                        skb->truesize += tailen;
                        if (sk)
                                refcount_add(tailen, &sk->sk_wmem_alloc);

                        goto out;
                }
        }

cow:
        nfrags = skb_cow_data(skb, tailen, &trailer);
        if (nfrags < 0)
                goto out;
        tail = skb_tail_pointer(trailer);

skip_cow:
        esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
        pskb_put(skb, trailer, tailen);

out:
        return nfrags;
}
EXPORT_SYMBOL_GPL(esp6_output_head);

int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
        u8 *iv;
        int alen;
        void *tmp;
        int ivlen;
        int assoclen;
        int seqhilen;
        __be32 *seqhi;
        struct page *page;
        struct ip_esp_hdr *esph;
        struct aead_request *req;
        struct crypto_aead *aead;
        struct scatterlist *sg, *dsg;
        int err = -ENOMEM;

        assoclen = sizeof(struct ip_esp_hdr);
        seqhilen = 0;

        if (x->props.flags & XFRM_STATE_ESN) {
                seqhilen += sizeof(__be32);
                assoclen += sizeof(__be32);
        }

        aead = x->data;
        alen = crypto_aead_authsize(aead);
        ivlen = crypto_aead_ivsize(aead);

        tmp = esp_alloc_tmp(aead, esp->nfrags + 2, seqhilen);
        if (!tmp)
                goto error;

        seqhi = esp_tmp_seqhi(tmp);
        iv = esp_tmp_iv(aead, tmp, seqhilen);
        req = esp_tmp_req(aead, iv);
        sg = esp_req_sg(aead, req);

        if (esp->inplace)
                dsg = sg;
        else
                dsg = &sg[esp->nfrags];

        esph = esp_output_set_esn(skb, x, ip_esp_hdr(skb), seqhi);

        sg_init_table(sg, esp->nfrags);
        err = skb_to_sgvec(skb, sg,
                           (unsigned char *)esph - skb->data,
                           assoclen + ivlen + esp->clen + alen);
        if (unlikely(err < 0))
                goto error_free;

        if (!esp->inplace) {
                int allocsize;
                struct page_frag *pfrag = &x->xfrag;

                allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);

                spin_lock_bh(&x->lock);
                if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
                        spin_unlock_bh(&x->lock);
                        goto error_free;
                }

                skb_shinfo(skb)->nr_frags = 1;

                page = pfrag->page;
                get_page(page);
                /* replace page frags in skb with new page */
                __skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
                pfrag->offset = pfrag->offset + allocsize;
                spin_unlock_bh(&x->lock);

                sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
                err = skb_to_sgvec(skb, dsg,
                                   (unsigned char *)esph - skb->data,
                                   assoclen + ivlen + esp->clen + alen);
                if (unlikely(err < 0))
                        goto error_free;
        }

        if ((x->props.flags & XFRM_STATE_ESN))
                aead_request_set_callback(req, 0, esp_output_done_esn, skb);
        else
                aead_request_set_callback(req, 0, esp_output_done, skb);

        aead_request_set_crypt(req, sg, dsg, ivlen + esp->clen, iv);
        aead_request_set_ad(req, assoclen);

        /* Place the 64-bit sequence number into the low-order bytes of
         * the IV; any leading IV bytes beyond eight stay zero.
         */
        memset(iv, 0, ivlen);
        memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&esp->seqno + 8 - min(ivlen, 8),
               min(ivlen, 8));

        ESP_SKB_CB(skb)->tmp = tmp;
        err = crypto_aead_encrypt(req);

        switch (err) {
        case -EINPROGRESS:
                goto error;

        case -EBUSY:
                err = NET_XMIT_DROP;
                break;

        case 0:
                if ((x->props.flags & XFRM_STATE_ESN))
                        esp_output_restore_header(skb);
        }

        if (sg != dsg)
                esp_ssg_unref(x, tmp);

error_free:
        kfree(tmp);
error:
        return err;
}
EXPORT_SYMBOL_GPL(esp6_output_tail);

static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
{
        int alen;
        int blksize;
        struct ip_esp_hdr *esph;
        struct crypto_aead *aead;
        struct esp_info esp;

        esp.inplace = true;

        esp.proto = *skb_mac_header(skb);
        *skb_mac_header(skb) = IPPROTO_ESP;

        /* skb is pure payload to encrypt */

        aead = x->data;
        alen = crypto_aead_authsize(aead);

        esp.tfclen = 0;
        if (x->tfcpad) {
                struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
                u32 padto;

                padto = min(x->tfcpad, esp6_get_mtu(x, dst->child_mtu_cached));
                if (skb->len < padto)
                        esp.tfclen = padto - skb->len;
        }
        blksize = ALIGN(crypto_aead_blocksize(aead), 4);
        esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
        esp.plen = esp.clen - skb->len - esp.tfclen;
        esp.tailen = esp.tfclen + esp.plen + alen;
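
        /* Worked example (illustrative): skb->len = 100, tfclen = 0 and
         * blksize = 16 give clen = ALIGN(102, 16) = 112 and plen = 12,
         * i.e. 10 pad bytes plus the pad-length and next-header bytes;
         * tailen additionally reserves alen bytes for the ICV.
         */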

        esp.nfrags = esp6_output_head(x, skb, &esp);
        if (esp.nfrags < 0)
                return esp.nfrags;

        esph = ip_esp_hdr(skb);
        esph->spi = x->id.spi;

        esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
        esp.seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
                                ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));

        skb_push(skb, -skb_network_offset(skb));

        return esp6_output_tail(x, skb, &esp);
}

int esp6_input_done2(struct sk_buff *skb, int err)
{
        struct xfrm_state *x = xfrm_input_state(skb);
        struct xfrm_offload *xo = xfrm_offload(skb);
        struct crypto_aead *aead = x->data;
        int alen = crypto_aead_authsize(aead);
        int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
        int elen = skb->len - hlen;
        int hdr_len = skb_network_header_len(skb);
        int padlen;
        u8 nexthdr[2];

        if (!xo || (xo && !(xo->flags & CRYPTO_DONE)))
                kfree(ESP_SKB_CB(skb)->tmp);

        if (unlikely(err))
                goto out;

        if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2))
                BUG();

        err = -EINVAL;
        padlen = nexthdr[0];
        if (padlen + 2 + alen >= elen) {
                net_dbg_ratelimited("ipsec esp packet is garbage padlen=%d, elen=%d\n",
                                    padlen + 2, elen - alen);
                goto out;
        }

        /* ... check padding bits here. Silly. :-) */

        pskb_trim(skb, skb->len - alen - padlen - 2);
        __skb_pull(skb, hlen);
        if (x->props.mode == XFRM_MODE_TUNNEL)
                skb_reset_transport_header(skb);
        else
                skb_set_transport_header(skb, -hdr_len);

        err = nexthdr[1];

        /* RFC4303: Drop dummy packets without any error */
        if (err == IPPROTO_NONE)
                err = -EINVAL;

out:
        return err;
}
EXPORT_SYMBOL_GPL(esp6_input_done2);

static void esp_input_done(struct crypto_async_request *base, int err)
{
        struct sk_buff *skb = base->data;

        xfrm_input_resume(skb, esp6_input_done2(skb, err));
}

static void esp_input_restore_header(struct sk_buff *skb)
{
        esp_restore_header(skb, 0);
        __skb_pull(skb, 4);
}

static void esp_input_set_header(struct sk_buff *skb, __be32 *seqhi)
{
        struct xfrm_state *x = xfrm_input_state(skb);
        struct ip_esp_hdr *esph = (struct ip_esp_hdr *)skb->data;

        /* For ESN we move the header forward by 4 bytes to
         * accommodate the high bits.  We will move it back after
         * decryption.
         */
        if ((x->props.flags & XFRM_STATE_ESN)) {
                esph = skb_push(skb, 4);
                *seqhi = esph->spi;
                esph->spi = esph->seq_no;
                esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
        }
}
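
/*
 * A sketch of the receive-side ESN shuffle above: skb_push() exposes
 * four bytes in front of the received header, those bytes are saved in
 * *seqhi, the SPI is copied into the freed slot and the high sequence
 * bits take the SPI's old place, yielding spi || seq.hi || seq.lo for
 * the AEAD to authenticate.  esp_input_restore_header() reverses this
 * and pulls the extra four bytes back off.
 */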

static void esp_input_done_esn(struct crypto_async_request *base, int err)
{
        struct sk_buff *skb = base->data;

        esp_input_restore_header(skb);
        esp_input_done(base, err);
}

static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
{
        struct ip_esp_hdr *esph;
        struct crypto_aead *aead = x->data;
        struct aead_request *req;
        struct sk_buff *trailer;
        int ivlen = crypto_aead_ivsize(aead);
        int elen = skb->len - sizeof(*esph) - ivlen;
        int nfrags;
        int assoclen;
        int seqhilen;
        int ret = 0;
        void *tmp;
        __be32 *seqhi;
        u8 *iv;
        struct scatterlist *sg;

        if (!pskb_may_pull(skb, sizeof(*esph) + ivlen)) {
                ret = -EINVAL;
                goto out;
        }

        if (elen <= 0) {
                ret = -EINVAL;
                goto out;
        }

        assoclen = sizeof(*esph);
        seqhilen = 0;

        if (x->props.flags & XFRM_STATE_ESN) {
                seqhilen += sizeof(__be32);
                assoclen += seqhilen;
        }

        if (!skb_cloned(skb)) {
                if (!skb_is_nonlinear(skb)) {
                        nfrags = 1;

                        goto skip_cow;
                } else if (!skb_has_frag_list(skb)) {
                        nfrags = skb_shinfo(skb)->nr_frags;
                        nfrags++;

                        goto skip_cow;
                }
        }

        nfrags = skb_cow_data(skb, 0, &trailer);
        if (nfrags < 0) {
                ret = -EINVAL;
                goto out;
        }

skip_cow:
        ret = -ENOMEM;
        tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
        if (!tmp)
                goto out;

        ESP_SKB_CB(skb)->tmp = tmp;
        seqhi = esp_tmp_seqhi(tmp);
        iv = esp_tmp_iv(aead, tmp, seqhilen);
        req = esp_tmp_req(aead, iv);
        sg = esp_req_sg(aead, req);

        esp_input_set_header(skb, seqhi);

        sg_init_table(sg, nfrags);
        ret = skb_to_sgvec(skb, sg, 0, skb->len);
        if (unlikely(ret < 0))
                goto out;

        skb->ip_summed = CHECKSUM_NONE;

        if ((x->props.flags & XFRM_STATE_ESN))
                aead_request_set_callback(req, 0, esp_input_done_esn, skb);
        else
                aead_request_set_callback(req, 0, esp_input_done, skb);

        aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
        aead_request_set_ad(req, assoclen);

        ret = crypto_aead_decrypt(req);
        if (ret == -EINPROGRESS)
                goto out;

        if ((x->props.flags & XFRM_STATE_ESN))
                esp_input_restore_header(skb);

        ret = esp6_input_done2(skb, ret);

out:
        return ret;
}

static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
{
        struct crypto_aead *aead = x->data;
        u32 blksize = ALIGN(crypto_aead_blocksize(aead), 4);
        unsigned int net_adj;

        if (x->props.mode != XFRM_MODE_TUNNEL)
                net_adj = sizeof(struct ipv6hdr);
        else
                net_adj = 0;

        return ((mtu - x->props.header_len - crypto_aead_authsize(aead) -
                 net_adj) & ~(blksize - 1)) + net_adj - 2;
}
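
/*
 * Worked example (illustrative): in transport mode with mtu = 1500,
 * header_len = 16 (8-byte ESP header plus an 8-byte IV), a 16-byte ICV
 * and blksize = 4, the result is
 *
 *	((1500 - 16 - 16 - 40) & ~3) + 40 - 2 = 1466
 *
 * The mask rounds the encrypted portion down to a multiple of the
 * cipher block size and the trailing -2 reserves the pad-length and
 * next-header bytes of the ESP trailer.
 */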

static int esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                    u8 type, u8 code, int offset, __be32 info)
{
        struct net *net = dev_net(skb->dev);
        const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
        struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset);
        struct xfrm_state *x;

        if (type != ICMPV6_PKT_TOOBIG &&
            type != NDISC_REDIRECT)
                return 0;

        x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
                              esph->spi, IPPROTO_ESP, AF_INET6);
        if (!x)
                return 0;

        if (type == NDISC_REDIRECT)
                ip6_redirect(skb, net, skb->dev->ifindex, 0,
                             sock_net_uid(net, NULL));
        else
                ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
        xfrm_state_put(x);

        return 0;
}

static void esp6_destroy(struct xfrm_state *x)
{
        struct crypto_aead *aead = x->data;

        if (!aead)
                return;

        crypto_free_aead(aead);
}

static int esp_init_aead(struct xfrm_state *x)
{
        char aead_name[CRYPTO_MAX_ALG_NAME];
        struct crypto_aead *aead;
        int err;
        u32 mask = 0;

        err = -ENAMETOOLONG;
        if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
                     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME)
                goto error;

        if (x->xso.offload_handle)
                mask |= CRYPTO_ALG_ASYNC;

        aead = crypto_alloc_aead(aead_name, 0, mask);
        err = PTR_ERR(aead);
        if (IS_ERR(aead))
                goto error;

        x->data = aead;

        err = crypto_aead_setkey(aead, x->aead->alg_key,
                                 (x->aead->alg_key_len + 7) / 8);
        if (err)
                goto error;

        err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
        if (err)
                goto error;

error:
        return err;
}

static int esp_init_authenc(struct xfrm_state *x)
{
        struct crypto_aead *aead;
        struct crypto_authenc_key_param *param;
        struct rtattr *rta;
        char *key;
        char *p;
        char authenc_name[CRYPTO_MAX_ALG_NAME];
        unsigned int keylen;
        int err;
        u32 mask = 0;

        err = -EINVAL;
        if (!x->ealg)
                goto error;

        err = -ENAMETOOLONG;

        if ((x->props.flags & XFRM_STATE_ESN)) {
                if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
                             "%s%sauthencesn(%s,%s)%s",
                             x->geniv ?: "", x->geniv ? "(" : "",
                             x->aalg ? x->aalg->alg_name : "digest_null",
                             x->ealg->alg_name,
                             x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
                        goto error;
        } else {
                if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
                             "%s%sauthenc(%s,%s)%s",
                             x->geniv ?: "", x->geniv ? "(" : "",
                             x->aalg ? x->aalg->alg_name : "digest_null",
                             x->ealg->alg_name,
                             x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
                        goto error;
        }
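
        /* For example (illustrative): with x->geniv = "echainiv", an
         * aalg of "hmac(sha256)" and an ealg of "cbc(aes)", the name
         * built above is "echainiv(authenc(hmac(sha256),cbc(aes)))",
         * or "echainiv(authencesn(...))" when ESN is enabled.
         */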

        if (x->xso.offload_handle)
                mask |= CRYPTO_ALG_ASYNC;

        aead = crypto_alloc_aead(authenc_name, 0, mask);
        err = PTR_ERR(aead);
        if (IS_ERR(aead))
                goto error;

        x->data = aead;

        keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
                 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
        err = -ENOMEM;
        key = kmalloc(keylen, GFP_KERNEL);
        if (!key)
                goto error;

        p = key;
        rta = (void *)p;
        rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
        rta->rta_len = RTA_LENGTH(sizeof(*param));
        param = RTA_DATA(rta);
        p += RTA_SPACE(sizeof(*param));

        if (x->aalg) {
                struct xfrm_algo_desc *aalg_desc;

                memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
                p += (x->aalg->alg_key_len + 7) / 8;

                aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
                BUG_ON(!aalg_desc);

                err = -EINVAL;
                if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
                    crypto_aead_authsize(aead)) {
                        pr_info("ESP: %s digestsize %u != %hu\n",
                                x->aalg->alg_name,
                                crypto_aead_authsize(aead),
                                aalg_desc->uinfo.auth.icv_fullbits / 8);
                        goto free_key;
                }

                err = crypto_aead_setauthsize(
                        aead, x->aalg->alg_trunc_len / 8);
                if (err)
                        goto free_key;
        }

        param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
        memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

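        /* The key blob handed to setkey below is the format the
         * authenc() template expects (a sketch):
         *
         *	[ rtattr CRYPTO_AUTHENC_KEYA_PARAM: enckeylen ]
         *	[ authentication key                          ]
         *	[ encryption key                              ]
         */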
        err = crypto_aead_setkey(aead, key, keylen);

free_key:
        kfree(key);

error:
        return err;
}

static int esp6_init_state(struct xfrm_state *x)
{
        struct crypto_aead *aead;
        u32 align;
        int err;

        if (x->encap)
                return -EINVAL;

        x->data = NULL;

        if (x->aead)
                err = esp_init_aead(x);
        else
                err = esp_init_authenc(x);

        if (err)
                goto error;

        aead = x->data;

        x->props.header_len = sizeof(struct ip_esp_hdr) +
                              crypto_aead_ivsize(aead);
        switch (x->props.mode) {
        case XFRM_MODE_BEET:
                if (x->sel.family != AF_INET6)
                        x->props.header_len += IPV4_BEET_PHMAXLEN +
                                               (sizeof(struct ipv6hdr) - sizeof(struct iphdr));
                break;
        case XFRM_MODE_TRANSPORT:
                break;
        case XFRM_MODE_TUNNEL:
                x->props.header_len += sizeof(struct ipv6hdr);
                break;
        default:
                goto error;
        }

        align = ALIGN(crypto_aead_blocksize(aead), 4);
        x->props.trailer_len = align + 1 + crypto_aead_authsize(aead);

error:
        return err;
}

static int esp6_rcv_cb(struct sk_buff *skb, int err)
{
        return 0;
}

static const struct xfrm_type esp6_type = {
        .description	= "ESP6",
        .owner		= THIS_MODULE,
        .proto		= IPPROTO_ESP,
        .flags		= XFRM_TYPE_REPLAY_PROT,
        .init_state	= esp6_init_state,
        .destructor	= esp6_destroy,
        .get_mtu	= esp6_get_mtu,
        .input		= esp6_input,
        .output		= esp6_output,
        .hdr_offset	= xfrm6_find_1stfragopt,
};

static struct xfrm6_protocol esp6_protocol = {
        .handler	= xfrm6_rcv,
        .cb_handler	= esp6_rcv_cb,
        .err_handler	= esp6_err,
        .priority	= 0,
};

static int __init esp6_init(void)
{
        if (xfrm_register_type(&esp6_type, AF_INET6) < 0) {
                pr_info("%s: can't add xfrm type\n", __func__);
                return -EAGAIN;
        }
        if (xfrm6_protocol_register(&esp6_protocol, IPPROTO_ESP) < 0) {
                pr_info("%s: can't add protocol\n", __func__);
                xfrm_unregister_type(&esp6_type, AF_INET6);
                return -EAGAIN;
        }

        return 0;
}

static void __exit esp6_fini(void)
{
        if (xfrm6_protocol_deregister(&esp6_protocol, IPPROTO_ESP) < 0)
                pr_info("%s: can't remove protocol\n", __func__);
        if (xfrm_unregister_type(&esp6_type, AF_INET6) < 0)
                pr_info("%s: can't remove xfrm type\n", __func__);
}

module_init(esp6_init);
module_exit(esp6_fini);

MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_ESP);