net/ipv6/esp6.c (mirror_ubuntu-artful-kernel, at commit "esp6: Avoid skb_cow_data whenever possible")

/*
 * Copyright (C)2002 USAGI/WIDE Project
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors
 *
 *	Mitsuru KANDA @USAGI       : IPv6 Support
 *	Kazunori MIYAZAWA @USAGI   :
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *
 *	This file is derived from net/ipv4/esp.c
 */

#define pr_fmt(fmt) "IPv6: " fmt

#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/ip6_route.h>
#include <net/icmp.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <linux/icmpv6.h>

#include <linux/highmem.h>

struct esp_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))

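/*
 * Usage sketch (illustrative, mirroring calls later in this file): the
 * per-packet scratch buffer is stashed in the skb control block so the
 * asynchronous crypto completion callback can find and free it:
 *
 *	ESP_SKB_CB(skb)->tmp = tmp;     before crypto_aead_encrypt()
 *	tmp = ESP_SKB_CB(skb)->tmp;     in esp_output_done()
 */
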
static u32 esp6_get_mtu(struct xfrm_state *x, int mtu);

/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the upper 32 bits of the sequence number are
 * placed at the front, if present, followed by the IV, the request and
 * finally the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqihlen)
{
	unsigned int len;

	len = seqihlen;

	len += crypto_aead_ivsize(aead);

	if (len) {
		len += crypto_aead_alignmask(aead) &
		       ~(crypto_tfm_ctx_alignment() - 1);
		len = ALIGN(len, crypto_tfm_ctx_alignment());
	}

	len += sizeof(struct aead_request) + crypto_aead_reqsize(aead);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);
}

static inline __be32 *esp_tmp_seqhi(void *tmp)
{
	return PTR_ALIGN((__be32 *)tmp, __alignof__(__be32));
}

static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
{
	return crypto_aead_ivsize(aead) ?
	       PTR_ALIGN((u8 *)tmp + seqhilen,
			 crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
}

static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
	struct aead_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_request_set_tfm(req, aead);
	return req;
}

static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
					     struct aead_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}

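/*
 * Illustrative sketch (not part of the original file): a single
 * esp_alloc_tmp() buffer is carved up by the helpers above, in the same
 * order on both the output and input paths:
 *
 *	tmp   = esp_alloc_tmp(aead, nfrags, seqhilen);
 *	seqhi = esp_tmp_seqhi(tmp);              <- ESN high bits, if any
 *	iv    = esp_tmp_iv(aead, tmp, seqhilen); <- IV, alignmask-aligned
 *	req   = esp_tmp_req(aead, iv);           <- request + tfm context
 *	sg    = esp_req_sg(aead, req);           <- nfrags scatterlist entries
 */
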
static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
{
	__be32 *seqhi;
	struct crypto_aead *aead = x->data;
	int seqhilen = 0;
	u8 *iv;
	struct aead_request *req;
	struct scatterlist *sg;

	if (x->props.flags & XFRM_STATE_ESN)
		seqhilen += sizeof(__be32);

	seqhi = esp_tmp_seqhi(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);

	/* Unref skb_frag_pages in the src scatterlist if necessary.
	 * Skip the first sg which comes from skb->data.
	 */
	if (req->src != req->dst)
		for (sg = sg_next(req->src); sg; sg = sg_next(sg))
			put_page(sg_page(sg));
}

static void esp_output_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	void *tmp;
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_state *x = dst->xfrm;

	tmp = ESP_SKB_CB(skb)->tmp;
	esp_ssg_unref(x, tmp);
	kfree(tmp);
	xfrm_output_resume(skb, err);
}

/* Move ESP header back into place. */
static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
{
	struct ip_esp_hdr *esph = (void *)(skb->data + offset);
	void *tmp = ESP_SKB_CB(skb)->tmp;
	__be32 *seqhi = esp_tmp_seqhi(tmp);

	esph->seq_no = esph->spi;
	esph->spi = *seqhi;
}

static void esp_output_restore_header(struct sk_buff *skb)
{
	esp_restore_header(skb, skb_transport_offset(skb) - sizeof(__be32));
}

static struct ip_esp_hdr *esp_output_set_esn(struct sk_buff *skb,
					     struct ip_esp_hdr *esph,
					     __be32 *seqhi)
{
	struct xfrm_state *x = skb_dst(skb)->xfrm;

	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits. We will move it back after
	 * encryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		esph = (void *)(skb_transport_header(skb) - sizeof(__be32));
		*seqhi = esph->spi;
		esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
	}

	esph->spi = x->id.spi;

	return esph;
}

static void esp_output_done_esn(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	esp_output_restore_header(skb);
	esp_output_done(base, err);
}

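/*
 * ESN header shuffle, illustrated (a sketch, not part of the original
 * file). With extended sequence numbers the ICV must cover the high 32
 * sequence bits, which are never transmitted. esp_output_set_esn()
 * therefore rewrites the header 4 bytes earlier so that spi, seq.hi and
 * seq.lo are contiguous for the AEAD, roughly:
 *
 *	on the wire:     [ spi ][ seq.lo ][ IV ]...
 *	while crypting:  [ spi ][ seq.hi ][ seq.lo ][ IV ]...
 *
 * esp_output_restore_header() undoes the shuffle once encryption has
 * completed, restoring the 4 saved bytes from esp_tmp_seqhi(tmp).
 */
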
static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct aead_request *req;
	struct scatterlist *sg, *dsg;
	struct sk_buff *trailer;
	struct page *page;
	void *tmp;
	int blksize;
	int clen;
	int alen;
	int plen;
	int ivlen;
	int tfclen;
	int nfrags;
	int assoclen;
	int seqhilen;
	int tailen;
	u8 *iv;
	u8 *tail;
	u8 *vaddr;
	__be32 *seqhi;
	__be64 seqno;
	__u8 proto = *skb_mac_header(skb);

	/* skb is pure payload to encrypt */
	aead = x->data;
	alen = crypto_aead_authsize(aead);
	ivlen = crypto_aead_ivsize(aead);

	tfclen = 0;
	if (x->tfcpad) {
		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
		u32 padto;

		padto = min(x->tfcpad, esp6_get_mtu(x, dst->child_mtu_cached));
		if (skb->len < padto)
			tfclen = padto - skb->len;
	}
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	clen = ALIGN(skb->len + 2 + tfclen, blksize);
	plen = clen - skb->len - tfclen;
	tailen = tfclen + plen + alen;

	assoclen = sizeof(*esph);
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

	*skb_mac_header(skb) = IPPROTO_ESP;
	esph = ip_esp_hdr(skb);

	if (!skb_cloned(skb)) {
		if (tailen <= skb_availroom(skb)) {
			nfrags = 1;
			trailer = skb;
			tail = skb_tail_pointer(trailer);

			goto skip_cow;
		} else if ((skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)
			   && !skb_has_frag_list(skb)) {
			int allocsize;
			struct sock *sk = skb->sk;
			struct page_frag *pfrag = &x->xfrag;

			allocsize = ALIGN(tailen, L1_CACHE_BYTES);

			spin_lock_bh(&x->lock);

			if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
				spin_unlock_bh(&x->lock);
				goto cow;
			}

			page = pfrag->page;
			get_page(page);

			vaddr = kmap_atomic(page);

			tail = vaddr + pfrag->offset;

			/* Fill padding... */
			if (tfclen) {
				memset(tail, 0, tfclen);
				tail += tfclen;
			}
			do {
				int i;
				for (i = 0; i < plen - 2; i++)
					tail[i] = i + 1;
			} while (0);
			tail[plen - 2] = plen - 2;
			tail[plen - 1] = proto;

			kunmap_atomic(vaddr);

			nfrags = skb_shinfo(skb)->nr_frags;

			__skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
					     tailen);
			skb_shinfo(skb)->nr_frags = ++nfrags;

			pfrag->offset = pfrag->offset + allocsize;
			nfrags++;

			skb->len += tailen;
			skb->data_len += tailen;
			skb->truesize += tailen;
			if (sk)
				atomic_add(tailen, &sk->sk_wmem_alloc);

			skb_push(skb, -skb_network_offset(skb));

			esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
			esph->spi = x->id.spi;

			tmp = esp_alloc_tmp(aead, nfrags + 2, seqhilen);
			if (!tmp) {
				spin_unlock_bh(&x->lock);
				err = -ENOMEM;
				goto error;
			}
			seqhi = esp_tmp_seqhi(tmp);
			iv = esp_tmp_iv(aead, tmp, seqhilen);
			req = esp_tmp_req(aead, iv);
			sg = esp_req_sg(aead, req);
			dsg = &sg[nfrags];

			esph = esp_output_set_esn(skb, esph, seqhi);

			sg_init_table(sg, nfrags);
			skb_to_sgvec(skb, sg,
				     (unsigned char *)esph - skb->data,
				     assoclen + ivlen + clen + alen);

			allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);

			if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
				spin_unlock_bh(&x->lock);
				err = -ENOMEM;
				goto error;
			}

			skb_shinfo(skb)->nr_frags = 1;

			page = pfrag->page;
			get_page(page);
			/* replace page frags in skb with new page */
			__skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
			pfrag->offset = pfrag->offset + allocsize;

			sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
			skb_to_sgvec(skb, dsg,
				     (unsigned char *)esph - skb->data,
				     assoclen + ivlen + clen + alen);

			spin_unlock_bh(&x->lock);

			goto skip_cow2;
		}
	}

cow:
	err = skb_cow_data(skb, tailen, &trailer);
	if (err < 0)
		goto error;
	nfrags = err;

	tail = skb_tail_pointer(trailer);
	esph = ip_esp_hdr(skb);

skip_cow:
	/* Fill padding... */
	if (tfclen) {
		memset(tail, 0, tfclen);
		tail += tfclen;
	}
	do {
		int i;
		for (i = 0; i < plen - 2; i++)
			tail[i] = i + 1;
	} while (0);
	tail[plen - 2] = plen - 2;
	tail[plen - 1] = proto;
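	/* Worked example (illustrative): with plen == 6 the self-describing
	 * RFC 4303 padding written above is
	 *
	 *	tail[0..3] = 1, 2, 3, 4    <- monotonic pad bytes
	 *	tail[4]    = 4             <- pad length (plen - 2)
	 *	tail[5]    = proto         <- next header
	 */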
	pskb_put(skb, trailer, clen - skb->len + alen);

	skb_push(skb, -skb_network_offset(skb));

	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
	esph->spi = x->id.spi;

	tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
	if (!tmp) {
		err = -ENOMEM;
		goto error;
	}

	seqhi = esp_tmp_seqhi(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);
	dsg = sg;

	esph = esp_output_set_esn(skb, esph, seqhi);

	sg_init_table(sg, nfrags);
	skb_to_sgvec(skb, sg,
		     (unsigned char *)esph - skb->data,
		     assoclen + ivlen + clen + alen);

skip_cow2:
	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_output_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_output_done, skb);

	aead_request_set_crypt(req, sg, dsg, ivlen + clen, iv);
	aead_request_set_ad(req, assoclen);

	seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
			    ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));

	memset(iv, 0, ivlen);
	memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&seqno + 8 - min(ivlen, 8),
	       min(ivlen, 8));
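	/* Illustration (not in the original file): the memset/memcpy above
	 * right-align the 64-bit big-endian sequence number in the IV.
	 * With ivlen == 8 (e.g. rfc4106 GCM), seq.hi == 0 and
	 * seq.lo == 0x11, this yields
	 *
	 *	iv[0..7] = 00 00 00 00 00 00 00 11
	 *
	 * while a shorter IV keeps only the low-order bytes of seqno.
	 */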

	ESP_SKB_CB(skb)->tmp = tmp;
	err = crypto_aead_encrypt(req);

	switch (err) {
	case -EINPROGRESS:
		goto error;

	case -EBUSY:
		err = NET_XMIT_DROP;
		break;

	case 0:
		if ((x->props.flags & XFRM_STATE_ESN))
			esp_output_restore_header(skb);
	}

	if (sg != dsg)
		esp_ssg_unref(x, tmp);
	kfree(tmp);

error:
	return err;
}

static int esp_input_done2(struct sk_buff *skb, int err)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct crypto_aead *aead = x->data;
	int alen = crypto_aead_authsize(aead);
	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	int elen = skb->len - hlen;
	int hdr_len = skb_network_header_len(skb);
	int padlen;
	u8 nexthdr[2];

	kfree(ESP_SKB_CB(skb)->tmp);

	if (unlikely(err))
		goto out;

	if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2))
		BUG();

	err = -EINVAL;
	padlen = nexthdr[0];
	if (padlen + 2 + alen >= elen) {
		net_dbg_ratelimited("ipsec esp packet is garbage padlen=%d, elen=%d\n",
				    padlen + 2, elen - alen);
		goto out;
	}

	/* ... check padding bits here. Silly. :-) */

	pskb_trim(skb, skb->len - alen - padlen - 2);
	__skb_pull(skb, hlen);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -hdr_len);

	err = nexthdr[1];

	/* RFC4303: Drop dummy packets without any error */
	if (err == IPPROTO_NONE)
		err = -EINVAL;

out:
	return err;
}
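
/*
 * Trailer layout consumed by esp_input_done2() above (sketch, for
 * illustration): after decryption the packet ends with
 *
 *	[ payload | padding | pad length | next header | ICV ]
 *
 * so nexthdr[] is read from offset skb->len - alen - 2, and pskb_trim()
 * drops alen + padlen + 2 bytes of trailer.
 */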

static void esp_input_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	xfrm_input_resume(skb, esp_input_done2(skb, err));
}

static void esp_input_restore_header(struct sk_buff *skb)
{
	esp_restore_header(skb, 0);
	__skb_pull(skb, 4);
}

static void esp_input_set_header(struct sk_buff *skb, __be32 *seqhi)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)skb->data;

	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits. We will move it back after
	 * decryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		esph = (void *)skb_push(skb, 4);
		*seqhi = esph->spi;
		esph->spi = esph->seq_no;
		esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
	}
}

static void esp_input_done_esn(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	esp_input_restore_header(skb);
	esp_input_done(base, err);
}

static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead = x->data;
	struct aead_request *req;
	struct sk_buff *trailer;
	int ivlen = crypto_aead_ivsize(aead);
	int elen = skb->len - sizeof(*esph) - ivlen;
	int nfrags;
	int assoclen;
	int seqhilen;
	int ret = 0;
	void *tmp;
	__be32 *seqhi;
	u8 *iv;
	struct scatterlist *sg;

	if (!pskb_may_pull(skb, sizeof(*esph) + ivlen)) {
		ret = -EINVAL;
		goto out;
	}

	if (elen <= 0) {
		ret = -EINVAL;
		goto out;
	}

	assoclen = sizeof(*esph);
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

	if (!skb_cloned(skb)) {
		if (!skb_is_nonlinear(skb)) {
			nfrags = 1;

			goto skip_cow;
		} else if (!skb_has_frag_list(skb)) {
			nfrags = skb_shinfo(skb)->nr_frags;
			nfrags++;

			goto skip_cow;
		}
	}

	nfrags = skb_cow_data(skb, 0, &trailer);
	if (nfrags < 0) {
		ret = -EINVAL;
		goto out;
	}

skip_cow:
	ret = -ENOMEM;
	tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
	if (!tmp)
		goto out;

	ESP_SKB_CB(skb)->tmp = tmp;
	seqhi = esp_tmp_seqhi(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	esp_input_set_header(skb, seqhi);

	sg_init_table(sg, nfrags);
	skb_to_sgvec(skb, sg, 0, skb->len);

	skb->ip_summed = CHECKSUM_NONE;

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_input_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_input_done, skb);

	aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
	aead_request_set_ad(req, assoclen);

	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS)
		goto out;

	if ((x->props.flags & XFRM_STATE_ESN))
		esp_input_restore_header(skb);

	ret = esp_input_done2(skb, ret);

out:
	return ret;
}

static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
{
	struct crypto_aead *aead = x->data;
	u32 blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	unsigned int net_adj;

	if (x->props.mode != XFRM_MODE_TUNNEL)
		net_adj = sizeof(struct ipv6hdr);
	else
		net_adj = 0;

	return ((mtu - x->props.header_len - crypto_aead_authsize(aead) -
		 net_adj) & ~(blksize - 1)) + net_adj - 2;
}
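
/*
 * Worked example (illustrative numbers, not from the original file):
 * tunnel mode with AES-CBC (blksize 16), a 16 byte ICV and
 * header_len = 8 (ESP) + 16 (IV) + 40 (outer IPv6) = 64 on a 1500 byte
 * MTU gives
 *
 *	((1500 - 64 - 16 - 0) & ~15) + 0 - 2 = 1406
 *
 * i.e. the largest payload whose padded ESP packet still fits.
 */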

static int esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		    u8 type, u8 code, int offset, __be32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset);
	struct xfrm_state *x;

	if (type != ICMPV6_PKT_TOOBIG &&
	    type != NDISC_REDIRECT)
		return 0;

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      esph->spi, IPPROTO_ESP, AF_INET6);
	if (!x)
		return 0;

	if (type == NDISC_REDIRECT)
		ip6_redirect(skb, net, skb->dev->ifindex, 0,
			     sock_net_uid(net, NULL));
	else
		ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
	xfrm_state_put(x);

	return 0;
}

static void esp6_destroy(struct xfrm_state *x)
{
	struct crypto_aead *aead = x->data;

	if (!aead)
		return;

	crypto_free_aead(aead);
}

static int esp_init_aead(struct xfrm_state *x)
{
	char aead_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_aead *aead;
	int err;

	err = -ENAMETOOLONG;
	if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
		     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME)
		goto error;

	aead = crypto_alloc_aead(aead_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;

	err = crypto_aead_setkey(aead, x->aead->alg_key,
				 (x->aead->alg_key_len + 7) / 8);
	if (err)
		goto error;

	err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
	if (err)
		goto error;

error:
	return err;
}
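
/*
 * Example (illustrative, assuming a GCM state): with x->geniv ==
 * "seqiv" and x->aead->alg_name == "rfc4106(gcm(aes))", the template
 * name composed above is
 *
 *	"seqiv(rfc4106(gcm(aes)))"
 *
 * with key and ICV sizes taken from the xfrm algorithm configuration.
 */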

static int esp_init_authenc(struct xfrm_state *x)
{
	struct crypto_aead *aead;
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;
	char *key;
	char *p;
	char authenc_name[CRYPTO_MAX_ALG_NAME];
	unsigned int keylen;
	int err;

	err = -EINVAL;
	if (!x->ealg)
		goto error;

	err = -ENAMETOOLONG;

	if ((x->props.flags & XFRM_STATE_ESN)) {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthencesn(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
			goto error;
	} else {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthenc(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
			goto error;
	}
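	/* Example names composed above (illustrative): with x->geniv ==
	 * "echainiv", hmac(sha1) auth and cbc(aes) encryption this yields
	 *
	 *	"echainiv(authenc(hmac(sha1),cbc(aes)))"
	 *
	 * or, when XFRM_STATE_ESN is set,
	 *
	 *	"echainiv(authencesn(hmac(sha1),cbc(aes)))"
	 */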

	aead = crypto_alloc_aead(authenc_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;

	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
	err = -ENOMEM;
	key = kmalloc(keylen, GFP_KERNEL);
	if (!key)
		goto error;

	p = key;
	rta = (void *)p;
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	p += RTA_SPACE(sizeof(*param));

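	/* Key blob layout built here (sketch): authenc takes a single
	 * concatenated key,
	 *
	 *	[ rtattr: enckeylen param ][ auth key ][ enc key ]
	 *
	 * filled in below before crypto_aead_setkey().
	 */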
	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;

		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
		p += (x->aalg->alg_key_len + 7) / 8;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);

		err = -EINVAL;
		if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
		    crypto_aead_authsize(aead)) {
			pr_info("ESP: %s digestsize %u != %hu\n",
				x->aalg->alg_name,
				crypto_aead_authsize(aead),
				aalg_desc->uinfo.auth.icv_fullbits / 8);
			goto free_key;
		}

		err = crypto_aead_setauthsize(
			aead, x->aalg->alg_trunc_len / 8);
		if (err)
			goto free_key;
	}

	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

	err = crypto_aead_setkey(aead, key, keylen);

free_key:
	kfree(key);

error:
	return err;
}

static int esp6_init_state(struct xfrm_state *x)
{
	struct crypto_aead *aead;
	u32 align;
	int err;

	if (x->encap)
		return -EINVAL;

	x->data = NULL;

	if (x->aead)
		err = esp_init_aead(x);
	else
		err = esp_init_authenc(x);

	if (err)
		goto error;

	aead = x->data;

	x->props.header_len = sizeof(struct ip_esp_hdr) +
			      crypto_aead_ivsize(aead);
	switch (x->props.mode) {
	case XFRM_MODE_BEET:
		if (x->sel.family != AF_INET6)
			x->props.header_len += IPV4_BEET_PHMAXLEN +
				(sizeof(struct ipv6hdr) - sizeof(struct iphdr));
		break;
	case XFRM_MODE_TRANSPORT:
		break;
	case XFRM_MODE_TUNNEL:
		x->props.header_len += sizeof(struct ipv6hdr);
		break;
	default:
		goto error;
	}

	align = ALIGN(crypto_aead_blocksize(aead), 4);
	x->props.trailer_len = align + 1 + crypto_aead_authsize(aead);

error:
	return err;
}

static int esp6_rcv_cb(struct sk_buff *skb, int err)
{
	return 0;
}

static const struct xfrm_type esp6_type = {
	.description	= "ESP6",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= esp6_init_state,
	.destructor	= esp6_destroy,
	.get_mtu	= esp6_get_mtu,
	.input		= esp6_input,
	.output		= esp6_output,
	.hdr_offset	= xfrm6_find_1stfragopt,
};

static struct xfrm6_protocol esp6_protocol = {
	.handler	= xfrm6_rcv,
	.cb_handler	= esp6_rcv_cb,
	.err_handler	= esp6_err,
	.priority	= 0,
};

static int __init esp6_init(void)
{
	if (xfrm_register_type(&esp6_type, AF_INET6) < 0) {
		pr_info("%s: can't add xfrm type\n", __func__);
		return -EAGAIN;
	}
	if (xfrm6_protocol_register(&esp6_protocol, IPPROTO_ESP) < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		xfrm_unregister_type(&esp6_type, AF_INET6);
		return -EAGAIN;
	}

	return 0;
}

static void __exit esp6_fini(void)
{
	if (xfrm6_protocol_deregister(&esp6_protocol, IPPROTO_ESP) < 0)
		pr_info("%s: can't remove protocol\n", __func__);
	if (xfrm_unregister_type(&esp6_type, AF_INET6) < 0)
		pr_info("%s: can't remove xfrm type\n", __func__);
}

module_init(esp6_init);
module_exit(esp6_fini);

MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_ESP);