/* net/ipv4/ah4.c - IPsec Authentication Header (AH) support for IPv4 */

#define pr_fmt(fmt) "IPsec: " fmt

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/ah.h>
#include <linux/crypto.h>
#include <linux/pfkeyv2.h>
#include <linux/scatterlist.h>
#include <net/icmp.h>
#include <net/protocol.h>

struct ah_skb_cb {
        struct xfrm_skb_cb xfrm;
        void *tmp;
};

#define AH_SKB_CB(__skb) ((struct ah_skb_cb *)&((__skb)->cb[0]))

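/*
 * One scratch buffer is allocated per packet and laid out as:
 * [ caller data | ICV | struct ahash_request + request ctx | scatterlist[] ].
 * The ah_tmp_*() helpers below recover each region from that buffer using
 * the same alignment rules applied when sizing it here.
 */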
static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags,
                          unsigned int size)
{
        unsigned int len;

        len = size + crypto_ahash_digestsize(ahash) +
              (crypto_ahash_alignmask(ahash) &
               ~(crypto_tfm_ctx_alignment() - 1));

        len = ALIGN(len, crypto_tfm_ctx_alignment());

        len += sizeof(struct ahash_request) + crypto_ahash_reqsize(ahash);
        len = ALIGN(len, __alignof__(struct scatterlist));

        len += sizeof(struct scatterlist) * nfrags;

        return kmalloc(len, GFP_ATOMIC);
}

static inline u8 *ah_tmp_auth(void *tmp, unsigned int offset)
{
        return tmp + offset;
}

static inline u8 *ah_tmp_icv(struct crypto_ahash *ahash, void *tmp,
                             unsigned int offset)
{
        return PTR_ALIGN((u8 *)tmp + offset, crypto_ahash_alignmask(ahash) + 1);
}

static inline struct ahash_request *ah_tmp_req(struct crypto_ahash *ahash,
                                               u8 *icv)
{
        struct ahash_request *req;

        req = (void *)PTR_ALIGN(icv + crypto_ahash_digestsize(ahash),
                                crypto_tfm_ctx_alignment());

        ahash_request_set_tfm(req, ahash);

        return req;
}

static inline struct scatterlist *ah_req_sg(struct crypto_ahash *ahash,
                                            struct ahash_request *req)
{
        return (void *)ALIGN((unsigned long)(req + 1) +
                             crypto_ahash_reqsize(ahash),
                             __alignof__(struct scatterlist));
}

/* Clear mutable options and find final destination to substitute
 * into IP header for icv calculation. Options are already checked
 * for validity, so paranoia is not required. */

static int ip_clear_mutable_options(const struct iphdr *iph, __be32 *daddr)
{
        unsigned char *optptr = (unsigned char *)(iph+1);
        int l = iph->ihl*4 - sizeof(struct iphdr);
        int optlen;

        while (l > 0) {
                switch (*optptr) {
                case IPOPT_END:
                        return 0;
                case IPOPT_NOOP:
                        l--;
                        optptr++;
                        continue;
                }
                optlen = optptr[1];
                if (optlen < 2 || optlen > l)
                        return -EINVAL;
                switch (*optptr) {
                case IPOPT_SEC:
                case 0x85:      /* Some "Extended Security" crap. */
                case IPOPT_CIPSO:
                case IPOPT_RA:
                case 0x80|21:   /* RFC1770 */
                        break;
                case IPOPT_LSRR:
                case IPOPT_SSRR:
                        if (optlen < 6)
                                return -EINVAL;
                        memcpy(daddr, optptr+optlen-4, 4);
                        /* Fall through */
                default:
                        memset(optptr, 0, optlen);
                }
                l -= optlen;
                optptr += optlen;
        }
        return 0;
}

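/*
 * Completion callback for asynchronous ICV computation on output: copy the
 * freshly computed ICV into the AH header, restore the mutable IP header
 * fields saved in the scratch buffer, then resume the xfrm output path.
 */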
static void ah_output_done(struct crypto_async_request *base, int err)
{
        u8 *icv;
        struct iphdr *iph;
        struct sk_buff *skb = base->data;
        struct xfrm_state *x = skb_dst(skb)->xfrm;
        struct ah_data *ahp = x->data;
        struct iphdr *top_iph = ip_hdr(skb);
        struct ip_auth_hdr *ah = ip_auth_hdr(skb);
        int ihl = ip_hdrlen(skb);

        iph = AH_SKB_CB(skb)->tmp;
        icv = ah_tmp_icv(ahp->ahash, iph, ihl);
        memcpy(ah->auth_data, icv, ahp->icv_trunc_len);

        top_iph->tos = iph->tos;
        top_iph->ttl = iph->ttl;
        top_iph->frag_off = iph->frag_off;
        if (top_iph->ihl != 5) {
                top_iph->daddr = iph->daddr;
                memcpy(top_iph+1, iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
        }

        kfree(AH_SKB_CB(skb)->tmp);
        xfrm_output_resume(skb, err);
}

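/*
 * Output path: save the mutable IPv4 header fields, zero them, compute the
 * ICV over the immutable header, the AH header and the payload (plus the
 * high-order ESN bits when extended sequence numbers are enabled), then
 * restore the saved fields and write the ICV into the AH header.
 */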
static int ah_output(struct xfrm_state *x, struct sk_buff *skb)
{
        int err;
        int nfrags;
        int ihl;
        u8 *icv;
        struct sk_buff *trailer;
        struct crypto_ahash *ahash;
        struct ahash_request *req;
        struct scatterlist *sg;
        struct iphdr *iph, *top_iph;
        struct ip_auth_hdr *ah;
        struct ah_data *ahp;
        int seqhi_len = 0;
        __be32 *seqhi;
        int sglists = 0;
        struct scatterlist *seqhisg;

        ahp = x->data;
        ahash = ahp->ahash;

        if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
                goto out;
        nfrags = err;

        skb_push(skb, -skb_network_offset(skb));
        ah = ip_auth_hdr(skb);
        ihl = ip_hdrlen(skb);

        if (x->props.flags & XFRM_STATE_ESN) {
                sglists = 1;
                seqhi_len = sizeof(*seqhi);
        }
        err = -ENOMEM;
        iph = ah_alloc_tmp(ahash, nfrags + sglists, ihl + seqhi_len);
        if (!iph)
                goto out;
        seqhi = (__be32 *)((char *)iph + ihl);
        icv = ah_tmp_icv(ahash, seqhi, seqhi_len);
        req = ah_tmp_req(ahash, icv);
        sg = ah_req_sg(ahash, req);
        seqhisg = sg + nfrags;

        memset(ah->auth_data, 0, ahp->icv_trunc_len);

        top_iph = ip_hdr(skb);

        iph->tos = top_iph->tos;
        iph->ttl = top_iph->ttl;
        iph->frag_off = top_iph->frag_off;

        if (top_iph->ihl != 5) {
                iph->daddr = top_iph->daddr;
                memcpy(iph+1, top_iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
                err = ip_clear_mutable_options(top_iph, &top_iph->daddr);
                if (err)
                        goto out_free;
        }

        ah->nexthdr = *skb_mac_header(skb);
        *skb_mac_header(skb) = IPPROTO_AH;

        top_iph->tos = 0;
        top_iph->tot_len = htons(skb->len);
        top_iph->frag_off = 0;
        top_iph->ttl = 0;
        top_iph->check = 0;

        if (x->props.flags & XFRM_STATE_ALIGN4)
                ah->hdrlen = (XFRM_ALIGN4(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2;
        else
                ah->hdrlen = (XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2;

        ah->reserved = 0;
        ah->spi = x->id.spi;
        ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

        sg_init_table(sg, nfrags + sglists);
        err = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
        if (unlikely(err < 0))
                goto out_free;

        if (x->props.flags & XFRM_STATE_ESN) {
                /* Attach seqhi sg right after packet payload */
                *seqhi = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
                sg_set_buf(seqhisg, seqhi, seqhi_len);
        }
        ahash_request_set_crypt(req, sg, icv, skb->len + seqhi_len);
        ahash_request_set_callback(req, 0, ah_output_done, skb);

        AH_SKB_CB(skb)->tmp = iph;

        err = crypto_ahash_digest(req);
        if (err) {
                if (err == -EINPROGRESS)
                        goto out;

                if (err == -EBUSY)
                        err = NET_XMIT_DROP;
                goto out_free;
        }

        memcpy(ah->auth_data, icv, ahp->icv_trunc_len);

        top_iph->tos = iph->tos;
        top_iph->ttl = iph->ttl;
        top_iph->frag_off = iph->frag_off;
        if (top_iph->ihl != 5) {
                top_iph->daddr = iph->daddr;
                memcpy(top_iph+1, iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
        }

out_free:
        kfree(iph);
out:
        return err;
}

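/*
 * Completion callback for asynchronous ICV verification on input: compare
 * the computed ICV against the one received in the AH header, strip the AH
 * header on success and resume the xfrm input path.
 */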
static void ah_input_done(struct crypto_async_request *base, int err)
{
        u8 *auth_data;
        u8 *icv;
        struct iphdr *work_iph;
        struct sk_buff *skb = base->data;
        struct xfrm_state *x = xfrm_input_state(skb);
        struct ah_data *ahp = x->data;
        struct ip_auth_hdr *ah = ip_auth_hdr(skb);
        int ihl = ip_hdrlen(skb);
        int ah_hlen = (ah->hdrlen + 2) << 2;

        if (err)
                goto out;

        work_iph = AH_SKB_CB(skb)->tmp;
        auth_data = ah_tmp_auth(work_iph, ihl);
        icv = ah_tmp_icv(ahp->ahash, auth_data, ahp->icv_trunc_len);

        err = memcmp(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG : 0;
        if (err)
                goto out;

        err = ah->nexthdr;

        skb->network_header += ah_hlen;
        memcpy(skb_network_header(skb), work_iph, ihl);
        __skb_pull(skb, ah_hlen + ihl);

        if (x->props.mode == XFRM_MODE_TUNNEL)
                skb_reset_transport_header(skb);
        else
                skb_set_transport_header(skb, -ihl);
out:
        kfree(AH_SKB_CB(skb)->tmp);
        xfrm_input_resume(skb, err);
}

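/*
 * Input path: validate the AH header length, save the received ICV and the
 * mutable IPv4 header fields, zero the mutable fields, recompute the ICV
 * over header and payload (plus the ESN high bits if enabled), and compare
 * it with the received value before stripping the AH header.
 */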
static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
{
        int ah_hlen;
        int ihl;
        int nexthdr;
        int nfrags;
        u8 *auth_data;
        u8 *icv;
        struct sk_buff *trailer;
        struct crypto_ahash *ahash;
        struct ahash_request *req;
        struct scatterlist *sg;
        struct iphdr *iph, *work_iph;
        struct ip_auth_hdr *ah;
        struct ah_data *ahp;
        int err = -ENOMEM;
        int seqhi_len = 0;
        __be32 *seqhi;
        int sglists = 0;
        struct scatterlist *seqhisg;

        if (!pskb_may_pull(skb, sizeof(*ah)))
                goto out;

        ah = (struct ip_auth_hdr *)skb->data;
        ahp = x->data;
        ahash = ahp->ahash;

        nexthdr = ah->nexthdr;
        ah_hlen = (ah->hdrlen + 2) << 2;

        if (x->props.flags & XFRM_STATE_ALIGN4) {
                if (ah_hlen != XFRM_ALIGN4(sizeof(*ah) + ahp->icv_full_len) &&
                    ah_hlen != XFRM_ALIGN4(sizeof(*ah) + ahp->icv_trunc_len))
                        goto out;
        } else {
                if (ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_full_len) &&
                    ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len))
                        goto out;
        }

        if (!pskb_may_pull(skb, ah_hlen))
                goto out;

        /* We are going to _remove_ AH header to keep sockets happy,
         * so... Later this can change. */
        if (skb_unclone(skb, GFP_ATOMIC))
                goto out;

        skb->ip_summed = CHECKSUM_NONE;

        if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
                goto out;
        nfrags = err;

        ah = (struct ip_auth_hdr *)skb->data;
        iph = ip_hdr(skb);
        ihl = ip_hdrlen(skb);

        if (x->props.flags & XFRM_STATE_ESN) {
                sglists = 1;
                seqhi_len = sizeof(*seqhi);
        }

        work_iph = ah_alloc_tmp(ahash, nfrags + sglists, ihl +
                                ahp->icv_trunc_len + seqhi_len);
        if (!work_iph) {
                err = -ENOMEM;
                goto out;
        }

        seqhi = (__be32 *)((char *)work_iph + ihl);
        auth_data = ah_tmp_auth(seqhi, seqhi_len);
        icv = ah_tmp_icv(ahash, auth_data, ahp->icv_trunc_len);
        req = ah_tmp_req(ahash, icv);
        sg = ah_req_sg(ahash, req);
        seqhisg = sg + nfrags;

        memcpy(work_iph, iph, ihl);
        memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len);
        memset(ah->auth_data, 0, ahp->icv_trunc_len);

        iph->ttl = 0;
        iph->tos = 0;
        iph->frag_off = 0;
        iph->check = 0;
        if (ihl > sizeof(*iph)) {
                __be32 dummy;
                err = ip_clear_mutable_options(iph, &dummy);
                if (err)
                        goto out_free;
        }

        skb_push(skb, ihl);

        sg_init_table(sg, nfrags + sglists);
        err = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
        if (unlikely(err < 0))
                goto out_free;

        if (x->props.flags & XFRM_STATE_ESN) {
                /* Attach seqhi sg right after packet payload */
                *seqhi = XFRM_SKB_CB(skb)->seq.input.hi;
                sg_set_buf(seqhisg, seqhi, seqhi_len);
        }
        ahash_request_set_crypt(req, sg, icv, skb->len + seqhi_len);
        ahash_request_set_callback(req, 0, ah_input_done, skb);

        AH_SKB_CB(skb)->tmp = work_iph;

        err = crypto_ahash_digest(req);
        if (err) {
                if (err == -EINPROGRESS)
                        goto out;

                goto out_free;
        }

        err = memcmp(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG : 0;
        if (err)
                goto out_free;

        skb->network_header += ah_hlen;
        memcpy(skb_network_header(skb), work_iph, ihl);
        __skb_pull(skb, ah_hlen + ihl);
        if (x->props.mode == XFRM_MODE_TUNNEL)
                skb_reset_transport_header(skb);
        else
                skb_set_transport_header(skb, -ihl);

        err = nexthdr;

out_free:
        kfree(work_iph);
out:
        return err;
}

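/*
 * ICMP error handler: look up the offending SA by SPI and destination
 * address and update the path MTU or reroute the flow in response to
 * fragmentation-needed or redirect messages.
 */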
static int ah4_err(struct sk_buff *skb, u32 info)
{
        struct net *net = dev_net(skb->dev);
        const struct iphdr *iph = (const struct iphdr *)skb->data;
        struct ip_auth_hdr *ah = (struct ip_auth_hdr *)(skb->data+(iph->ihl<<2));
        struct xfrm_state *x;

        switch (icmp_hdr(skb)->type) {
        case ICMP_DEST_UNREACH:
                if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
                        return 0;
                break;
        case ICMP_REDIRECT:
                break;
        default:
                return 0;
        }

        x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
                              ah->spi, IPPROTO_AH, AF_INET);
        if (!x)
                return 0;

        if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
                ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_AH, 0);
        else
                ipv4_redirect(skb, net, 0, 0, IPPROTO_AH, 0);
        xfrm_state_put(x);

        return 0;
}

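/*
 * State setup: allocate the ahash transform named by the SA's auth
 * algorithm, set its key, validate the ICV sizes against the xfrm algorithm
 * description, and compute the per-packet AH header length.
 */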
static int ah_init_state(struct xfrm_state *x)
{
        struct ah_data *ahp = NULL;
        struct xfrm_algo_desc *aalg_desc;
        struct crypto_ahash *ahash;

        if (!x->aalg)
                goto error;

        if (x->encap)
                goto error;

        ahp = kzalloc(sizeof(*ahp), GFP_KERNEL);
        if (!ahp)
                return -ENOMEM;

        ahash = crypto_alloc_ahash(x->aalg->alg_name, 0, 0);
        if (IS_ERR(ahash))
                goto error;

        ahp->ahash = ahash;
        if (crypto_ahash_setkey(ahash, x->aalg->alg_key,
                                (x->aalg->alg_key_len + 7) / 8))
                goto error;

        /*
         * Lookup the algorithm description maintained by xfrm_algo,
         * verify crypto transform properties, and store information
         * we need for AH processing. This lookup cannot fail here
         * after a successful crypto_alloc_ahash().
         */
        aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
        BUG_ON(!aalg_desc);

        if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
            crypto_ahash_digestsize(ahash)) {
                pr_info("%s: %s digestsize %u != %hu\n",
                        __func__, x->aalg->alg_name,
                        crypto_ahash_digestsize(ahash),
                        aalg_desc->uinfo.auth.icv_fullbits / 8);
                goto error;
        }

        ahp->icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
        ahp->icv_trunc_len = x->aalg->alg_trunc_len/8;

        if (x->props.flags & XFRM_STATE_ALIGN4)
                x->props.header_len = XFRM_ALIGN4(sizeof(struct ip_auth_hdr) +
                                                  ahp->icv_trunc_len);
        else
                x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) +
                                                  ahp->icv_trunc_len);
        if (x->props.mode == XFRM_MODE_TUNNEL)
                x->props.header_len += sizeof(struct iphdr);
        x->data = ahp;

        return 0;

error:
        if (ahp) {
                crypto_free_ahash(ahp->ahash);
                kfree(ahp);
        }
        return -EINVAL;
}

static void ah_destroy(struct xfrm_state *x)
{
        struct ah_data *ahp = x->data;

        if (!ahp)
                return;

        crypto_free_ahash(ahp->ahash);
        kfree(ahp);
}

static int ah4_rcv_cb(struct sk_buff *skb, int err)
{
        return 0;
}

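/*
 * Registration glue: ah_type hooks AH state handling into the generic xfrm
 * framework, while ah4_protocol attaches the receive and ICMP error handlers
 * to IP protocol number 51 (IPPROTO_AH).
 */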
static const struct xfrm_type ah_type =
{
        .description    = "AH4",
        .owner          = THIS_MODULE,
        .proto          = IPPROTO_AH,
        .flags          = XFRM_TYPE_REPLAY_PROT,
        .init_state     = ah_init_state,
        .destructor     = ah_destroy,
        .input          = ah_input,
        .output         = ah_output
};

static struct xfrm4_protocol ah4_protocol = {
        .handler        = xfrm4_rcv,
        .input_handler  = xfrm_input,
        .cb_handler     = ah4_rcv_cb,
        .err_handler    = ah4_err,
        .priority       = 0,
};

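/*
 * Module init/exit: register the AH xfrm type and the IPPROTO_AH protocol
 * handler at load time, and tear both down in reverse order on unload.
 */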
static int __init ah4_init(void)
{
        if (xfrm_register_type(&ah_type, AF_INET) < 0) {
                pr_info("%s: can't add xfrm type\n", __func__);
                return -EAGAIN;
        }
        if (xfrm4_protocol_register(&ah4_protocol, IPPROTO_AH) < 0) {
                pr_info("%s: can't add protocol\n", __func__);
                xfrm_unregister_type(&ah_type, AF_INET);
                return -EAGAIN;
        }
        return 0;
}

static void __exit ah4_fini(void)
{
        if (xfrm4_protocol_deregister(&ah4_protocol, IPPROTO_AH) < 0)
                pr_info("%s: can't remove protocol\n", __func__);
        if (xfrm_unregister_type(&ah_type, AF_INET) < 0)
                pr_info("%s: can't remove xfrm type\n", __func__);
}

module_init(ah4_init);
module_exit(ah4_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_AH);