1 /* xfrm_user.c: User interface to configure xfrm engine.
2 *
3 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
4 *
5 * Changes:
6 * Mitsuru KANDA @USAGI
7 * Kazunori MIYAZAWA @USAGI
8 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
9 * IPv6 support
10 *
11 */
12
13 #include <linux/crypto.h>
14 #include <linux/module.h>
15 #include <linux/kernel.h>
16 #include <linux/types.h>
17 #include <linux/slab.h>
18 #include <linux/socket.h>
19 #include <linux/string.h>
20 #include <linux/net.h>
21 #include <linux/skbuff.h>
22 #include <linux/pfkeyv2.h>
23 #include <linux/ipsec.h>
24 #include <linux/init.h>
25 #include <linux/security.h>
26 #include <net/sock.h>
27 #include <net/xfrm.h>
28 #include <net/netlink.h>
29 #include <net/ah.h>
30 #include <linux/uaccess.h>
31 #if IS_ENABLED(CONFIG_IPV6)
32 #include <linux/in6.h>
33 #endif
34 #include <asm/unaligned.h>
35
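/* Sanity-check one user-supplied algorithm attribute: the netlink payload
 * must cover the declared key length, the attribute type must be one of the
 * simple algorithm types, and the algorithm name is forcibly NUL-terminated.
 */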
36 static int verify_one_alg(struct nlattr **attrs, enum xfrm_attr_type_t type)
37 {
38 struct nlattr *rt = attrs[type];
39 struct xfrm_algo *algp;
40
41 if (!rt)
42 return 0;
43
44 algp = nla_data(rt);
45 if (nla_len(rt) < xfrm_alg_len(algp))
46 return -EINVAL;
47
48 switch (type) {
49 case XFRMA_ALG_AUTH:
50 case XFRMA_ALG_CRYPT:
51 case XFRMA_ALG_COMP:
52 break;
53
54 default:
55 return -EINVAL;
56 }
57
58 algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0';
59 return 0;
60 }
61
62 static int verify_auth_trunc(struct nlattr **attrs)
63 {
64 struct nlattr *rt = attrs[XFRMA_ALG_AUTH_TRUNC];
65 struct xfrm_algo_auth *algp;
66
67 if (!rt)
68 return 0;
69
70 algp = nla_data(rt);
71 if (nla_len(rt) < xfrm_alg_auth_len(algp))
72 return -EINVAL;
73
74 algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0';
75 return 0;
76 }
77
78 static int verify_aead(struct nlattr **attrs)
79 {
80 struct nlattr *rt = attrs[XFRMA_ALG_AEAD];
81 struct xfrm_algo_aead *algp;
82
83 if (!rt)
84 return 0;
85
86 algp = nla_data(rt);
87 if (nla_len(rt) < aead_len(algp))
88 return -EINVAL;
89
90 algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0';
91 return 0;
92 }
93
94 static void verify_one_addr(struct nlattr **attrs, enum xfrm_attr_type_t type,
95 xfrm_address_t **addrp)
96 {
97 struct nlattr *rt = attrs[type];
98
99 if (rt && addrp)
100 *addrp = nla_data(rt);
101 }
102
103 static inline int verify_sec_ctx_len(struct nlattr **attrs)
104 {
105 struct nlattr *rt = attrs[XFRMA_SEC_CTX];
106 struct xfrm_user_sec_ctx *uctx;
107
108 if (!rt)
109 return 0;
110
111 uctx = nla_data(rt);
112 if (uctx->len != (sizeof(struct xfrm_user_sec_ctx) + uctx->ctx_len))
113 return -EINVAL;
114
115 return 0;
116 }
117
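/* If XFRM_STATE_ESN is requested, a correctly sized XFRMA_REPLAY_ESN_VAL
 * attribute is mandatory; when the attribute is present at all, the protocol
 * must be ESP or AH and the legacy replay_window must be zero.
 */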
118 static inline int verify_replay(struct xfrm_usersa_info *p,
119 struct nlattr **attrs)
120 {
121 struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL];
122 struct xfrm_replay_state_esn *rs;
123
124 if (p->flags & XFRM_STATE_ESN) {
125 if (!rt)
126 return -EINVAL;
127
128 rs = nla_data(rt);
129
130 if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8)
131 return -EINVAL;
132
133 if (nla_len(rt) < xfrm_replay_state_esn_len(rs) &&
134 nla_len(rt) != sizeof(*rs))
135 return -EINVAL;
136 }
137
138 if (!rt)
139 return 0;
140
141 	/* Only ESP and AH support the ESN feature. */
142 if ((p->id.proto != IPPROTO_ESP) && (p->id.proto != IPPROTO_AH))
143 return -EINVAL;
144
145 if (p->replay_window != 0)
146 return -EINVAL;
147
148 return 0;
149 }
150
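/* Validate a new-SA request: check that the address family is supported and
 * that the supplied algorithm/encapsulation attributes are consistent with
 * the protocol (AH, ESP, IPcomp, or the IPv6 mobility extension headers).
 */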
151 static int verify_newsa_info(struct xfrm_usersa_info *p,
152 struct nlattr **attrs)
153 {
154 int err;
155
156 err = -EINVAL;
157 switch (p->family) {
158 case AF_INET:
159 break;
160
161 case AF_INET6:
162 #if IS_ENABLED(CONFIG_IPV6)
163 break;
164 #else
165 err = -EAFNOSUPPORT;
166 goto out;
167 #endif
168
169 default:
170 goto out;
171 }
172
173 err = -EINVAL;
174 switch (p->id.proto) {
175 case IPPROTO_AH:
176 if ((!attrs[XFRMA_ALG_AUTH] &&
177 !attrs[XFRMA_ALG_AUTH_TRUNC]) ||
178 attrs[XFRMA_ALG_AEAD] ||
179 attrs[XFRMA_ALG_CRYPT] ||
180 attrs[XFRMA_ALG_COMP] ||
181 attrs[XFRMA_TFCPAD])
182 goto out;
183 break;
184
185 case IPPROTO_ESP:
186 if (attrs[XFRMA_ALG_COMP])
187 goto out;
188 if (!attrs[XFRMA_ALG_AUTH] &&
189 !attrs[XFRMA_ALG_AUTH_TRUNC] &&
190 !attrs[XFRMA_ALG_CRYPT] &&
191 !attrs[XFRMA_ALG_AEAD])
192 goto out;
193 if ((attrs[XFRMA_ALG_AUTH] ||
194 attrs[XFRMA_ALG_AUTH_TRUNC] ||
195 attrs[XFRMA_ALG_CRYPT]) &&
196 attrs[XFRMA_ALG_AEAD])
197 goto out;
198 if (attrs[XFRMA_TFCPAD] &&
199 p->mode != XFRM_MODE_TUNNEL)
200 goto out;
201 break;
202
203 case IPPROTO_COMP:
204 if (!attrs[XFRMA_ALG_COMP] ||
205 attrs[XFRMA_ALG_AEAD] ||
206 attrs[XFRMA_ALG_AUTH] ||
207 attrs[XFRMA_ALG_AUTH_TRUNC] ||
208 attrs[XFRMA_ALG_CRYPT] ||
209 attrs[XFRMA_TFCPAD] ||
210 (ntohl(p->id.spi) >= 0x10000))
211 goto out;
212 break;
213
214 #if IS_ENABLED(CONFIG_IPV6)
215 case IPPROTO_DSTOPTS:
216 case IPPROTO_ROUTING:
217 if (attrs[XFRMA_ALG_COMP] ||
218 attrs[XFRMA_ALG_AUTH] ||
219 attrs[XFRMA_ALG_AUTH_TRUNC] ||
220 attrs[XFRMA_ALG_AEAD] ||
221 attrs[XFRMA_ALG_CRYPT] ||
222 attrs[XFRMA_ENCAP] ||
223 attrs[XFRMA_SEC_CTX] ||
224 attrs[XFRMA_TFCPAD] ||
225 !attrs[XFRMA_COADDR])
226 goto out;
227 break;
228 #endif
229
230 default:
231 goto out;
232 }
233
234 if ((err = verify_aead(attrs)))
235 goto out;
236 if ((err = verify_auth_trunc(attrs)))
237 goto out;
238 if ((err = verify_one_alg(attrs, XFRMA_ALG_AUTH)))
239 goto out;
240 if ((err = verify_one_alg(attrs, XFRMA_ALG_CRYPT)))
241 goto out;
242 if ((err = verify_one_alg(attrs, XFRMA_ALG_COMP)))
243 goto out;
244 if ((err = verify_sec_ctx_len(attrs)))
245 goto out;
246 if ((err = verify_replay(p, attrs)))
247 goto out;
248
249 err = -EINVAL;
250 switch (p->mode) {
251 case XFRM_MODE_TRANSPORT:
252 case XFRM_MODE_TUNNEL:
253 case XFRM_MODE_ROUTEOPTIMIZATION:
254 case XFRM_MODE_BEET:
255 break;
256
257 default:
258 goto out;
259 }
260
261 err = 0;
262
263 out:
264 return err;
265 }
266
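/* The attach_* helpers below resolve a user-supplied algorithm name against
 * the kernel's algorithm tables and duplicate the attribute payload into the
 * state being constructed.
 */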
267 static int attach_one_algo(struct xfrm_algo **algpp, u8 *props,
268 struct xfrm_algo_desc *(*get_byname)(const char *, int),
269 struct nlattr *rta)
270 {
271 struct xfrm_algo *p, *ualg;
272 struct xfrm_algo_desc *algo;
273
274 if (!rta)
275 return 0;
276
277 ualg = nla_data(rta);
278
279 algo = get_byname(ualg->alg_name, 1);
280 if (!algo)
281 return -ENOSYS;
282 *props = algo->desc.sadb_alg_id;
283
284 p = kmemdup(ualg, xfrm_alg_len(ualg), GFP_KERNEL);
285 if (!p)
286 return -ENOMEM;
287
288 strcpy(p->alg_name, algo->name);
289 *algpp = p;
290 return 0;
291 }
292
293 static int attach_crypt(struct xfrm_state *x, struct nlattr *rta)
294 {
295 struct xfrm_algo *p, *ualg;
296 struct xfrm_algo_desc *algo;
297
298 if (!rta)
299 return 0;
300
301 ualg = nla_data(rta);
302
303 algo = xfrm_ealg_get_byname(ualg->alg_name, 1);
304 if (!algo)
305 return -ENOSYS;
306 x->props.ealgo = algo->desc.sadb_alg_id;
307
308 p = kmemdup(ualg, xfrm_alg_len(ualg), GFP_KERNEL);
309 if (!p)
310 return -ENOMEM;
311
312 strcpy(p->alg_name, algo->name);
313 x->ealg = p;
314 x->geniv = algo->uinfo.encr.geniv;
315 return 0;
316 }
317
318 static int attach_auth(struct xfrm_algo_auth **algpp, u8 *props,
319 struct nlattr *rta)
320 {
321 struct xfrm_algo *ualg;
322 struct xfrm_algo_auth *p;
323 struct xfrm_algo_desc *algo;
324
325 if (!rta)
326 return 0;
327
328 ualg = nla_data(rta);
329
330 algo = xfrm_aalg_get_byname(ualg->alg_name, 1);
331 if (!algo)
332 return -ENOSYS;
333 *props = algo->desc.sadb_alg_id;
334
335 p = kmalloc(sizeof(*p) + (ualg->alg_key_len + 7) / 8, GFP_KERNEL);
336 if (!p)
337 return -ENOMEM;
338
339 strcpy(p->alg_name, algo->name);
340 p->alg_key_len = ualg->alg_key_len;
341 p->alg_trunc_len = algo->uinfo.auth.icv_truncbits;
342 memcpy(p->alg_key, ualg->alg_key, (ualg->alg_key_len + 7) / 8);
343
344 *algpp = p;
345 return 0;
346 }
347
348 static int attach_auth_trunc(struct xfrm_algo_auth **algpp, u8 *props,
349 struct nlattr *rta)
350 {
351 struct xfrm_algo_auth *p, *ualg;
352 struct xfrm_algo_desc *algo;
353
354 if (!rta)
355 return 0;
356
357 ualg = nla_data(rta);
358
359 algo = xfrm_aalg_get_byname(ualg->alg_name, 1);
360 if (!algo)
361 return -ENOSYS;
362 if (ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits)
363 return -EINVAL;
364 *props = algo->desc.sadb_alg_id;
365
366 p = kmemdup(ualg, xfrm_alg_auth_len(ualg), GFP_KERNEL);
367 if (!p)
368 return -ENOMEM;
369
370 strcpy(p->alg_name, algo->name);
371 if (!p->alg_trunc_len)
372 p->alg_trunc_len = algo->uinfo.auth.icv_truncbits;
373
374 *algpp = p;
375 return 0;
376 }
377
378 static int attach_aead(struct xfrm_state *x, struct nlattr *rta)
379 {
380 struct xfrm_algo_aead *p, *ualg;
381 struct xfrm_algo_desc *algo;
382
383 if (!rta)
384 return 0;
385
386 ualg = nla_data(rta);
387
388 algo = xfrm_aead_get_byname(ualg->alg_name, ualg->alg_icv_len, 1);
389 if (!algo)
390 return -ENOSYS;
391 x->props.ealgo = algo->desc.sadb_alg_id;
392
393 p = kmemdup(ualg, aead_len(ualg), GFP_KERNEL);
394 if (!p)
395 return -ENOMEM;
396
397 strcpy(p->alg_name, algo->name);
398 x->aead = p;
399 x->geniv = algo->uinfo.aead.geniv;
400 return 0;
401 }
402
403 static inline int xfrm_replay_verify_len(struct xfrm_replay_state_esn *replay_esn,
404 struct nlattr *rp)
405 {
406 struct xfrm_replay_state_esn *up;
407 int ulen;
408
409 if (!replay_esn || !rp)
410 return 0;
411
412 up = nla_data(rp);
413 ulen = xfrm_replay_state_esn_len(up);
414
415 if (nla_len(rp) < ulen || xfrm_replay_state_esn_len(replay_esn) != ulen)
416 return -EINVAL;
417
418 if (up->replay_window > up->bmp_len * sizeof(__u32) * 8)
419 return -EINVAL;
420
421 return 0;
422 }
423
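/* Allocate the kernel's replay and pending-replay ESN state sized for the
 * user-supplied bitmap length, copying in only as much data as the user
 * actually provided.
 */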
424 static int xfrm_alloc_replay_state_esn(struct xfrm_replay_state_esn **replay_esn,
425 struct xfrm_replay_state_esn **preplay_esn,
426 struct nlattr *rta)
427 {
428 struct xfrm_replay_state_esn *p, *pp, *up;
429 int klen, ulen;
430
431 if (!rta)
432 return 0;
433
434 up = nla_data(rta);
435 klen = xfrm_replay_state_esn_len(up);
436 ulen = nla_len(rta) >= klen ? klen : sizeof(*up);
437
438 p = kzalloc(klen, GFP_KERNEL);
439 if (!p)
440 return -ENOMEM;
441
442 pp = kzalloc(klen, GFP_KERNEL);
443 if (!pp) {
444 kfree(p);
445 return -ENOMEM;
446 }
447
448 memcpy(p, up, ulen);
449 memcpy(pp, up, ulen);
450
451 *replay_esn = p;
452 *preplay_esn = pp;
453
454 return 0;
455 }
456
457 static inline int xfrm_user_sec_ctx_size(struct xfrm_sec_ctx *xfrm_ctx)
458 {
459 int len = 0;
460
461 if (xfrm_ctx) {
462 len += sizeof(struct xfrm_user_sec_ctx);
463 len += xfrm_ctx->ctx_len;
464 }
465 return len;
466 }
467
468 static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
469 {
470 memcpy(&x->id, &p->id, sizeof(x->id));
471 memcpy(&x->sel, &p->sel, sizeof(x->sel));
472 memcpy(&x->lft, &p->lft, sizeof(x->lft));
473 x->props.mode = p->mode;
474 x->props.replay_window = min_t(unsigned int, p->replay_window,
475 sizeof(x->replay.bitmap) * 8);
476 x->props.reqid = p->reqid;
477 x->props.family = p->family;
478 memcpy(&x->props.saddr, &p->saddr, sizeof(x->props.saddr));
479 x->props.flags = p->flags;
480
481 if (!x->sel.family && !(p->flags & XFRM_STATE_AF_UNSPEC))
482 x->sel.family = p->family;
483 }
484
485 /*
486  * Someday, when pfkey also has support, this code could be made
487  * shareable and moved to xfrm_state.c - JHS
488  *
489  */
490 static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs,
491 int update_esn)
492 {
493 struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
494 struct nlattr *re = update_esn ? attrs[XFRMA_REPLAY_ESN_VAL] : NULL;
495 struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
496 struct nlattr *et = attrs[XFRMA_ETIMER_THRESH];
497 struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];
498
499 if (re) {
500 struct xfrm_replay_state_esn *replay_esn;
501 replay_esn = nla_data(re);
502 memcpy(x->replay_esn, replay_esn,
503 xfrm_replay_state_esn_len(replay_esn));
504 memcpy(x->preplay_esn, replay_esn,
505 xfrm_replay_state_esn_len(replay_esn));
506 }
507
508 if (rp) {
509 struct xfrm_replay_state *replay;
510 replay = nla_data(rp);
511 memcpy(&x->replay, replay, sizeof(*replay));
512 memcpy(&x->preplay, replay, sizeof(*replay));
513 }
514
515 if (lt) {
516 struct xfrm_lifetime_cur *ltime;
517 ltime = nla_data(lt);
518 x->curlft.bytes = ltime->bytes;
519 x->curlft.packets = ltime->packets;
520 x->curlft.add_time = ltime->add_time;
521 x->curlft.use_time = ltime->use_time;
522 }
523
524 if (et)
525 x->replay_maxage = nla_get_u32(et);
526
527 if (rt)
528 x->replay_maxdiff = nla_get_u32(rt);
529 }
530
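/* Build an xfrm_state from an XFRM_MSG_NEWSA/XFRM_MSG_UPDSA request: copy
 * the base parameters, attach the optional algorithm, encapsulation, care-of
 * address and replay attributes, then initialise the state machinery.
 */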
531 static struct xfrm_state *xfrm_state_construct(struct net *net,
532 struct xfrm_usersa_info *p,
533 struct nlattr **attrs,
534 int *errp)
535 {
536 struct xfrm_state *x = xfrm_state_alloc(net);
537 int err = -ENOMEM;
538
539 if (!x)
540 goto error_no_put;
541
542 copy_from_user_state(x, p);
543
544 if (attrs[XFRMA_SA_EXTRA_FLAGS])
545 x->props.extra_flags = nla_get_u32(attrs[XFRMA_SA_EXTRA_FLAGS]);
546
547 if ((err = attach_aead(x, attrs[XFRMA_ALG_AEAD])))
548 goto error;
549 if ((err = attach_auth_trunc(&x->aalg, &x->props.aalgo,
550 attrs[XFRMA_ALG_AUTH_TRUNC])))
551 goto error;
552 if (!x->props.aalgo) {
553 if ((err = attach_auth(&x->aalg, &x->props.aalgo,
554 attrs[XFRMA_ALG_AUTH])))
555 goto error;
556 }
557 if ((err = attach_crypt(x, attrs[XFRMA_ALG_CRYPT])))
558 goto error;
559 if ((err = attach_one_algo(&x->calg, &x->props.calgo,
560 xfrm_calg_get_byname,
561 attrs[XFRMA_ALG_COMP])))
562 goto error;
563
564 if (attrs[XFRMA_ENCAP]) {
565 x->encap = kmemdup(nla_data(attrs[XFRMA_ENCAP]),
566 sizeof(*x->encap), GFP_KERNEL);
567 if (x->encap == NULL)
568 goto error;
569 }
570
571 if (attrs[XFRMA_TFCPAD])
572 x->tfcpad = nla_get_u32(attrs[XFRMA_TFCPAD]);
573
574 if (attrs[XFRMA_COADDR]) {
575 x->coaddr = kmemdup(nla_data(attrs[XFRMA_COADDR]),
576 sizeof(*x->coaddr), GFP_KERNEL);
577 if (x->coaddr == NULL)
578 goto error;
579 }
580
581 xfrm_mark_get(attrs, &x->mark);
582
583 err = __xfrm_init_state(x, false);
584 if (err)
585 goto error;
586
587 if (attrs[XFRMA_SEC_CTX]) {
588 err = security_xfrm_state_alloc(x,
589 nla_data(attrs[XFRMA_SEC_CTX]));
590 if (err)
591 goto error;
592 }
593
594 if ((err = xfrm_alloc_replay_state_esn(&x->replay_esn, &x->preplay_esn,
595 attrs[XFRMA_REPLAY_ESN_VAL])))
596 goto error;
597
598 x->km.seq = p->seq;
599 x->replay_maxdiff = net->xfrm.sysctl_aevent_rseqth;
600 /* sysctl_xfrm_aevent_etime is in 100ms units */
601 x->replay_maxage = (net->xfrm.sysctl_aevent_etime*HZ)/XFRM_AE_ETH_M;
602
603 if ((err = xfrm_init_replay(x)))
604 goto error;
605
606 /* override default values from above */
607 xfrm_update_ae_params(x, attrs, 0);
608
609 return x;
610
611 error:
612 x->km.state = XFRM_STATE_DEAD;
613 xfrm_state_put(x);
614 error_no_put:
615 *errp = err;
616 return NULL;
617 }
618
619 static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
620 struct nlattr **attrs)
621 {
622 struct net *net = sock_net(skb->sk);
623 struct xfrm_usersa_info *p = nlmsg_data(nlh);
624 struct xfrm_state *x;
625 int err;
626 struct km_event c;
627
628 err = verify_newsa_info(p, attrs);
629 if (err)
630 return err;
631
632 x = xfrm_state_construct(net, p, attrs, &err);
633 if (!x)
634 return err;
635
636 xfrm_state_hold(x);
637 if (nlh->nlmsg_type == XFRM_MSG_NEWSA)
638 err = xfrm_state_add(x);
639 else
640 err = xfrm_state_update(x);
641
642 xfrm_audit_state_add(x, err ? 0 : 1, true);
643
644 if (err < 0) {
645 x->km.state = XFRM_STATE_DEAD;
646 __xfrm_state_put(x);
647 goto out;
648 }
649
650 c.seq = nlh->nlmsg_seq;
651 c.portid = nlh->nlmsg_pid;
652 c.event = nlh->nlmsg_type;
653
654 km_state_notify(x, &c);
655 out:
656 xfrm_state_put(x);
657 return err;
658 }
659
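/* Look up the SA referenced by a user request: the IPsec protocols (AH, ESP,
 * IPcomp) are found by (mark, daddr, spi, proto); anything else is looked up
 * by its source/destination address pair.
 */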
660 static struct xfrm_state *xfrm_user_state_lookup(struct net *net,
661 struct xfrm_usersa_id *p,
662 struct nlattr **attrs,
663 int *errp)
664 {
665 struct xfrm_state *x = NULL;
666 struct xfrm_mark m;
667 int err;
668 u32 mark = xfrm_mark_get(attrs, &m);
669
670 if (xfrm_id_proto_match(p->proto, IPSEC_PROTO_ANY)) {
671 err = -ESRCH;
672 x = xfrm_state_lookup(net, mark, &p->daddr, p->spi, p->proto, p->family);
673 } else {
674 xfrm_address_t *saddr = NULL;
675
676 verify_one_addr(attrs, XFRMA_SRCADDR, &saddr);
677 if (!saddr) {
678 err = -EINVAL;
679 goto out;
680 }
681
682 err = -ESRCH;
683 x = xfrm_state_lookup_byaddr(net, mark,
684 &p->daddr, saddr,
685 p->proto, p->family);
686 }
687
688 out:
689 if (!x && errp)
690 *errp = err;
691 return x;
692 }
693
694 static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
695 struct nlattr **attrs)
696 {
697 struct net *net = sock_net(skb->sk);
698 struct xfrm_state *x;
699 int err = -ESRCH;
700 struct km_event c;
701 struct xfrm_usersa_id *p = nlmsg_data(nlh);
702
703 x = xfrm_user_state_lookup(net, p, attrs, &err);
704 if (x == NULL)
705 return err;
706
707 if ((err = security_xfrm_state_delete(x)) != 0)
708 goto out;
709
710 if (xfrm_state_kern(x)) {
711 err = -EPERM;
712 goto out;
713 }
714
715 err = xfrm_state_delete(x);
716
717 if (err < 0)
718 goto out;
719
720 c.seq = nlh->nlmsg_seq;
721 c.portid = nlh->nlmsg_pid;
722 c.event = nlh->nlmsg_type;
723 km_state_notify(x, &c);
724
725 out:
726 xfrm_audit_state_delete(x, err ? 0 : 1, true);
727 xfrm_state_put(x);
728 return err;
729 }
730
731 static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
732 {
733 memset(p, 0, sizeof(*p));
734 memcpy(&p->id, &x->id, sizeof(p->id));
735 memcpy(&p->sel, &x->sel, sizeof(p->sel));
736 memcpy(&p->lft, &x->lft, sizeof(p->lft));
737 memcpy(&p->curlft, &x->curlft, sizeof(p->curlft));
738 put_unaligned(x->stats.replay_window, &p->stats.replay_window);
739 put_unaligned(x->stats.replay, &p->stats.replay);
740 put_unaligned(x->stats.integrity_failed, &p->stats.integrity_failed);
741 memcpy(&p->saddr, &x->props.saddr, sizeof(p->saddr));
742 p->mode = x->props.mode;
743 p->replay_window = x->props.replay_window;
744 p->reqid = x->props.reqid;
745 p->family = x->props.family;
746 p->flags = x->props.flags;
747 p->seq = x->km.seq;
748 }
749
750 struct xfrm_dump_info {
751 struct sk_buff *in_skb;
752 struct sk_buff *out_skb;
753 u32 nlmsg_seq;
754 u16 nlmsg_flags;
755 };
756
757 static int copy_sec_ctx(struct xfrm_sec_ctx *s, struct sk_buff *skb)
758 {
759 struct xfrm_user_sec_ctx *uctx;
760 struct nlattr *attr;
761 int ctx_size = sizeof(*uctx) + s->ctx_len;
762
763 attr = nla_reserve(skb, XFRMA_SEC_CTX, ctx_size);
764 if (attr == NULL)
765 return -EMSGSIZE;
766
767 uctx = nla_data(attr);
768 uctx->exttype = XFRMA_SEC_CTX;
769 uctx->len = ctx_size;
770 uctx->ctx_doi = s->ctx_doi;
771 uctx->ctx_alg = s->ctx_alg;
772 uctx->ctx_len = s->ctx_len;
773 memcpy(uctx + 1, s->ctx_str, s->ctx_len);
774
775 return 0;
776 }
777
778 static int copy_to_user_auth(struct xfrm_algo_auth *auth, struct sk_buff *skb)
779 {
780 struct xfrm_algo *algo;
781 struct nlattr *nla;
782
783 nla = nla_reserve(skb, XFRMA_ALG_AUTH,
784 sizeof(*algo) + (auth->alg_key_len + 7) / 8);
785 if (!nla)
786 return -EMSGSIZE;
787
788 algo = nla_data(nla);
789 strncpy(algo->alg_name, auth->alg_name, sizeof(algo->alg_name));
790 memcpy(algo->alg_key, auth->alg_key, (auth->alg_key_len + 7) / 8);
791 algo->alg_key_len = auth->alg_key_len;
792
793 return 0;
794 }
795
796 /* Don't change this without updating xfrm_sa_len! */
797 static int copy_to_user_state_extra(struct xfrm_state *x,
798 struct xfrm_usersa_info *p,
799 struct sk_buff *skb)
800 {
801 int ret = 0;
802
803 copy_to_user_state(x, p);
804
805 if (x->props.extra_flags) {
806 ret = nla_put_u32(skb, XFRMA_SA_EXTRA_FLAGS,
807 x->props.extra_flags);
808 if (ret)
809 goto out;
810 }
811
812 if (x->coaddr) {
813 ret = nla_put(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr);
814 if (ret)
815 goto out;
816 }
817 if (x->lastused) {
818 ret = nla_put_u64_64bit(skb, XFRMA_LASTUSED, x->lastused,
819 XFRMA_PAD);
820 if (ret)
821 goto out;
822 }
823 if (x->aead) {
824 ret = nla_put(skb, XFRMA_ALG_AEAD, aead_len(x->aead), x->aead);
825 if (ret)
826 goto out;
827 }
828 if (x->aalg) {
829 ret = copy_to_user_auth(x->aalg, skb);
830 if (!ret)
831 ret = nla_put(skb, XFRMA_ALG_AUTH_TRUNC,
832 xfrm_alg_auth_len(x->aalg), x->aalg);
833 if (ret)
834 goto out;
835 }
836 if (x->ealg) {
837 ret = nla_put(skb, XFRMA_ALG_CRYPT, xfrm_alg_len(x->ealg), x->ealg);
838 if (ret)
839 goto out;
840 }
841 if (x->calg) {
842 ret = nla_put(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg);
843 if (ret)
844 goto out;
845 }
846 if (x->encap) {
847 ret = nla_put(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap);
848 if (ret)
849 goto out;
850 }
851 if (x->tfcpad) {
852 ret = nla_put_u32(skb, XFRMA_TFCPAD, x->tfcpad);
853 if (ret)
854 goto out;
855 }
856 ret = xfrm_mark_put(skb, &x->mark);
857 if (ret)
858 goto out;
859 if (x->replay_esn)
860 ret = nla_put(skb, XFRMA_REPLAY_ESN_VAL,
861 xfrm_replay_state_esn_len(x->replay_esn),
862 x->replay_esn);
863 else
864 ret = nla_put(skb, XFRMA_REPLAY_VAL, sizeof(x->replay),
865 &x->replay);
866 if (ret)
867 goto out;
868 if (x->security)
869 ret = copy_sec_ctx(x->security, skb);
870 out:
871 return ret;
872 }
873
874 static int dump_one_state(struct xfrm_state *x, int count, void *ptr)
875 {
876 struct xfrm_dump_info *sp = ptr;
877 struct sk_buff *in_skb = sp->in_skb;
878 struct sk_buff *skb = sp->out_skb;
879 struct xfrm_usersa_info *p;
880 struct nlmsghdr *nlh;
881 int err;
882
883 nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, sp->nlmsg_seq,
884 XFRM_MSG_NEWSA, sizeof(*p), sp->nlmsg_flags);
885 if (nlh == NULL)
886 return -EMSGSIZE;
887
888 p = nlmsg_data(nlh);
889
890 err = copy_to_user_state_extra(x, p, skb);
891 if (err) {
892 nlmsg_cancel(skb, nlh);
893 return err;
894 }
895 nlmsg_end(skb, nlh);
896 return 0;
897 }
898
899 static int xfrm_dump_sa_done(struct netlink_callback *cb)
900 {
901 struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1];
902 struct sock *sk = cb->skb->sk;
903 struct net *net = sock_net(sk);
904
905 if (cb->args[0])
906 xfrm_state_walk_done(walk, net);
907 return 0;
908 }
909
910 static const struct nla_policy xfrma_policy[XFRMA_MAX+1];
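/* SA dump: the xfrm_state_walk cursor lives in cb->args[1..] between netlink
 * dump callbacks, and cb->args[0] records whether the walk (and the optional
 * address filter) has been initialised.
 */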
911 static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
912 {
913 struct net *net = sock_net(skb->sk);
914 struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1];
915 struct xfrm_dump_info info;
916
917 BUILD_BUG_ON(sizeof(struct xfrm_state_walk) >
918 sizeof(cb->args) - sizeof(cb->args[0]));
919
920 info.in_skb = cb->skb;
921 info.out_skb = skb;
922 info.nlmsg_seq = cb->nlh->nlmsg_seq;
923 info.nlmsg_flags = NLM_F_MULTI;
924
925 if (!cb->args[0]) {
926 struct nlattr *attrs[XFRMA_MAX+1];
927 struct xfrm_address_filter *filter = NULL;
928 u8 proto = 0;
929 int err;
930
931 err = nlmsg_parse(cb->nlh, 0, attrs, XFRMA_MAX,
932 xfrma_policy);
933 if (err < 0)
934 return err;
935
936 if (attrs[XFRMA_ADDRESS_FILTER]) {
937 filter = kmemdup(nla_data(attrs[XFRMA_ADDRESS_FILTER]),
938 sizeof(*filter), GFP_KERNEL);
939 if (filter == NULL)
940 return -ENOMEM;
941 }
942
943 if (attrs[XFRMA_PROTO])
944 proto = nla_get_u8(attrs[XFRMA_PROTO]);
945
946 xfrm_state_walk_init(walk, proto, filter);
947 cb->args[0] = 1;
948 }
949
950 (void) xfrm_state_walk(net, walk, dump_one_state, &info);
951
952 return skb->len;
953 }
954
955 static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
956 struct xfrm_state *x, u32 seq)
957 {
958 struct xfrm_dump_info info;
959 struct sk_buff *skb;
960 int err;
961
962 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
963 if (!skb)
964 return ERR_PTR(-ENOMEM);
965
966 info.in_skb = in_skb;
967 info.out_skb = skb;
968 info.nlmsg_seq = seq;
969 info.nlmsg_flags = 0;
970
971 err = dump_one_state(x, 0, &info);
972 if (err) {
973 kfree_skb(skb);
974 return ERR_PTR(err);
975 }
976
977 return skb;
978 }
979
980 /* A wrapper for nlmsg_multicast() checking that nlsk is still available.
981 * Must be called with RCU read lock.
982 */
983 static inline int xfrm_nlmsg_multicast(struct net *net, struct sk_buff *skb,
984 u32 pid, unsigned int group)
985 {
986 struct sock *nlsk = rcu_dereference(net->xfrm.nlsk);
987
988 if (nlsk)
989 return nlmsg_multicast(nlsk, skb, pid, group, GFP_ATOMIC);
990 else
991 return -1;
992 }
993
994 static inline size_t xfrm_spdinfo_msgsize(void)
995 {
996 return NLMSG_ALIGN(4)
997 + nla_total_size(sizeof(struct xfrmu_spdinfo))
998 + nla_total_size(sizeof(struct xfrmu_spdhinfo))
999 + nla_total_size(sizeof(struct xfrmu_spdhthresh))
1000 + nla_total_size(sizeof(struct xfrmu_spdhthresh));
1001 }
1002
1003 static int build_spdinfo(struct sk_buff *skb, struct net *net,
1004 u32 portid, u32 seq, u32 flags)
1005 {
1006 struct xfrmk_spdinfo si;
1007 struct xfrmu_spdinfo spc;
1008 struct xfrmu_spdhinfo sph;
1009 struct xfrmu_spdhthresh spt4, spt6;
1010 struct nlmsghdr *nlh;
1011 int err;
1012 u32 *f;
1013 unsigned lseq;
1014
1015 nlh = nlmsg_put(skb, portid, seq, XFRM_MSG_NEWSPDINFO, sizeof(u32), 0);
1016 if (nlh == NULL) /* shouldn't really happen ... */
1017 return -EMSGSIZE;
1018
1019 f = nlmsg_data(nlh);
1020 *f = flags;
1021 xfrm_spd_getinfo(net, &si);
1022 spc.incnt = si.incnt;
1023 spc.outcnt = si.outcnt;
1024 spc.fwdcnt = si.fwdcnt;
1025 spc.inscnt = si.inscnt;
1026 spc.outscnt = si.outscnt;
1027 spc.fwdscnt = si.fwdscnt;
1028 sph.spdhcnt = si.spdhcnt;
1029 sph.spdhmcnt = si.spdhmcnt;
1030
1031 do {
1032 lseq = read_seqbegin(&net->xfrm.policy_hthresh.lock);
1033
1034 spt4.lbits = net->xfrm.policy_hthresh.lbits4;
1035 spt4.rbits = net->xfrm.policy_hthresh.rbits4;
1036 spt6.lbits = net->xfrm.policy_hthresh.lbits6;
1037 spt6.rbits = net->xfrm.policy_hthresh.rbits6;
1038 } while (read_seqretry(&net->xfrm.policy_hthresh.lock, lseq));
1039
1040 err = nla_put(skb, XFRMA_SPD_INFO, sizeof(spc), &spc);
1041 if (!err)
1042 err = nla_put(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph);
1043 if (!err)
1044 err = nla_put(skb, XFRMA_SPD_IPV4_HTHRESH, sizeof(spt4), &spt4);
1045 if (!err)
1046 err = nla_put(skb, XFRMA_SPD_IPV6_HTHRESH, sizeof(spt6), &spt6);
1047 if (err) {
1048 nlmsg_cancel(skb, nlh);
1049 return err;
1050 }
1051
1052 nlmsg_end(skb, nlh);
1053 return 0;
1054 }
1055
1056 static int xfrm_set_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
1057 struct nlattr **attrs)
1058 {
1059 struct net *net = sock_net(skb->sk);
1060 struct xfrmu_spdhthresh *thresh4 = NULL;
1061 struct xfrmu_spdhthresh *thresh6 = NULL;
1062
1063 	/* selector prefix length thresholds used to hash policies */
1064 if (attrs[XFRMA_SPD_IPV4_HTHRESH]) {
1065 struct nlattr *rta = attrs[XFRMA_SPD_IPV4_HTHRESH];
1066
1067 if (nla_len(rta) < sizeof(*thresh4))
1068 return -EINVAL;
1069 thresh4 = nla_data(rta);
1070 if (thresh4->lbits > 32 || thresh4->rbits > 32)
1071 return -EINVAL;
1072 }
1073 if (attrs[XFRMA_SPD_IPV6_HTHRESH]) {
1074 struct nlattr *rta = attrs[XFRMA_SPD_IPV6_HTHRESH];
1075
1076 if (nla_len(rta) < sizeof(*thresh6))
1077 return -EINVAL;
1078 thresh6 = nla_data(rta);
1079 if (thresh6->lbits > 128 || thresh6->rbits > 128)
1080 return -EINVAL;
1081 }
1082
1083 if (thresh4 || thresh6) {
1084 write_seqlock(&net->xfrm.policy_hthresh.lock);
1085 if (thresh4) {
1086 net->xfrm.policy_hthresh.lbits4 = thresh4->lbits;
1087 net->xfrm.policy_hthresh.rbits4 = thresh4->rbits;
1088 }
1089 if (thresh6) {
1090 net->xfrm.policy_hthresh.lbits6 = thresh6->lbits;
1091 net->xfrm.policy_hthresh.rbits6 = thresh6->rbits;
1092 }
1093 write_sequnlock(&net->xfrm.policy_hthresh.lock);
1094
1095 xfrm_policy_hash_rebuild(net);
1096 }
1097
1098 return 0;
1099 }
1100
1101 static int xfrm_get_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
1102 struct nlattr **attrs)
1103 {
1104 struct net *net = sock_net(skb->sk);
1105 struct sk_buff *r_skb;
1106 u32 *flags = nlmsg_data(nlh);
1107 u32 sportid = NETLINK_CB(skb).portid;
1108 u32 seq = nlh->nlmsg_seq;
1109
1110 r_skb = nlmsg_new(xfrm_spdinfo_msgsize(), GFP_ATOMIC);
1111 if (r_skb == NULL)
1112 return -ENOMEM;
1113
1114 if (build_spdinfo(r_skb, net, sportid, seq, *flags) < 0)
1115 BUG();
1116
1117 return nlmsg_unicast(net->xfrm.nlsk, r_skb, sportid);
1118 }
1119
1120 static inline size_t xfrm_sadinfo_msgsize(void)
1121 {
1122 return NLMSG_ALIGN(4)
1123 + nla_total_size(sizeof(struct xfrmu_sadhinfo))
1124 + nla_total_size(4); /* XFRMA_SAD_CNT */
1125 }
1126
1127 static int build_sadinfo(struct sk_buff *skb, struct net *net,
1128 u32 portid, u32 seq, u32 flags)
1129 {
1130 struct xfrmk_sadinfo si;
1131 struct xfrmu_sadhinfo sh;
1132 struct nlmsghdr *nlh;
1133 int err;
1134 u32 *f;
1135
1136 nlh = nlmsg_put(skb, portid, seq, XFRM_MSG_NEWSADINFO, sizeof(u32), 0);
1137 if (nlh == NULL) /* shouldn't really happen ... */
1138 return -EMSGSIZE;
1139
1140 f = nlmsg_data(nlh);
1141 *f = flags;
1142 xfrm_sad_getinfo(net, &si);
1143
1144 sh.sadhmcnt = si.sadhmcnt;
1145 sh.sadhcnt = si.sadhcnt;
1146
1147 err = nla_put_u32(skb, XFRMA_SAD_CNT, si.sadcnt);
1148 if (!err)
1149 err = nla_put(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh);
1150 if (err) {
1151 nlmsg_cancel(skb, nlh);
1152 return err;
1153 }
1154
1155 nlmsg_end(skb, nlh);
1156 return 0;
1157 }
1158
1159 static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
1160 struct nlattr **attrs)
1161 {
1162 struct net *net = sock_net(skb->sk);
1163 struct sk_buff *r_skb;
1164 u32 *flags = nlmsg_data(nlh);
1165 u32 sportid = NETLINK_CB(skb).portid;
1166 u32 seq = nlh->nlmsg_seq;
1167
1168 r_skb = nlmsg_new(xfrm_sadinfo_msgsize(), GFP_ATOMIC);
1169 if (r_skb == NULL)
1170 return -ENOMEM;
1171
1172 if (build_sadinfo(r_skb, net, sportid, seq, *flags) < 0)
1173 BUG();
1174
1175 return nlmsg_unicast(net->xfrm.nlsk, r_skb, sportid);
1176 }
1177
1178 static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
1179 struct nlattr **attrs)
1180 {
1181 struct net *net = sock_net(skb->sk);
1182 struct xfrm_usersa_id *p = nlmsg_data(nlh);
1183 struct xfrm_state *x;
1184 struct sk_buff *resp_skb;
1185 int err = -ESRCH;
1186
1187 x = xfrm_user_state_lookup(net, p, attrs, &err);
1188 if (x == NULL)
1189 goto out_noput;
1190
1191 resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
1192 if (IS_ERR(resp_skb)) {
1193 err = PTR_ERR(resp_skb);
1194 } else {
1195 err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).portid);
1196 }
1197 xfrm_state_put(x);
1198 out_noput:
1199 return err;
1200 }
1201
1202 static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
1203 struct nlattr **attrs)
1204 {
1205 struct net *net = sock_net(skb->sk);
1206 struct xfrm_state *x;
1207 struct xfrm_userspi_info *p;
1208 struct sk_buff *resp_skb;
1209 xfrm_address_t *daddr;
1210 int family;
1211 int err;
1212 u32 mark;
1213 struct xfrm_mark m;
1214
1215 p = nlmsg_data(nlh);
1216 err = verify_spi_info(p->info.id.proto, p->min, p->max);
1217 if (err)
1218 goto out_noput;
1219
1220 family = p->info.family;
1221 daddr = &p->info.id.daddr;
1222
1223 x = NULL;
1224
1225 mark = xfrm_mark_get(attrs, &m);
1226 if (p->info.seq) {
1227 x = xfrm_find_acq_byseq(net, mark, p->info.seq);
1228 if (x && !xfrm_addr_equal(&x->id.daddr, daddr, family)) {
1229 xfrm_state_put(x);
1230 x = NULL;
1231 }
1232 }
1233
1234 if (!x)
1235 x = xfrm_find_acq(net, &m, p->info.mode, p->info.reqid,
1236 p->info.id.proto, daddr,
1237 &p->info.saddr, 1,
1238 family);
1239 err = -ENOENT;
1240 if (x == NULL)
1241 goto out_noput;
1242
1243 err = xfrm_alloc_spi(x, p->min, p->max);
1244 if (err)
1245 goto out;
1246
1247 resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
1248 if (IS_ERR(resp_skb)) {
1249 err = PTR_ERR(resp_skb);
1250 goto out;
1251 }
1252
1253 err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).portid);
1254
1255 out:
1256 xfrm_state_put(x);
1257 out_noput:
1258 return err;
1259 }
1260
1261 static int verify_policy_dir(u8 dir)
1262 {
1263 switch (dir) {
1264 case XFRM_POLICY_IN:
1265 case XFRM_POLICY_OUT:
1266 case XFRM_POLICY_FWD:
1267 break;
1268
1269 default:
1270 return -EINVAL;
1271 }
1272
1273 return 0;
1274 }
1275
1276 static int verify_policy_type(u8 type)
1277 {
1278 switch (type) {
1279 case XFRM_POLICY_TYPE_MAIN:
1280 #ifdef CONFIG_XFRM_SUB_POLICY
1281 case XFRM_POLICY_TYPE_SUB:
1282 #endif
1283 break;
1284
1285 default:
1286 return -EINVAL;
1287 }
1288
1289 return 0;
1290 }
1291
1292 static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
1293 {
1294 int ret;
1295
1296 switch (p->share) {
1297 case XFRM_SHARE_ANY:
1298 case XFRM_SHARE_SESSION:
1299 case XFRM_SHARE_USER:
1300 case XFRM_SHARE_UNIQUE:
1301 break;
1302
1303 default:
1304 return -EINVAL;
1305 }
1306
1307 switch (p->action) {
1308 case XFRM_POLICY_ALLOW:
1309 case XFRM_POLICY_BLOCK:
1310 break;
1311
1312 default:
1313 return -EINVAL;
1314 }
1315
1316 switch (p->sel.family) {
1317 case AF_INET:
1318 break;
1319
1320 case AF_INET6:
1321 #if IS_ENABLED(CONFIG_IPV6)
1322 break;
1323 #else
1324 return -EAFNOSUPPORT;
1325 #endif
1326
1327 default:
1328 return -EINVAL;
1329 }
1330
1331 ret = verify_policy_dir(p->dir);
1332 if (ret)
1333 return ret;
1334 if (p->index && ((p->index & XFRM_POLICY_MAX) != p->dir))
1335 return -EINVAL;
1336
1337 return 0;
1338 }
1339
1340 static int copy_from_user_sec_ctx(struct xfrm_policy *pol, struct nlattr **attrs)
1341 {
1342 struct nlattr *rt = attrs[XFRMA_SEC_CTX];
1343 struct xfrm_user_sec_ctx *uctx;
1344
1345 if (!rt)
1346 return 0;
1347
1348 uctx = nla_data(rt);
1349 return security_xfrm_policy_alloc(&pol->security, uctx, GFP_KERNEL);
1350 }
1351
1352 static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut,
1353 int nr)
1354 {
1355 int i;
1356
1357 xp->xfrm_nr = nr;
1358 for (i = 0; i < nr; i++, ut++) {
1359 struct xfrm_tmpl *t = &xp->xfrm_vec[i];
1360
1361 memcpy(&t->id, &ut->id, sizeof(struct xfrm_id));
1362 memcpy(&t->saddr, &ut->saddr,
1363 sizeof(xfrm_address_t));
1364 t->reqid = ut->reqid;
1365 t->mode = ut->mode;
1366 t->share = ut->share;
1367 t->optional = ut->optional;
1368 t->aalgos = ut->aalgos;
1369 t->ealgos = ut->ealgos;
1370 t->calgos = ut->calgos;
1371 /* If all masks are ~0, then we allow all algorithms. */
1372 t->allalgs = !~(t->aalgos & t->ealgos & t->calgos);
1373 t->encap_family = ut->family;
1374 }
1375 }
1376
1377 static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
1378 {
1379 int i;
1380
1381 if (nr > XFRM_MAX_DEPTH)
1382 return -EINVAL;
1383
1384 for (i = 0; i < nr; i++) {
1385 /* We never validated the ut->family value, so many
1386 * applications simply leave it at zero. The check was
1387 * never made and ut->family was ignored because all
1388 * templates could be assumed to have the same family as
1389 * the policy itself. Now that we will have ipv4-in-ipv6
1390 * and ipv6-in-ipv4 tunnels, this is no longer true.
1391 */
1392 if (!ut[i].family)
1393 ut[i].family = family;
1394
1395 switch (ut[i].family) {
1396 case AF_INET:
1397 break;
1398 #if IS_ENABLED(CONFIG_IPV6)
1399 case AF_INET6:
1400 break;
1401 #endif
1402 default:
1403 return -EINVAL;
1404 }
1405 }
1406
1407 return 0;
1408 }
1409
1410 static int copy_from_user_tmpl(struct xfrm_policy *pol, struct nlattr **attrs)
1411 {
1412 struct nlattr *rt = attrs[XFRMA_TMPL];
1413
1414 if (!rt) {
1415 pol->xfrm_nr = 0;
1416 } else {
1417 struct xfrm_user_tmpl *utmpl = nla_data(rt);
1418 int nr = nla_len(rt) / sizeof(*utmpl);
1419 int err;
1420
1421 err = validate_tmpl(nr, utmpl, pol->family);
1422 if (err)
1423 return err;
1424
1425 copy_templates(pol, utmpl, nr);
1426 }
1427 return 0;
1428 }
1429
1430 static int copy_from_user_policy_type(u8 *tp, struct nlattr **attrs)
1431 {
1432 struct nlattr *rt = attrs[XFRMA_POLICY_TYPE];
1433 struct xfrm_userpolicy_type *upt;
1434 u8 type = XFRM_POLICY_TYPE_MAIN;
1435 int err;
1436
1437 if (rt) {
1438 upt = nla_data(rt);
1439 type = upt->type;
1440 }
1441
1442 err = verify_policy_type(type);
1443 if (err)
1444 return err;
1445
1446 *tp = type;
1447 return 0;
1448 }
1449
1450 static void copy_from_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p)
1451 {
1452 xp->priority = p->priority;
1453 xp->index = p->index;
1454 memcpy(&xp->selector, &p->sel, sizeof(xp->selector));
1455 memcpy(&xp->lft, &p->lft, sizeof(xp->lft));
1456 xp->action = p->action;
1457 xp->flags = p->flags;
1458 xp->family = p->sel.family;
1459 /* XXX xp->share = p->share; */
1460 }
1461
1462 static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p, int dir)
1463 {
1464 memset(p, 0, sizeof(*p));
1465 memcpy(&p->sel, &xp->selector, sizeof(p->sel));
1466 memcpy(&p->lft, &xp->lft, sizeof(p->lft));
1467 memcpy(&p->curlft, &xp->curlft, sizeof(p->curlft));
1468 p->priority = xp->priority;
1469 p->index = xp->index;
1470 p->sel.family = xp->family;
1471 p->dir = dir;
1472 p->action = xp->action;
1473 p->flags = xp->flags;
1474 p->share = XFRM_SHARE_ANY; /* XXX xp->share */
1475 }
1476
1477 static struct xfrm_policy *xfrm_policy_construct(struct net *net, struct xfrm_userpolicy_info *p, struct nlattr **attrs, int *errp)
1478 {
1479 struct xfrm_policy *xp = xfrm_policy_alloc(net, GFP_KERNEL);
1480 int err;
1481
1482 if (!xp) {
1483 *errp = -ENOMEM;
1484 return NULL;
1485 }
1486
1487 copy_from_user_policy(xp, p);
1488
1489 err = copy_from_user_policy_type(&xp->type, attrs);
1490 if (err)
1491 goto error;
1492
1493 if (!(err = copy_from_user_tmpl(xp, attrs)))
1494 err = copy_from_user_sec_ctx(xp, attrs);
1495 if (err)
1496 goto error;
1497
1498 xfrm_mark_get(attrs, &xp->mark);
1499
1500 return xp;
1501 error:
1502 *errp = err;
1503 xp->walk.dead = 1;
1504 xfrm_policy_destroy(xp);
1505 return NULL;
1506 }
1507
1508 static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1509 struct nlattr **attrs)
1510 {
1511 struct net *net = sock_net(skb->sk);
1512 struct xfrm_userpolicy_info *p = nlmsg_data(nlh);
1513 struct xfrm_policy *xp;
1514 struct km_event c;
1515 int err;
1516 int excl;
1517
1518 err = verify_newpolicy_info(p);
1519 if (err)
1520 return err;
1521 err = verify_sec_ctx_len(attrs);
1522 if (err)
1523 return err;
1524
1525 xp = xfrm_policy_construct(net, p, attrs, &err);
1526 if (!xp)
1527 return err;
1528
1529 	/* shouldn't excl be based on nlh flags??
1530 	 * Aha! this is really anti-netlink, i.e. more pfkey-derived:
1531 	 * in netlink excl is a flag and you wouldn't need
1532 	 * a separate type XFRM_MSG_UPDPOLICY - JHS */
1533 excl = nlh->nlmsg_type == XFRM_MSG_NEWPOLICY;
1534 err = xfrm_policy_insert(p->dir, xp, excl);
1535 xfrm_audit_policy_add(xp, err ? 0 : 1, true);
1536
1537 if (err) {
1538 security_xfrm_policy_free(xp->security);
1539 kfree(xp);
1540 return err;
1541 }
1542
1543 c.event = nlh->nlmsg_type;
1544 c.seq = nlh->nlmsg_seq;
1545 c.portid = nlh->nlmsg_pid;
1546 km_policy_notify(xp, p->dir, &c);
1547
1548 xfrm_pol_put(xp);
1549
1550 return 0;
1551 }
1552
1553 static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
1554 {
1555 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
1556 int i;
1557
1558 if (xp->xfrm_nr == 0)
1559 return 0;
1560
1561 for (i = 0; i < xp->xfrm_nr; i++) {
1562 struct xfrm_user_tmpl *up = &vec[i];
1563 struct xfrm_tmpl *kp = &xp->xfrm_vec[i];
1564
1565 memset(up, 0, sizeof(*up));
1566 memcpy(&up->id, &kp->id, sizeof(up->id));
1567 up->family = kp->encap_family;
1568 memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr));
1569 up->reqid = kp->reqid;
1570 up->mode = kp->mode;
1571 up->share = kp->share;
1572 up->optional = kp->optional;
1573 up->aalgos = kp->aalgos;
1574 up->ealgos = kp->ealgos;
1575 up->calgos = kp->calgos;
1576 }
1577
1578 return nla_put(skb, XFRMA_TMPL,
1579 sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr, vec);
1580 }
1581
1582 static inline int copy_to_user_state_sec_ctx(struct xfrm_state *x, struct sk_buff *skb)
1583 {
1584 if (x->security) {
1585 return copy_sec_ctx(x->security, skb);
1586 }
1587 return 0;
1588 }
1589
1590 static inline int copy_to_user_sec_ctx(struct xfrm_policy *xp, struct sk_buff *skb)
1591 {
1592 if (xp->security)
1593 return copy_sec_ctx(xp->security, skb);
1594 return 0;
1595 }
1596 static inline size_t userpolicy_type_attrsize(void)
1597 {
1598 #ifdef CONFIG_XFRM_SUB_POLICY
1599 return nla_total_size(sizeof(struct xfrm_userpolicy_type));
1600 #else
1601 return 0;
1602 #endif
1603 }
1604
1605 #ifdef CONFIG_XFRM_SUB_POLICY
1606 static int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
1607 {
1608 struct xfrm_userpolicy_type upt = {
1609 .type = type,
1610 };
1611
1612 return nla_put(skb, XFRMA_POLICY_TYPE, sizeof(upt), &upt);
1613 }
1614
1615 #else
1616 static inline int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
1617 {
1618 return 0;
1619 }
1620 #endif
1621
1622 static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr)
1623 {
1624 struct xfrm_dump_info *sp = ptr;
1625 struct xfrm_userpolicy_info *p;
1626 struct sk_buff *in_skb = sp->in_skb;
1627 struct sk_buff *skb = sp->out_skb;
1628 struct nlmsghdr *nlh;
1629 int err;
1630
1631 nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, sp->nlmsg_seq,
1632 XFRM_MSG_NEWPOLICY, sizeof(*p), sp->nlmsg_flags);
1633 if (nlh == NULL)
1634 return -EMSGSIZE;
1635
1636 p = nlmsg_data(nlh);
1637 copy_to_user_policy(xp, p, dir);
1638 err = copy_to_user_tmpl(xp, skb);
1639 if (!err)
1640 err = copy_to_user_sec_ctx(xp, skb);
1641 if (!err)
1642 err = copy_to_user_policy_type(xp->type, skb);
1643 if (!err)
1644 err = xfrm_mark_put(skb, &xp->mark);
1645 if (err) {
1646 nlmsg_cancel(skb, nlh);
1647 return err;
1648 }
1649 nlmsg_end(skb, nlh);
1650 return 0;
1651 }
1652
1653 static int xfrm_dump_policy_done(struct netlink_callback *cb)
1654 {
1655 struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
1656 struct net *net = sock_net(cb->skb->sk);
1657
1658 xfrm_policy_walk_done(walk, net);
1659 return 0;
1660 }
1661
1662 static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb)
1663 {
1664 struct net *net = sock_net(skb->sk);
1665 struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
1666 struct xfrm_dump_info info;
1667
1668 BUILD_BUG_ON(sizeof(struct xfrm_policy_walk) >
1669 sizeof(cb->args) - sizeof(cb->args[0]));
1670
1671 info.in_skb = cb->skb;
1672 info.out_skb = skb;
1673 info.nlmsg_seq = cb->nlh->nlmsg_seq;
1674 info.nlmsg_flags = NLM_F_MULTI;
1675
1676 if (!cb->args[0]) {
1677 cb->args[0] = 1;
1678 xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY);
1679 }
1680
1681 (void) xfrm_policy_walk(net, walk, dump_one_policy, &info);
1682
1683 return skb->len;
1684 }
1685
1686 static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb,
1687 struct xfrm_policy *xp,
1688 int dir, u32 seq)
1689 {
1690 struct xfrm_dump_info info;
1691 struct sk_buff *skb;
1692 int err;
1693
1694 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1695 if (!skb)
1696 return ERR_PTR(-ENOMEM);
1697
1698 info.in_skb = in_skb;
1699 info.out_skb = skb;
1700 info.nlmsg_seq = seq;
1701 info.nlmsg_flags = 0;
1702
1703 err = dump_one_policy(xp, dir, 0, &info);
1704 if (err) {
1705 kfree_skb(skb);
1706 return ERR_PTR(err);
1707 }
1708
1709 return skb;
1710 }
1711
1712 static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1713 struct nlattr **attrs)
1714 {
1715 struct net *net = sock_net(skb->sk);
1716 struct xfrm_policy *xp;
1717 struct xfrm_userpolicy_id *p;
1718 u8 type = XFRM_POLICY_TYPE_MAIN;
1719 int err;
1720 struct km_event c;
1721 int delete;
1722 struct xfrm_mark m;
1723 u32 mark = xfrm_mark_get(attrs, &m);
1724
1725 p = nlmsg_data(nlh);
1726 delete = nlh->nlmsg_type == XFRM_MSG_DELPOLICY;
1727
1728 err = copy_from_user_policy_type(&type, attrs);
1729 if (err)
1730 return err;
1731
1732 err = verify_policy_dir(p->dir);
1733 if (err)
1734 return err;
1735
1736 if (p->index)
1737 xp = xfrm_policy_byid(net, mark, type, p->dir, p->index, delete, &err);
1738 else {
1739 struct nlattr *rt = attrs[XFRMA_SEC_CTX];
1740 struct xfrm_sec_ctx *ctx;
1741
1742 err = verify_sec_ctx_len(attrs);
1743 if (err)
1744 return err;
1745
1746 ctx = NULL;
1747 if (rt) {
1748 struct xfrm_user_sec_ctx *uctx = nla_data(rt);
1749
1750 err = security_xfrm_policy_alloc(&ctx, uctx, GFP_KERNEL);
1751 if (err)
1752 return err;
1753 }
1754 xp = xfrm_policy_bysel_ctx(net, mark, type, p->dir, &p->sel,
1755 ctx, delete, &err);
1756 security_xfrm_policy_free(ctx);
1757 }
1758 if (xp == NULL)
1759 return -ENOENT;
1760
1761 if (!delete) {
1762 struct sk_buff *resp_skb;
1763
1764 resp_skb = xfrm_policy_netlink(skb, xp, p->dir, nlh->nlmsg_seq);
1765 if (IS_ERR(resp_skb)) {
1766 err = PTR_ERR(resp_skb);
1767 } else {
1768 err = nlmsg_unicast(net->xfrm.nlsk, resp_skb,
1769 NETLINK_CB(skb).portid);
1770 }
1771 } else {
1772 xfrm_audit_policy_delete(xp, err ? 0 : 1, true);
1773
1774 if (err != 0)
1775 goto out;
1776
1777 c.data.byid = p->index;
1778 c.event = nlh->nlmsg_type;
1779 c.seq = nlh->nlmsg_seq;
1780 c.portid = nlh->nlmsg_pid;
1781 km_policy_notify(xp, p->dir, &c);
1782 }
1783
1784 out:
1785 xfrm_pol_put(xp);
1786 if (delete && err == 0)
1787 xfrm_garbage_collect(net);
1788 return err;
1789 }
1790
1791 static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
1792 struct nlattr **attrs)
1793 {
1794 struct net *net = sock_net(skb->sk);
1795 struct km_event c;
1796 struct xfrm_usersa_flush *p = nlmsg_data(nlh);
1797 int err;
1798
1799 err = xfrm_state_flush(net, p->proto, true);
1800 if (err) {
1801 if (err == -ESRCH) /* empty table */
1802 return 0;
1803 return err;
1804 }
1805 c.data.proto = p->proto;
1806 c.event = nlh->nlmsg_type;
1807 c.seq = nlh->nlmsg_seq;
1808 c.portid = nlh->nlmsg_pid;
1809 c.net = net;
1810 km_state_notify(NULL, &c);
1811
1812 return 0;
1813 }
1814
1815 static inline size_t xfrm_aevent_msgsize(struct xfrm_state *x)
1816 {
1817 size_t replay_size = x->replay_esn ?
1818 xfrm_replay_state_esn_len(x->replay_esn) :
1819 sizeof(struct xfrm_replay_state);
1820
1821 return NLMSG_ALIGN(sizeof(struct xfrm_aevent_id))
1822 + nla_total_size(replay_size)
1823 + nla_total_size_64bit(sizeof(struct xfrm_lifetime_cur))
1824 + nla_total_size(sizeof(struct xfrm_mark))
1825 + nla_total_size(4) /* XFRM_AE_RTHR */
1826 + nla_total_size(4); /* XFRM_AE_ETHR */
1827 }
1828
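/* Fill an XFRM_MSG_NEWAE message with the state's replay counters and current
 * lifetime, plus the replay/expiry thresholds when the corresponding
 * XFRM_AE_* flags are set.
 */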
1829 static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c)
1830 {
1831 struct xfrm_aevent_id *id;
1832 struct nlmsghdr *nlh;
1833 int err;
1834
1835 nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_NEWAE, sizeof(*id), 0);
1836 if (nlh == NULL)
1837 return -EMSGSIZE;
1838
1839 id = nlmsg_data(nlh);
1840 memcpy(&id->sa_id.daddr, &x->id.daddr, sizeof(x->id.daddr));
1841 id->sa_id.spi = x->id.spi;
1842 id->sa_id.family = x->props.family;
1843 id->sa_id.proto = x->id.proto;
1844 memcpy(&id->saddr, &x->props.saddr, sizeof(x->props.saddr));
1845 id->reqid = x->props.reqid;
1846 id->flags = c->data.aevent;
1847
1848 if (x->replay_esn) {
1849 err = nla_put(skb, XFRMA_REPLAY_ESN_VAL,
1850 xfrm_replay_state_esn_len(x->replay_esn),
1851 x->replay_esn);
1852 } else {
1853 err = nla_put(skb, XFRMA_REPLAY_VAL, sizeof(x->replay),
1854 &x->replay);
1855 }
1856 if (err)
1857 goto out_cancel;
1858 err = nla_put_64bit(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft,
1859 XFRMA_PAD);
1860 if (err)
1861 goto out_cancel;
1862
1863 if (id->flags & XFRM_AE_RTHR) {
1864 err = nla_put_u32(skb, XFRMA_REPLAY_THRESH, x->replay_maxdiff);
1865 if (err)
1866 goto out_cancel;
1867 }
1868 if (id->flags & XFRM_AE_ETHR) {
1869 err = nla_put_u32(skb, XFRMA_ETIMER_THRESH,
1870 x->replay_maxage * 10 / HZ);
1871 if (err)
1872 goto out_cancel;
1873 }
1874 err = xfrm_mark_put(skb, &x->mark);
1875 if (err)
1876 goto out_cancel;
1877
1878 nlmsg_end(skb, nlh);
1879 return 0;
1880
1881 out_cancel:
1882 nlmsg_cancel(skb, nlh);
1883 return err;
1884 }
1885
1886 static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
1887 struct nlattr **attrs)
1888 {
1889 struct net *net = sock_net(skb->sk);
1890 struct xfrm_state *x;
1891 struct sk_buff *r_skb;
1892 int err;
1893 struct km_event c;
1894 u32 mark;
1895 struct xfrm_mark m;
1896 struct xfrm_aevent_id *p = nlmsg_data(nlh);
1897 struct xfrm_usersa_id *id = &p->sa_id;
1898
1899 mark = xfrm_mark_get(attrs, &m);
1900
1901 x = xfrm_state_lookup(net, mark, &id->daddr, id->spi, id->proto, id->family);
1902 if (x == NULL)
1903 return -ESRCH;
1904
1905 r_skb = nlmsg_new(xfrm_aevent_msgsize(x), GFP_ATOMIC);
1906 if (r_skb == NULL) {
1907 xfrm_state_put(x);
1908 return -ENOMEM;
1909 }
1910
1911 	/*
1912 	 * XXX: is this lock really needed - none of the other get
1913 	 * handlers takes the lock (the concern is things getting updated
1914 	 * while we are still reading) - jhs
1915 	 */
1916 spin_lock_bh(&x->lock);
1917 c.data.aevent = p->flags;
1918 c.seq = nlh->nlmsg_seq;
1919 c.portid = nlh->nlmsg_pid;
1920
1921 if (build_aevent(r_skb, x, &c) < 0)
1922 BUG();
1923 err = nlmsg_unicast(net->xfrm.nlsk, r_skb, NETLINK_CB(skb).portid);
1924 spin_unlock_bh(&x->lock);
1925 xfrm_state_put(x);
1926 return err;
1927 }
1928
1929 static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
1930 struct nlattr **attrs)
1931 {
1932 struct net *net = sock_net(skb->sk);
1933 struct xfrm_state *x;
1934 struct km_event c;
1935 int err = -EINVAL;
1936 u32 mark = 0;
1937 struct xfrm_mark m;
1938 struct xfrm_aevent_id *p = nlmsg_data(nlh);
1939 struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
1940 struct nlattr *re = attrs[XFRMA_REPLAY_ESN_VAL];
1941 struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
1942 struct nlattr *et = attrs[XFRMA_ETIMER_THRESH];
1943 struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];
1944
1945 if (!lt && !rp && !re && !et && !rt)
1946 return err;
1947
1948 /* pedantic mode - thou shalt sayeth replaceth */
1949 if (!(nlh->nlmsg_flags&NLM_F_REPLACE))
1950 return err;
1951
1952 mark = xfrm_mark_get(attrs, &m);
1953
1954 x = xfrm_state_lookup(net, mark, &p->sa_id.daddr, p->sa_id.spi, p->sa_id.proto, p->sa_id.family);
1955 if (x == NULL)
1956 return -ESRCH;
1957
1958 if (x->km.state != XFRM_STATE_VALID)
1959 goto out;
1960
1961 err = xfrm_replay_verify_len(x->replay_esn, re);
1962 if (err)
1963 goto out;
1964
1965 spin_lock_bh(&x->lock);
1966 xfrm_update_ae_params(x, attrs, 1);
1967 spin_unlock_bh(&x->lock);
1968
1969 c.event = nlh->nlmsg_type;
1970 c.seq = nlh->nlmsg_seq;
1971 c.portid = nlh->nlmsg_pid;
1972 c.data.aevent = XFRM_AE_CU;
1973 km_state_notify(x, &c);
1974 err = 0;
1975 out:
1976 xfrm_state_put(x);
1977 return err;
1978 }
1979
1980 static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1981 struct nlattr **attrs)
1982 {
1983 struct net *net = sock_net(skb->sk);
1984 struct km_event c;
1985 u8 type = XFRM_POLICY_TYPE_MAIN;
1986 int err;
1987
1988 err = copy_from_user_policy_type(&type, attrs);
1989 if (err)
1990 return err;
1991
1992 err = xfrm_policy_flush(net, type, true);
1993 if (err) {
1994 if (err == -ESRCH) /* empty table */
1995 return 0;
1996 return err;
1997 }
1998
1999 c.data.type = type;
2000 c.event = nlh->nlmsg_type;
2001 c.seq = nlh->nlmsg_seq;
2002 c.portid = nlh->nlmsg_pid;
2003 c.net = net;
2004 km_policy_notify(NULL, 0, &c);
2005 return 0;
2006 }
2007
2008 static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
2009 struct nlattr **attrs)
2010 {
2011 struct net *net = sock_net(skb->sk);
2012 struct xfrm_policy *xp;
2013 struct xfrm_user_polexpire *up = nlmsg_data(nlh);
2014 struct xfrm_userpolicy_info *p = &up->pol;
2015 u8 type = XFRM_POLICY_TYPE_MAIN;
2016 int err = -ENOENT;
2017 struct xfrm_mark m;
2018 u32 mark = xfrm_mark_get(attrs, &m);
2019
2020 err = copy_from_user_policy_type(&type, attrs);
2021 if (err)
2022 return err;
2023
2024 err = verify_policy_dir(p->dir);
2025 if (err)
2026 return err;
2027
2028 if (p->index)
2029 xp = xfrm_policy_byid(net, mark, type, p->dir, p->index, 0, &err);
2030 else {
2031 struct nlattr *rt = attrs[XFRMA_SEC_CTX];
2032 struct xfrm_sec_ctx *ctx;
2033
2034 err = verify_sec_ctx_len(attrs);
2035 if (err)
2036 return err;
2037
2038 ctx = NULL;
2039 if (rt) {
2040 struct xfrm_user_sec_ctx *uctx = nla_data(rt);
2041
2042 err = security_xfrm_policy_alloc(&ctx, uctx, GFP_KERNEL);
2043 if (err)
2044 return err;
2045 }
2046 xp = xfrm_policy_bysel_ctx(net, mark, type, p->dir,
2047 &p->sel, ctx, 0, &err);
2048 security_xfrm_policy_free(ctx);
2049 }
2050 if (xp == NULL)
2051 return -ENOENT;
2052
2053 if (unlikely(xp->walk.dead))
2054 goto out;
2055
2056 err = 0;
2057 if (up->hard) {
2058 xfrm_policy_delete(xp, p->dir);
2059 xfrm_audit_policy_delete(xp, 1, true);
2060 }
2061 km_policy_expired(xp, p->dir, up->hard, nlh->nlmsg_pid);
2062
2063 out:
2064 xfrm_pol_put(xp);
2065 return err;
2066 }
2067
2068 static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
2069 struct nlattr **attrs)
2070 {
2071 struct net *net = sock_net(skb->sk);
2072 struct xfrm_state *x;
2073 int err;
2074 struct xfrm_user_expire *ue = nlmsg_data(nlh);
2075 struct xfrm_usersa_info *p = &ue->state;
2076 struct xfrm_mark m;
2077 u32 mark = xfrm_mark_get(attrs, &m);
2078
2079 x = xfrm_state_lookup(net, mark, &p->id.daddr, p->id.spi, p->id.proto, p->family);
2080
2081 err = -ENOENT;
2082 if (x == NULL)
2083 return err;
2084
2085 spin_lock_bh(&x->lock);
2086 err = -EINVAL;
2087 if (x->km.state != XFRM_STATE_VALID)
2088 goto out;
2089 km_state_expired(x, ue->hard, nlh->nlmsg_pid);
2090
2091 if (ue->hard) {
2092 __xfrm_state_delete(x);
2093 xfrm_audit_state_delete(x, 1, true);
2094 }
2095 err = 0;
2096 out:
2097 spin_unlock_bh(&x->lock);
2098 xfrm_state_put(x);
2099 return err;
2100 }
2101
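/* Handle a userspace XFRM_MSG_ACQUIRE: build a temporary state and policy
 * from the message and call km_query() for each policy template.
 */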
2102 static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
2103 struct nlattr **attrs)
2104 {
2105 struct net *net = sock_net(skb->sk);
2106 struct xfrm_policy *xp;
2107 struct xfrm_user_tmpl *ut;
2108 int i;
2109 struct nlattr *rt = attrs[XFRMA_TMPL];
2110 struct xfrm_mark mark;
2111
2112 struct xfrm_user_acquire *ua = nlmsg_data(nlh);
2113 struct xfrm_state *x = xfrm_state_alloc(net);
2114 int err = -ENOMEM;
2115
2116 if (!x)
2117 goto nomem;
2118
2119 xfrm_mark_get(attrs, &mark);
2120
2121 err = verify_newpolicy_info(&ua->policy);
2122 if (err)
2123 goto free_state;
2124
2125 /* build an XP */
2126 xp = xfrm_policy_construct(net, &ua->policy, attrs, &err);
2127 if (!xp)
2128 goto free_state;
2129
2130 memcpy(&x->id, &ua->id, sizeof(ua->id));
2131 memcpy(&x->props.saddr, &ua->saddr, sizeof(ua->saddr));
2132 memcpy(&x->sel, &ua->sel, sizeof(ua->sel));
2133 xp->mark.m = x->mark.m = mark.m;
2134 xp->mark.v = x->mark.v = mark.v;
2135 ut = nla_data(rt);
2136 	/* extract the templates and for each call km_query */
2137 for (i = 0; i < xp->xfrm_nr; i++, ut++) {
2138 struct xfrm_tmpl *t = &xp->xfrm_vec[i];
2139 memcpy(&x->id, &t->id, sizeof(x->id));
2140 x->props.mode = t->mode;
2141 x->props.reqid = t->reqid;
2142 x->props.family = ut->family;
2143 t->aalgos = ua->aalgos;
2144 t->ealgos = ua->ealgos;
2145 t->calgos = ua->calgos;
2146 err = km_query(x, t, xp);
2147
2148 }
2149
2150 kfree(x);
2151 kfree(xp);
2152
2153 return 0;
2154
2155 free_state:
2156 kfree(x);
2157 nomem:
2158 return err;
2159 }
2160
2161 #ifdef CONFIG_XFRM_MIGRATE
2162 static int copy_from_user_migrate(struct xfrm_migrate *ma,
2163 struct xfrm_kmaddress *k,
2164 struct nlattr **attrs, int *num)
2165 {
2166 struct nlattr *rt = attrs[XFRMA_MIGRATE];
2167 struct xfrm_user_migrate *um;
2168 int i, num_migrate;
2169
2170 if (k != NULL) {
2171 struct xfrm_user_kmaddress *uk;
2172
2173 uk = nla_data(attrs[XFRMA_KMADDRESS]);
2174 memcpy(&k->local, &uk->local, sizeof(k->local));
2175 memcpy(&k->remote, &uk->remote, sizeof(k->remote));
2176 k->family = uk->family;
2177 k->reserved = uk->reserved;
2178 }
2179
2180 um = nla_data(rt);
2181 num_migrate = nla_len(rt) / sizeof(*um);
2182
2183 if (num_migrate <= 0 || num_migrate > XFRM_MAX_DEPTH)
2184 return -EINVAL;
2185
2186 for (i = 0; i < num_migrate; i++, um++, ma++) {
2187 memcpy(&ma->old_daddr, &um->old_daddr, sizeof(ma->old_daddr));
2188 memcpy(&ma->old_saddr, &um->old_saddr, sizeof(ma->old_saddr));
2189 memcpy(&ma->new_daddr, &um->new_daddr, sizeof(ma->new_daddr));
2190 memcpy(&ma->new_saddr, &um->new_saddr, sizeof(ma->new_saddr));
2191
2192 ma->proto = um->proto;
2193 ma->mode = um->mode;
2194 ma->reqid = um->reqid;
2195
2196 ma->old_family = um->old_family;
2197 ma->new_family = um->new_family;
2198 }
2199
2200 *num = i;
2201 return 0;
2202 }
2203
2204 static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
2205 struct nlattr **attrs)
2206 {
2207 struct xfrm_userpolicy_id *pi = nlmsg_data(nlh);
2208 struct xfrm_migrate m[XFRM_MAX_DEPTH];
2209 struct xfrm_kmaddress km, *kmp;
2210 u8 type;
2211 int err;
2212 int n = 0;
2213 struct net *net = sock_net(skb->sk);
2214
2215 if (attrs[XFRMA_MIGRATE] == NULL)
2216 return -EINVAL;
2217
2218 kmp = attrs[XFRMA_KMADDRESS] ? &km : NULL;
2219
2220 err = copy_from_user_policy_type(&type, attrs);
2221 if (err)
2222 return err;
2223
2224 err = copy_from_user_migrate((struct xfrm_migrate *)m, kmp, attrs, &n);
2225 if (err)
2226 return err;
2227
2228 if (!n)
2229 return 0;
2230
2231 xfrm_migrate(&pi->sel, pi->dir, type, m, n, kmp, net);
2232
2233 return 0;
2234 }
2235 #else
2236 static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
2237 struct nlattr **attrs)
2238 {
2239 return -ENOPROTOOPT;
2240 }
2241 #endif
2242
2243 #ifdef CONFIG_XFRM_MIGRATE
2244 static int copy_to_user_migrate(const struct xfrm_migrate *m, struct sk_buff *skb)
2245 {
2246 struct xfrm_user_migrate um;
2247
2248 memset(&um, 0, sizeof(um));
2249 um.proto = m->proto;
2250 um.mode = m->mode;
2251 um.reqid = m->reqid;
2252 um.old_family = m->old_family;
2253 memcpy(&um.old_daddr, &m->old_daddr, sizeof(um.old_daddr));
2254 memcpy(&um.old_saddr, &m->old_saddr, sizeof(um.old_saddr));
2255 um.new_family = m->new_family;
2256 memcpy(&um.new_daddr, &m->new_daddr, sizeof(um.new_daddr));
2257 memcpy(&um.new_saddr, &m->new_saddr, sizeof(um.new_saddr));
2258
2259 return nla_put(skb, XFRMA_MIGRATE, sizeof(um), &um);
2260 }
2261
2262 static int copy_to_user_kmaddress(const struct xfrm_kmaddress *k, struct sk_buff *skb)
2263 {
2264 struct xfrm_user_kmaddress uk;
2265
2266 memset(&uk, 0, sizeof(uk));
2267 uk.family = k->family;
2268 uk.reserved = k->reserved;
2269 memcpy(&uk.local, &k->local, sizeof(uk.local));
2270 memcpy(&uk.remote, &k->remote, sizeof(uk.remote));
2271
2272 return nla_put(skb, XFRMA_KMADDRESS, sizeof(uk), &uk);
2273 }
2274
2275 static inline size_t xfrm_migrate_msgsize(int num_migrate, int with_kma)
2276 {
2277 return NLMSG_ALIGN(sizeof(struct xfrm_userpolicy_id))
2278 + (with_kma ? nla_total_size(sizeof(struct xfrm_kmaddress)) : 0)
2279 + nla_total_size(sizeof(struct xfrm_user_migrate) * num_migrate)
2280 + userpolicy_type_attrsize();
2281 }
2282
2283 static int build_migrate(struct sk_buff *skb, const struct xfrm_migrate *m,
2284 int num_migrate, const struct xfrm_kmaddress *k,
2285 const struct xfrm_selector *sel, u8 dir, u8 type)
2286 {
2287 const struct xfrm_migrate *mp;
2288 struct xfrm_userpolicy_id *pol_id;
2289 struct nlmsghdr *nlh;
2290 int i, err;
2291
2292 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MIGRATE, sizeof(*pol_id), 0);
2293 if (nlh == NULL)
2294 return -EMSGSIZE;
2295
2296 pol_id = nlmsg_data(nlh);
2297 /* copy data from selector, dir, and type to the pol_id */
2298 memset(pol_id, 0, sizeof(*pol_id));
2299 memcpy(&pol_id->sel, sel, sizeof(pol_id->sel));
2300 pol_id->dir = dir;
2301
2302 if (k != NULL) {
2303 err = copy_to_user_kmaddress(k, skb);
2304 if (err)
2305 goto out_cancel;
2306 }
2307 err = copy_to_user_policy_type(type, skb);
2308 if (err)
2309 goto out_cancel;
2310 for (i = 0, mp = m; i < num_migrate; i++, mp++) {
2311 err = copy_to_user_migrate(mp, skb);
2312 if (err)
2313 goto out_cancel;
2314 }
2315
2316 nlmsg_end(skb, nlh);
2317 return 0;
2318
2319 out_cancel:
2320 nlmsg_cancel(skb, nlh);
2321 return err;
2322 }
2323
2324 static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
2325 const struct xfrm_migrate *m, int num_migrate,
2326 const struct xfrm_kmaddress *k)
2327 {
2328 struct net *net = &init_net;
2329 struct sk_buff *skb;
2330
2331 skb = nlmsg_new(xfrm_migrate_msgsize(num_migrate, !!k), GFP_ATOMIC);
2332 if (skb == NULL)
2333 return -ENOMEM;
2334
2335 /* build the migrate message; a failure here means xfrm_migrate_msgsize() under-estimated */
2336 if (build_migrate(skb, m, num_migrate, k, sel, dir, type) < 0)
2337 BUG();
2338
2339 return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_MIGRATE);
2340 }
2341 #else
2342 static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
2343 const struct xfrm_migrate *m, int num_migrate,
2344 const struct xfrm_kmaddress *k)
2345 {
2346 return -ENOPROTOOPT;
2347 }
2348 #endif
2349
2350 #define XMSGSIZE(type) sizeof(struct type)
2351
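/*
 * Minimum fixed-header length for each request, indexed by
 * (nlmsg_type - XFRM_MSG_BASE).  xfrm_user_rcv_msg() hands this to
 * nlmsg_parse() so that attribute parsing starts after the per-message
 * header rather than right after the netlink header.
 */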
2352 static const int xfrm_msg_min[XFRM_NR_MSGTYPES] = {
2353 [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
2354 [XFRM_MSG_DELSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
2355 [XFRM_MSG_GETSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
2356 [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
2357 [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
2358 [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
2359 [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userspi_info),
2360 [XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_acquire),
2361 [XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_expire),
2362 [XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
2363 [XFRM_MSG_UPDSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
2364 [XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_polexpire),
2365 [XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_flush),
2366 [XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = 0,
2367 [XFRM_MSG_NEWAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
2368 [XFRM_MSG_GETAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
2369 [XFRM_MSG_REPORT - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_report),
2370 [XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
2371 [XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = sizeof(u32),
2372 [XFRM_MSG_NEWSPDINFO - XFRM_MSG_BASE] = sizeof(u32),
2373 [XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = sizeof(u32),
2374 };
2375
2376 #undef XMSGSIZE
2377
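/*
 * Attribute validation policy.  For the struct-valued entries, .len is the
 * minimum payload length netlink will accept; the integer-typed entries
 * are checked against their natural sizes.
 */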
2378 static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
2379 [XFRMA_SA] = { .len = sizeof(struct xfrm_usersa_info)},
2380 [XFRMA_POLICY] = { .len = sizeof(struct xfrm_userpolicy_info)},
2381 [XFRMA_LASTUSED] = { .type = NLA_U64},
2382 [XFRMA_ALG_AUTH_TRUNC] = { .len = sizeof(struct xfrm_algo_auth)},
2383 [XFRMA_ALG_AEAD] = { .len = sizeof(struct xfrm_algo_aead) },
2384 [XFRMA_ALG_AUTH] = { .len = sizeof(struct xfrm_algo) },
2385 [XFRMA_ALG_CRYPT] = { .len = sizeof(struct xfrm_algo) },
2386 [XFRMA_ALG_COMP] = { .len = sizeof(struct xfrm_algo) },
2387 [XFRMA_ENCAP] = { .len = sizeof(struct xfrm_encap_tmpl) },
2388 [XFRMA_TMPL] = { .len = sizeof(struct xfrm_user_tmpl) },
2389 [XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_sec_ctx) },
2390 [XFRMA_LTIME_VAL] = { .len = sizeof(struct xfrm_lifetime_cur) },
2391 [XFRMA_REPLAY_VAL] = { .len = sizeof(struct xfrm_replay_state) },
2392 [XFRMA_REPLAY_THRESH] = { .type = NLA_U32 },
2393 [XFRMA_ETIMER_THRESH] = { .type = NLA_U32 },
2394 [XFRMA_SRCADDR] = { .len = sizeof(xfrm_address_t) },
2395 [XFRMA_COADDR] = { .len = sizeof(xfrm_address_t) },
2396 [XFRMA_POLICY_TYPE] = { .len = sizeof(struct xfrm_userpolicy_type)},
2397 [XFRMA_MIGRATE] = { .len = sizeof(struct xfrm_user_migrate) },
2398 [XFRMA_KMADDRESS] = { .len = sizeof(struct xfrm_user_kmaddress) },
2399 [XFRMA_MARK] = { .len = sizeof(struct xfrm_mark) },
2400 [XFRMA_TFCPAD] = { .type = NLA_U32 },
2401 [XFRMA_REPLAY_ESN_VAL] = { .len = sizeof(struct xfrm_replay_state_esn) },
2402 [XFRMA_SA_EXTRA_FLAGS] = { .type = NLA_U32 },
2403 [XFRMA_PROTO] = { .type = NLA_U8 },
2404 [XFRMA_ADDRESS_FILTER] = { .len = sizeof(struct xfrm_address_filter) },
2405 };
2406
2407 static const struct nla_policy xfrma_spd_policy[XFRMA_SPD_MAX+1] = {
2408 [XFRMA_SPD_IPV4_HTHRESH] = { .len = sizeof(struct xfrmu_spdhthresh) },
2409 [XFRMA_SPD_IPV6_HTHRESH] = { .len = sizeof(struct xfrmu_spdhthresh) },
2410 };
2411
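/*
 * Dispatch table: one entry per user-visible message type.  .doit handles
 * a single request, .dump/.done drive multi-part dumps, and an entry may
 * override the default attribute policy (as XFRM_MSG_NEWSPDINFO does via
 * xfrma_spd_policy) through .nla_pol/.nla_max.
 */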
2412 static const struct xfrm_link {
2413 int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **);
2414 int (*dump)(struct sk_buff *, struct netlink_callback *);
2415 int (*done)(struct netlink_callback *);
2416 const struct nla_policy *nla_pol;
2417 int nla_max;
2418 } xfrm_dispatch[XFRM_NR_MSGTYPES] = {
2419 [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa },
2420 [XFRM_MSG_DELSA - XFRM_MSG_BASE] = { .doit = xfrm_del_sa },
2421 [XFRM_MSG_GETSA - XFRM_MSG_BASE] = { .doit = xfrm_get_sa,
2422 .dump = xfrm_dump_sa,
2423 .done = xfrm_dump_sa_done },
2424 [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
2425 [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy },
2426 [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy,
2427 .dump = xfrm_dump_policy,
2428 .done = xfrm_dump_policy_done },
2429 [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi },
2430 [XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_acquire },
2431 [XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_sa_expire },
2432 [XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
2433 [XFRM_MSG_UPDSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa },
2434 [XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_pol_expire},
2435 [XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = { .doit = xfrm_flush_sa },
2436 [XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_flush_policy },
2437 [XFRM_MSG_NEWAE - XFRM_MSG_BASE] = { .doit = xfrm_new_ae },
2438 [XFRM_MSG_GETAE - XFRM_MSG_BASE] = { .doit = xfrm_get_ae },
2439 [XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = { .doit = xfrm_do_migrate },
2440 [XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_sadinfo },
2441 [XFRM_MSG_NEWSPDINFO - XFRM_MSG_BASE] = { .doit = xfrm_set_spdinfo,
2442 .nla_pol = xfrma_spd_policy,
2443 .nla_max = XFRMA_SPD_MAX },
2444 [XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_spdinfo },
2445 };
2446
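/*
 * Entry point for every NETLINK_XFRM request.  GETSA/GETPOLICY requests
 * carrying NLM_F_DUMP are converted into netlink dumps; everything else is
 * parsed against xfrm_msg_min[] and xfrma_policy (or the per-entry
 * override) and passed to the matching .doit handler.
 */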
2447 static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
2448 {
2449 struct net *net = sock_net(skb->sk);
2450 struct nlattr *attrs[XFRMA_MAX+1];
2451 const struct xfrm_link *link;
2452 int type, err;
2453
2454 #ifdef CONFIG_COMPAT
2455 if (in_compat_syscall())
2456 return -EOPNOTSUPP;
2457 #endif
2458
2459 type = nlh->nlmsg_type;
2460 if (type > XFRM_MSG_MAX)
2461 return -EINVAL;
2462
2463 type -= XFRM_MSG_BASE;
2464 link = &xfrm_dispatch[type];
2465
2466 /* All operations require privileges, even GET */
2467 if (!netlink_net_capable(skb, CAP_NET_ADMIN))
2468 return -EPERM;
2469
2470 if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) ||
2471 type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) &&
2472 (nlh->nlmsg_flags & NLM_F_DUMP)) {
2473 if (link->dump == NULL)
2474 return -EINVAL;
2475
2476 {
2477 struct netlink_dump_control c = {
2478 .dump = link->dump,
2479 .done = link->done,
2480 };
2481 return netlink_dump_start(net->xfrm.nlsk, skb, nlh, &c);
2482 }
2483 }
2484
2485 err = nlmsg_parse(nlh, xfrm_msg_min[type], attrs,
2486 link->nla_max ? : XFRMA_MAX,
2487 link->nla_pol ? : xfrma_policy);
2488 if (err < 0)
2489 return err;
2490
2491 if (link->doit == NULL)
2492 return -EINVAL;
2493
2494 return link->doit(skb, nlh, attrs);
2495 }
2496
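/* All configuration requests for a netns are serialized by xfrm_cfg_mutex. */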
2497 static void xfrm_netlink_rcv(struct sk_buff *skb)
2498 {
2499 struct net *net = sock_net(skb->sk);
2500
2501 mutex_lock(&net->xfrm.xfrm_cfg_mutex);
2502 netlink_rcv_skb(skb, &xfrm_user_rcv_msg);
2503 mutex_unlock(&net->xfrm.xfrm_cfg_mutex);
2504 }
2505
2506 static inline size_t xfrm_expire_msgsize(void)
2507 {
2508 return NLMSG_ALIGN(sizeof(struct xfrm_user_expire))
2509 + nla_total_size(sizeof(struct xfrm_mark));
2510 }
2511
2512 static int build_expire(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c)
2513 {
2514 struct xfrm_user_expire *ue;
2515 struct nlmsghdr *nlh;
2516 int err;
2517
2518 nlh = nlmsg_put(skb, c->portid, 0, XFRM_MSG_EXPIRE, sizeof(*ue), 0);
2519 if (nlh == NULL)
2520 return -EMSGSIZE;
2521
2522 ue = nlmsg_data(nlh);
2523 copy_to_user_state(x, &ue->state);
2524 ue->hard = (c->data.hard != 0) ? 1 : 0;
2525
2526 err = xfrm_mark_put(skb, &x->mark);
2527 if (err)
2528 return err;
2529
2530 nlmsg_end(skb, nlh);
2531 return 0;
2532 }
2533
2534 static int xfrm_exp_state_notify(struct xfrm_state *x, const struct km_event *c)
2535 {
2536 struct net *net = xs_net(x);
2537 struct sk_buff *skb;
2538
2539 skb = nlmsg_new(xfrm_expire_msgsize(), GFP_ATOMIC);
2540 if (skb == NULL)
2541 return -ENOMEM;
2542
2543 if (build_expire(skb, x, c) < 0) {
2544 kfree_skb(skb);
2545 return -EMSGSIZE;
2546 }
2547
2548 return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_EXPIRE);
2549 }
2550
2551 static int xfrm_aevent_state_notify(struct xfrm_state *x, const struct km_event *c)
2552 {
2553 struct net *net = xs_net(x);
2554 struct sk_buff *skb;
2555
2556 skb = nlmsg_new(xfrm_aevent_msgsize(x), GFP_ATOMIC);
2557 if (skb == NULL)
2558 return -ENOMEM;
2559
2560 if (build_aevent(skb, x, c) < 0)
2561 BUG();
2562
2563 return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_AEVENTS);
2564 }
2565
2566 static int xfrm_notify_sa_flush(const struct km_event *c)
2567 {
2568 struct net *net = c->net;
2569 struct xfrm_usersa_flush *p;
2570 struct nlmsghdr *nlh;
2571 struct sk_buff *skb;
2572 int len = NLMSG_ALIGN(sizeof(struct xfrm_usersa_flush));
2573
2574 skb = nlmsg_new(len, GFP_ATOMIC);
2575 if (skb == NULL)
2576 return -ENOMEM;
2577
2578 nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_FLUSHSA, sizeof(*p), 0);
2579 if (nlh == NULL) {
2580 kfree_skb(skb);
2581 return -EMSGSIZE;
2582 }
2583
2584 p = nlmsg_data(nlh);
2585 p->proto = c->data.proto;
2586
2587 nlmsg_end(skb, nlh);
2588
2589 return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_SA);
2590 }
2591
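/*
 * Upper bound on the attribute payload needed to describe state x; used by
 * xfrm_notify_sa() below to size the notification skb before filling it.
 */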
2592 static inline size_t xfrm_sa_len(struct xfrm_state *x)
2593 {
2594 size_t l = 0;
2595 if (x->aead)
2596 l += nla_total_size(aead_len(x->aead));
2597 if (x->aalg) {
2598 l += nla_total_size(sizeof(struct xfrm_algo) +
2599 (x->aalg->alg_key_len + 7) / 8);
2600 l += nla_total_size(xfrm_alg_auth_len(x->aalg));
2601 }
2602 if (x->ealg)
2603 l += nla_total_size(xfrm_alg_len(x->ealg));
2604 if (x->calg)
2605 l += nla_total_size(sizeof(*x->calg));
2606 if (x->encap)
2607 l += nla_total_size(sizeof(*x->encap));
2608 if (x->tfcpad)
2609 l += nla_total_size(sizeof(x->tfcpad));
2610 if (x->replay_esn)
2611 l += nla_total_size(xfrm_replay_state_esn_len(x->replay_esn));
2612 else
2613 l += nla_total_size(sizeof(struct xfrm_replay_state));
2614 if (x->security)
2615 l += nla_total_size(sizeof(struct xfrm_user_sec_ctx) +
2616 x->security->ctx_len);
2617 if (x->coaddr)
2618 l += nla_total_size(sizeof(*x->coaddr));
2619 if (x->props.extra_flags)
2620 l += nla_total_size(sizeof(x->props.extra_flags));
2621
2622 /* Must count x->lastused as it may become non-zero behind our back. */
2623 l += nla_total_size_64bit(sizeof(u64));
2624
2625 return l;
2626 }
2627
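/*
 * For XFRM_MSG_DELSA the message header is the compact xfrm_usersa_id and
 * the full xfrm_usersa_info is nested inside an XFRMA_SA attribute; for
 * NEWSA/UPDSA the xfrm_usersa_info itself is the message header.
 */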
2628 static int xfrm_notify_sa(struct xfrm_state *x, const struct km_event *c)
2629 {
2630 struct net *net = xs_net(x);
2631 struct xfrm_usersa_info *p;
2632 struct xfrm_usersa_id *id;
2633 struct nlmsghdr *nlh;
2634 struct sk_buff *skb;
2635 int len = xfrm_sa_len(x);
2636 int headlen, err;
2637
2638 headlen = sizeof(*p);
2639 if (c->event == XFRM_MSG_DELSA) {
2640 len += nla_total_size(headlen);
2641 headlen = sizeof(*id);
2642 len += nla_total_size(sizeof(struct xfrm_mark));
2643 }
2644 len += NLMSG_ALIGN(headlen);
2645
2646 skb = nlmsg_new(len, GFP_ATOMIC);
2647 if (skb == NULL)
2648 return -ENOMEM;
2649
2650 nlh = nlmsg_put(skb, c->portid, c->seq, c->event, headlen, 0);
2651 err = -EMSGSIZE;
2652 if (nlh == NULL)
2653 goto out_free_skb;
2654
2655 p = nlmsg_data(nlh);
2656 if (c->event == XFRM_MSG_DELSA) {
2657 struct nlattr *attr;
2658
2659 id = nlmsg_data(nlh);
2660 memcpy(&id->daddr, &x->id.daddr, sizeof(id->daddr));
2661 id->spi = x->id.spi;
2662 id->family = x->props.family;
2663 id->proto = x->id.proto;
2664
2665 attr = nla_reserve(skb, XFRMA_SA, sizeof(*p));
2666 err = -EMSGSIZE;
2667 if (attr == NULL)
2668 goto out_free_skb;
2669
2670 p = nla_data(attr);
2671 }
2672 err = copy_to_user_state_extra(x, p, skb);
2673 if (err)
2674 goto out_free_skb;
2675
2676 nlmsg_end(skb, nlh);
2677
2678 return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_SA);
2679
2680 out_free_skb:
2681 kfree_skb(skb);
2682 return err;
2683 }
2684
2685 static int xfrm_send_state_notify(struct xfrm_state *x, const struct km_event *c)
2686 {
2687
2688 switch (c->event) {
2689 case XFRM_MSG_EXPIRE:
2690 return xfrm_exp_state_notify(x, c);
2691 case XFRM_MSG_NEWAE:
2692 return xfrm_aevent_state_notify(x, c);
2693 case XFRM_MSG_DELSA:
2694 case XFRM_MSG_UPDSA:
2695 case XFRM_MSG_NEWSA:
2696 return xfrm_notify_sa(x, c);
2697 case XFRM_MSG_FLUSHSA:
2698 return xfrm_notify_sa_flush(c);
2699 default:
2700 printk(KERN_NOTICE "xfrm_user: Unknown SA event %d\n",
2701 c->event);
2702 break;
2703 }
2704
2705 return 0;
2706
2707 }
2708
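/*
 * An ACQUIRE notification is an xfrm_user_acquire header followed by the
 * policy's XFRMA_TMPL array, the state's security context (if any), the
 * policy type attribute and the policy mark.  The size estimate below must
 * stay in sync with what build_acquire() emits; xfrm_send_acquire() BUGs
 * if it does not.
 */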
2709 static inline size_t xfrm_acquire_msgsize(struct xfrm_state *x,
2710 struct xfrm_policy *xp)
2711 {
2712 return NLMSG_ALIGN(sizeof(struct xfrm_user_acquire))
2713 + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
2714 + nla_total_size(sizeof(struct xfrm_mark))
2715 + nla_total_size(xfrm_user_sec_ctx_size(x->security))
2716 + userpolicy_type_attrsize();
2717 }
2718
2719 static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
2720 struct xfrm_tmpl *xt, struct xfrm_policy *xp)
2721 {
2722 __u32 seq = xfrm_get_acqseq();
2723 struct xfrm_user_acquire *ua;
2724 struct nlmsghdr *nlh;
2725 int err;
2726
2727 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_ACQUIRE, sizeof(*ua), 0);
2728 if (nlh == NULL)
2729 return -EMSGSIZE;
2730
2731 ua = nlmsg_data(nlh);
2732 memcpy(&ua->id, &x->id, sizeof(ua->id));
2733 memcpy(&ua->saddr, &x->props.saddr, sizeof(ua->saddr));
2734 memcpy(&ua->sel, &x->sel, sizeof(ua->sel));
2735 copy_to_user_policy(xp, &ua->policy, XFRM_POLICY_OUT);
2736 ua->aalgos = xt->aalgos;
2737 ua->ealgos = xt->ealgos;
2738 ua->calgos = xt->calgos;
2739 ua->seq = x->km.seq = seq;
2740
2741 err = copy_to_user_tmpl(xp, skb);
2742 if (!err)
2743 err = copy_to_user_state_sec_ctx(x, skb);
2744 if (!err)
2745 err = copy_to_user_policy_type(xp->type, skb);
2746 if (!err)
2747 err = xfrm_mark_put(skb, &xp->mark);
2748 if (err) {
2749 nlmsg_cancel(skb, nlh);
2750 return err;
2751 }
2752
2753 nlmsg_end(skb, nlh);
2754 return 0;
2755 }
2756
2757 static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt,
2758 struct xfrm_policy *xp)
2759 {
2760 struct net *net = xs_net(x);
2761 struct sk_buff *skb;
2762
2763 skb = nlmsg_new(xfrm_acquire_msgsize(x, xp), GFP_ATOMIC);
2764 if (skb == NULL)
2765 return -ENOMEM;
2766
2767 if (build_acquire(skb, x, xt, xp) < 0)
2768 BUG();
2769
2770 return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_ACQUIRE);
2771 }
2772
2773 /* User gives us xfrm_user_policy_info followed by an array of 0
2774 * or more templates.
2775 */
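/*
 * Layout sketch of the setsockopt() payload handled here (illustrative,
 * not a new interface):
 *
 *     struct xfrm_userpolicy_info p;          (fixed header)
 *     struct xfrm_user_tmpl       tmpl[nr];   (nr may be 0)
 *
 * nr is recovered as (len - sizeof(p)) / sizeof(tmpl[0]) and the templates
 * are checked by validate_tmpl() before the policy is built.
 */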
2776 static struct xfrm_policy *xfrm_compile_policy(struct sock *sk, int opt,
2777 u8 *data, int len, int *dir)
2778 {
2779 struct net *net = sock_net(sk);
2780 struct xfrm_userpolicy_info *p = (struct xfrm_userpolicy_info *)data;
2781 struct xfrm_user_tmpl *ut = (struct xfrm_user_tmpl *) (p + 1);
2782 struct xfrm_policy *xp;
2783 int nr;
2784
2785 switch (sk->sk_family) {
2786 case AF_INET:
2787 if (opt != IP_XFRM_POLICY) {
2788 *dir = -EOPNOTSUPP;
2789 return NULL;
2790 }
2791 break;
2792 #if IS_ENABLED(CONFIG_IPV6)
2793 case AF_INET6:
2794 if (opt != IPV6_XFRM_POLICY) {
2795 *dir = -EOPNOTSUPP;
2796 return NULL;
2797 }
2798 break;
2799 #endif
2800 default:
2801 *dir = -EINVAL;
2802 return NULL;
2803 }
2804
2805 *dir = -EINVAL;
2806
2807 if (len < sizeof(*p) ||
2808 verify_newpolicy_info(p))
2809 return NULL;
2810
2811 nr = ((len - sizeof(*p)) / sizeof(*ut));
2812 if (validate_tmpl(nr, ut, p->sel.family))
2813 return NULL;
2814
2815 if (p->dir > XFRM_POLICY_OUT)
2816 return NULL;
2817
2818 xp = xfrm_policy_alloc(net, GFP_ATOMIC);
2819 if (xp == NULL) {
2820 *dir = -ENOBUFS;
2821 return NULL;
2822 }
2823
2824 copy_from_user_policy(xp, p);
2825 xp->type = XFRM_POLICY_TYPE_MAIN;
2826 copy_templates(xp, ut, nr);
2827
2828 *dir = p->dir;
2829
2830 return xp;
2831 }
2832
2833 static inline size_t xfrm_polexpire_msgsize(struct xfrm_policy *xp)
2834 {
2835 return NLMSG_ALIGN(sizeof(struct xfrm_user_polexpire))
2836 + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
2837 + nla_total_size(xfrm_user_sec_ctx_size(xp->security))
2838 + nla_total_size(sizeof(struct xfrm_mark))
2839 + userpolicy_type_attrsize();
2840 }
2841
2842 static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp,
2843 int dir, const struct km_event *c)
2844 {
2845 struct xfrm_user_polexpire *upe;
2846 int hard = c->data.hard;
2847 struct nlmsghdr *nlh;
2848 int err;
2849
2850 nlh = nlmsg_put(skb, c->portid, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe), 0);
2851 if (nlh == NULL)
2852 return -EMSGSIZE;
2853
2854 upe = nlmsg_data(nlh);
2855 copy_to_user_policy(xp, &upe->pol, dir);
2856 err = copy_to_user_tmpl(xp, skb);
2857 if (!err)
2858 err = copy_to_user_sec_ctx(xp, skb);
2859 if (!err)
2860 err = copy_to_user_policy_type(xp->type, skb);
2861 if (!err)
2862 err = xfrm_mark_put(skb, &xp->mark);
2863 if (err) {
2864 nlmsg_cancel(skb, nlh);
2865 return err;
2866 }
2867 upe->hard = !!hard;
2868
2869 nlmsg_end(skb, nlh);
2870 return 0;
2871 }
2872
2873 static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
2874 {
2875 struct net *net = xp_net(xp);
2876 struct sk_buff *skb;
2877
2878 skb = nlmsg_new(xfrm_polexpire_msgsize(xp), GFP_ATOMIC);
2879 if (skb == NULL)
2880 return -ENOMEM;
2881
2882 if (build_polexpire(skb, xp, dir, c) < 0)
2883 BUG();
2884
2885 return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_EXPIRE);
2886 }
2887
2888 static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, const struct km_event *c)
2889 {
2890 int len = nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr);
2891 struct net *net = xp_net(xp);
2892 struct xfrm_userpolicy_info *p;
2893 struct xfrm_userpolicy_id *id;
2894 struct nlmsghdr *nlh;
2895 struct sk_buff *skb;
2896 int headlen, err;
2897
2898 headlen = sizeof(*p);
2899 if (c->event == XFRM_MSG_DELPOLICY) {
2900 len += nla_total_size(headlen);
2901 headlen = sizeof(*id);
2902 }
2903 len += userpolicy_type_attrsize();
2904 len += nla_total_size(sizeof(struct xfrm_mark));
2905 len += NLMSG_ALIGN(headlen);
2906
2907 skb = nlmsg_new(len, GFP_ATOMIC);
2908 if (skb == NULL)
2909 return -ENOMEM;
2910
2911 nlh = nlmsg_put(skb, c->portid, c->seq, c->event, headlen, 0);
2912 err = -EMSGSIZE;
2913 if (nlh == NULL)
2914 goto out_free_skb;
2915
2916 p = nlmsg_data(nlh);
2917 if (c->event == XFRM_MSG_DELPOLICY) {
2918 struct nlattr *attr;
2919
2920 id = nlmsg_data(nlh);
2921 memset(id, 0, sizeof(*id));
2922 id->dir = dir;
2923 if (c->data.byid)
2924 id->index = xp->index;
2925 else
2926 memcpy(&id->sel, &xp->selector, sizeof(id->sel));
2927
2928 attr = nla_reserve(skb, XFRMA_POLICY, sizeof(*p));
2929 err = -EMSGSIZE;
2930 if (attr == NULL)
2931 goto out_free_skb;
2932
2933 p = nla_data(attr);
2934 }
2935
2936 copy_to_user_policy(xp, p, dir);
2937 err = copy_to_user_tmpl(xp, skb);
2938 if (!err)
2939 err = copy_to_user_policy_type(xp->type, skb);
2940 if (!err)
2941 err = xfrm_mark_put(skb, &xp->mark);
2942 if (err)
2943 goto out_free_skb;
2944
2945 nlmsg_end(skb, nlh);
2946
2947 return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_POLICY);
2948
2949 out_free_skb:
2950 kfree_skb(skb);
2951 return err;
2952 }
2953
2954 static int xfrm_notify_policy_flush(const struct km_event *c)
2955 {
2956 struct net *net = c->net;
2957 struct nlmsghdr *nlh;
2958 struct sk_buff *skb;
2959 int err;
2960
2961 skb = nlmsg_new(userpolicy_type_attrsize(), GFP_ATOMIC);
2962 if (skb == NULL)
2963 return -ENOMEM;
2964
2965 nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_FLUSHPOLICY, 0, 0);
2966 err = -EMSGSIZE;
2967 if (nlh == NULL)
2968 goto out_free_skb;
2969 err = copy_to_user_policy_type(c->data.type, skb);
2970 if (err)
2971 goto out_free_skb;
2972
2973 nlmsg_end(skb, nlh);
2974
2975 return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_POLICY);
2976
2977 out_free_skb:
2978 kfree_skb(skb);
2979 return err;
2980 }
2981
2982 static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
2983 {
2984
2985 switch (c->event) {
2986 case XFRM_MSG_NEWPOLICY:
2987 case XFRM_MSG_UPDPOLICY:
2988 case XFRM_MSG_DELPOLICY:
2989 return xfrm_notify_policy(xp, dir, c);
2990 case XFRM_MSG_FLUSHPOLICY:
2991 return xfrm_notify_policy_flush(c);
2992 case XFRM_MSG_POLEXPIRE:
2993 return xfrm_exp_policy_notify(xp, dir, c);
2994 default:
2995 printk(KERN_NOTICE "xfrm_user: Unknown Policy event %d\n",
2996 c->event);
2997 }
2998
2999 return 0;
3000
3001 }
3002
3003 static inline size_t xfrm_report_msgsize(void)
3004 {
3005 return NLMSG_ALIGN(sizeof(struct xfrm_user_report));
3006 }
3007
3008 static int build_report(struct sk_buff *skb, u8 proto,
3009 struct xfrm_selector *sel, xfrm_address_t *addr)
3010 {
3011 struct xfrm_user_report *ur;
3012 struct nlmsghdr *nlh;
3013
3014 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_REPORT, sizeof(*ur), 0);
3015 if (nlh == NULL)
3016 return -EMSGSIZE;
3017
3018 ur = nlmsg_data(nlh);
3019 ur->proto = proto;
3020 memcpy(&ur->sel, sel, sizeof(ur->sel));
3021
3022 if (addr) {
3023 int err = nla_put(skb, XFRMA_COADDR, sizeof(*addr), addr);
3024 if (err) {
3025 nlmsg_cancel(skb, nlh);
3026 return err;
3027 }
3028 }
3029 nlmsg_end(skb, nlh);
3030 return 0;
3031 }
3032
3033 static int xfrm_send_report(struct net *net, u8 proto,
3034 struct xfrm_selector *sel, xfrm_address_t *addr)
3035 {
3036 struct sk_buff *skb;
3037
3038 skb = nlmsg_new(xfrm_report_msgsize(), GFP_ATOMIC);
3039 if (skb == NULL)
3040 return -ENOMEM;
3041
3042 if (build_report(skb, proto, sel, addr) < 0)
3043 BUG();
3044
3045 return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_REPORT);
3046 }
3047
3048 static inline size_t xfrm_mapping_msgsize(void)
3049 {
3050 return NLMSG_ALIGN(sizeof(struct xfrm_user_mapping));
3051 }
3052
3053 static int build_mapping(struct sk_buff *skb, struct xfrm_state *x,
3054 xfrm_address_t *new_saddr, __be16 new_sport)
3055 {
3056 struct xfrm_user_mapping *um;
3057 struct nlmsghdr *nlh;
3058
3059 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MAPPING, sizeof(*um), 0);
3060 if (nlh == NULL)
3061 return -EMSGSIZE;
3062
3063 um = nlmsg_data(nlh);
3064
3065 memcpy(&um->id.daddr, &x->id.daddr, sizeof(um->id.daddr));
3066 um->id.spi = x->id.spi;
3067 um->id.family = x->props.family;
3068 um->id.proto = x->id.proto;
3069 memcpy(&um->new_saddr, new_saddr, sizeof(um->new_saddr));
3070 memcpy(&um->old_saddr, &x->props.saddr, sizeof(um->old_saddr));
3071 um->new_sport = new_sport;
3072 um->old_sport = x->encap->encap_sport;
3073 um->reqid = x->props.reqid;
3074
3075 nlmsg_end(skb, nlh);
3076 return 0;
3077 }
3078
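/*
 * NAT traversal mapping-change notification; the checks below restrict it
 * to ESP states that carry an encapsulation template.
 */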
3079 static int xfrm_send_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
3080 __be16 sport)
3081 {
3082 struct net *net = xs_net(x);
3083 struct sk_buff *skb;
3084
3085 if (x->id.proto != IPPROTO_ESP)
3086 return -EINVAL;
3087
3088 if (!x->encap)
3089 return -EINVAL;
3090
3091 skb = nlmsg_new(xfrm_mapping_msgsize(), GFP_ATOMIC);
3092 if (skb == NULL)
3093 return -ENOMEM;
3094
3095 if (build_mapping(skb, x, ipaddr, sport) < 0)
3096 BUG();
3097
3098 return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_MAPPING);
3099 }
3100
3101 static bool xfrm_is_alive(const struct km_event *c)
3102 {
3103 return (bool)xfrm_acquire_is_on(c->net);
3104 }
3105
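/*
 * Key-manager callbacks registered with the xfrm core (see
 * xfrm_user_init()); most of them translate in-kernel SA/policy events
 * into the NETLINK_XFRM multicast messages built above.
 */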
3106 static struct xfrm_mgr netlink_mgr = {
3107 .id = "netlink",
3108 .notify = xfrm_send_state_notify,
3109 .acquire = xfrm_send_acquire,
3110 .compile_policy = xfrm_compile_policy,
3111 .notify_policy = xfrm_send_policy_notify,
3112 .report = xfrm_send_report,
3113 .migrate = xfrm_send_migrate,
3114 .new_mapping = xfrm_send_mapping,
3115 .is_alive = xfrm_is_alive,
3116 };
3117
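/*
 * Per-netns setup: create the kernel-side NETLINK_XFRM socket.  The
 * nlsk_stash copy is kept so that xfrm_user_net_exit() can clear the
 * published RCU pointer, wait in synchronize_net(), and only then release
 * the socket.
 */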
3118 static int __net_init xfrm_user_net_init(struct net *net)
3119 {
3120 struct sock *nlsk;
3121 struct netlink_kernel_cfg cfg = {
3122 .groups = XFRMNLGRP_MAX,
3123 .input = xfrm_netlink_rcv,
3124 };
3125
3126 nlsk = netlink_kernel_create(net, NETLINK_XFRM, &cfg);
3127 if (nlsk == NULL)
3128 return -ENOMEM;
3129 net->xfrm.nlsk_stash = nlsk; /* Don't set to NULL */
3130 rcu_assign_pointer(net->xfrm.nlsk, nlsk);
3131 return 0;
3132 }
3133
3134 static void __net_exit xfrm_user_net_exit(struct list_head *net_exit_list)
3135 {
3136 struct net *net;
3137 list_for_each_entry(net, net_exit_list, exit_list)
3138 RCU_INIT_POINTER(net->xfrm.nlsk, NULL);
3139 synchronize_net();
3140 list_for_each_entry(net, net_exit_list, exit_list)
3141 netlink_kernel_release(net->xfrm.nlsk_stash);
3142 }
3143
3144 static struct pernet_operations xfrm_user_net_ops = {
3145 .init = xfrm_user_net_init,
3146 .exit_batch = xfrm_user_net_exit,
3147 };
3148
3149 static int __init xfrm_user_init(void)
3150 {
3151 int rv;
3152
3153 printk(KERN_INFO "Initializing XFRM netlink socket\n");
3154
3155 rv = register_pernet_subsys(&xfrm_user_net_ops);
3156 if (rv < 0)
3157 return rv;
3158 rv = xfrm_register_km(&netlink_mgr);
3159 if (rv < 0)
3160 unregister_pernet_subsys(&xfrm_user_net_ops);
3161 return rv;
3162 }
3163
3164 static void __exit xfrm_user_exit(void)
3165 {
3166 xfrm_unregister_km(&netlink_mgr);
3167 unregister_pernet_subsys(&xfrm_user_net_ops);
3168 }
3169
3170 module_init(xfrm_user_init);
3171 module_exit(xfrm_user_exit);
3172 MODULE_LICENSE("GPL");
3173 MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_XFRM);
3174