1 /* xfrm_user.c: User interface to configure xfrm engine.
2 *
3 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
4 *
5 * Changes:
6 * Mitsuru KANDA @USAGI
7 * Kazunori MIYAZAWA @USAGI
8 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
9 * IPv6 support
10 *
11 */
12
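/*
 * This file implements the NETLINK_XFRM configuration interface: key
 * managers (IKE daemons) and tools such as "ip xfrm" send XFRM_MSG_*
 * requests over an AF_NETLINK socket, and the handlers below validate
 * the attributes, build xfrm_state/xfrm_policy objects and broadcast
 * notifications to the XFRMNLGRP_* multicast groups.
 *
 * A rough userspace sketch of reaching this interface (flushing all
 * SAs; needs CAP_NET_ADMIN, error handling omitted):
 *
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_XFRM);
 *	struct sockaddr_nl dst = { .nl_family = AF_NETLINK };
 *	struct { struct nlmsghdr n; struct xfrm_usersa_flush f; } req = {
 *		.n.nlmsg_len   = NLMSG_LENGTH(sizeof(req.f)),
 *		.n.nlmsg_type  = XFRM_MSG_FLUSHSA,
 *		.n.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK,
 *		.f.proto       = IPSEC_PROTO_ANY,
 *	};
 *	sendto(fd, &req, req.n.nlmsg_len, 0,
 *	       (struct sockaddr *)&dst, sizeof(dst));
 *
 * which ends up in xfrm_flush_sa() below.
 */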
13 #include <linux/crypto.h>
14 #include <linux/module.h>
15 #include <linux/kernel.h>
16 #include <linux/types.h>
17 #include <linux/slab.h>
18 #include <linux/socket.h>
19 #include <linux/string.h>
20 #include <linux/net.h>
21 #include <linux/skbuff.h>
22 #include <linux/pfkeyv2.h>
23 #include <linux/ipsec.h>
24 #include <linux/init.h>
25 #include <linux/security.h>
26 #include <net/sock.h>
27 #include <net/xfrm.h>
28 #include <net/netlink.h>
29 #include <asm/uaccess.h>
30 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
31 #include <linux/in6.h>
32 #endif
33
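/*
 * Userspace expresses algorithm key lengths in bits; (alg_key_len + 7) / 8
 * rounds that up to the number of key bytes stored after the fixed
 * header, giving the total size of the attribute payload.
 */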
34 static inline int aead_len(struct xfrm_algo_aead *alg)
35 {
36 return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
37 }
38
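/*
 * Sanity-check a legacy struct xfrm_algo attribute: the attribute must be
 * big enough to hold the key it claims to carry, its type must be one of
 * AUTH/CRYPT/COMP, and alg_name is forcibly NUL-terminated so later
 * algorithm lookups cannot read past the buffer.
 */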
39 static int verify_one_alg(struct nlattr **attrs, enum xfrm_attr_type_t type)
40 {
41 struct nlattr *rt = attrs[type];
42 struct xfrm_algo *algp;
43
44 if (!rt)
45 return 0;
46
47 algp = nla_data(rt);
48 if (nla_len(rt) < xfrm_alg_len(algp))
49 return -EINVAL;
50
51 switch (type) {
52 case XFRMA_ALG_AUTH:
53 case XFRMA_ALG_CRYPT:
54 case XFRMA_ALG_COMP:
55 break;
56
57 default:
58 return -EINVAL;
59 }
60
61 algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0';
62 return 0;
63 }
64
65 static int verify_auth_trunc(struct nlattr **attrs)
66 {
67 struct nlattr *rt = attrs[XFRMA_ALG_AUTH_TRUNC];
68 struct xfrm_algo_auth *algp;
69
70 if (!rt)
71 return 0;
72
73 algp = nla_data(rt);
74 if (nla_len(rt) < xfrm_alg_auth_len(algp))
75 return -EINVAL;
76
77 algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0';
78 return 0;
79 }
80
81 static int verify_aead(struct nlattr **attrs)
82 {
83 struct nlattr *rt = attrs[XFRMA_ALG_AEAD];
84 struct xfrm_algo_aead *algp;
85
86 if (!rt)
87 return 0;
88
89 algp = nla_data(rt);
90 if (nla_len(rt) < aead_len(algp))
91 return -EINVAL;
92
93 algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0';
94 return 0;
95 }
96
97 static void verify_one_addr(struct nlattr **attrs, enum xfrm_attr_type_t type,
98 xfrm_address_t **addrp)
99 {
100 struct nlattr *rt = attrs[type];
101
102 if (rt && addrp)
103 *addrp = nla_data(rt);
104 }
105
106 static inline int verify_sec_ctx_len(struct nlattr **attrs)
107 {
108 struct nlattr *rt = attrs[XFRMA_SEC_CTX];
109 struct xfrm_user_sec_ctx *uctx;
110
111 if (!rt)
112 return 0;
113
114 uctx = nla_data(rt);
115 if (uctx->len != (sizeof(struct xfrm_user_sec_ctx) + uctx->ctx_len))
116 return -EINVAL;
117
118 return 0;
119 }
120
121
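/*
 * Validate an incoming SA request before anything is allocated.  Each
 * protocol admits a specific attribute combination: AH needs an auth
 * algorithm only, ESP needs at least one of auth/crypt/AEAD (with AEAD
 * excluding the separate auth and crypt attributes), IPCOMP needs exactly
 * a compression algorithm, and the IPv6 mobility headers (DSTOPTS/ROUTING)
 * take a care-of address instead of any algorithm.
 */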
122 static int verify_newsa_info(struct xfrm_usersa_info *p,
123 struct nlattr **attrs)
124 {
125 int err;
126
127 err = -EINVAL;
128 switch (p->family) {
129 case AF_INET:
130 break;
131
132 case AF_INET6:
133 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
134 break;
135 #else
136 err = -EAFNOSUPPORT;
137 goto out;
138 #endif
139
140 default:
141 goto out;
142 }
143
144 err = -EINVAL;
145 switch (p->id.proto) {
146 case IPPROTO_AH:
147 if ((!attrs[XFRMA_ALG_AUTH] &&
148 !attrs[XFRMA_ALG_AUTH_TRUNC]) ||
149 attrs[XFRMA_ALG_AEAD] ||
150 attrs[XFRMA_ALG_CRYPT] ||
151 attrs[XFRMA_ALG_COMP] ||
152 attrs[XFRMA_TFCPAD])
153 goto out;
154 break;
155
156 case IPPROTO_ESP:
157 if (attrs[XFRMA_ALG_COMP])
158 goto out;
159 if (!attrs[XFRMA_ALG_AUTH] &&
160 !attrs[XFRMA_ALG_AUTH_TRUNC] &&
161 !attrs[XFRMA_ALG_CRYPT] &&
162 !attrs[XFRMA_ALG_AEAD])
163 goto out;
164 if ((attrs[XFRMA_ALG_AUTH] ||
165 attrs[XFRMA_ALG_AUTH_TRUNC] ||
166 attrs[XFRMA_ALG_CRYPT]) &&
167 attrs[XFRMA_ALG_AEAD])
168 goto out;
169 if (attrs[XFRMA_TFCPAD] &&
170 p->mode != XFRM_MODE_TUNNEL)
171 goto out;
172 break;
173
174 case IPPROTO_COMP:
175 if (!attrs[XFRMA_ALG_COMP] ||
176 attrs[XFRMA_ALG_AEAD] ||
177 attrs[XFRMA_ALG_AUTH] ||
178 attrs[XFRMA_ALG_AUTH_TRUNC] ||
179 attrs[XFRMA_ALG_CRYPT] ||
180 attrs[XFRMA_TFCPAD])
181 goto out;
182 break;
183
184 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
185 case IPPROTO_DSTOPTS:
186 case IPPROTO_ROUTING:
187 if (attrs[XFRMA_ALG_COMP] ||
188 attrs[XFRMA_ALG_AUTH] ||
189 attrs[XFRMA_ALG_AUTH_TRUNC] ||
190 attrs[XFRMA_ALG_AEAD] ||
191 attrs[XFRMA_ALG_CRYPT] ||
192 attrs[XFRMA_ENCAP] ||
193 attrs[XFRMA_SEC_CTX] ||
194 attrs[XFRMA_TFCPAD] ||
195 !attrs[XFRMA_COADDR])
196 goto out;
197 break;
198 #endif
199
200 default:
201 goto out;
202 }
203
204 if ((err = verify_aead(attrs)))
205 goto out;
206 if ((err = verify_auth_trunc(attrs)))
207 goto out;
208 if ((err = verify_one_alg(attrs, XFRMA_ALG_AUTH)))
209 goto out;
210 if ((err = verify_one_alg(attrs, XFRMA_ALG_CRYPT)))
211 goto out;
212 if ((err = verify_one_alg(attrs, XFRMA_ALG_COMP)))
213 goto out;
214 if ((err = verify_sec_ctx_len(attrs)))
215 goto out;
216
217 err = -EINVAL;
218 switch (p->mode) {
219 case XFRM_MODE_TRANSPORT:
220 case XFRM_MODE_TUNNEL:
221 case XFRM_MODE_ROUTEOPTIMIZATION:
222 case XFRM_MODE_BEET:
223 break;
224
225 default:
226 goto out;
227 }
228
229 err = 0;
230
231 out:
232 return err;
233 }
234
235 static int attach_one_algo(struct xfrm_algo **algpp, u8 *props,
236 struct xfrm_algo_desc *(*get_byname)(char *, int),
237 struct nlattr *rta)
238 {
239 struct xfrm_algo *p, *ualg;
240 struct xfrm_algo_desc *algo;
241
242 if (!rta)
243 return 0;
244
245 ualg = nla_data(rta);
246
247 algo = get_byname(ualg->alg_name, 1);
248 if (!algo)
249 return -ENOSYS;
250 *props = algo->desc.sadb_alg_id;
251
252 p = kmemdup(ualg, xfrm_alg_len(ualg), GFP_KERNEL);
253 if (!p)
254 return -ENOMEM;
255
256 strcpy(p->alg_name, algo->name);
257 *algpp = p;
258 return 0;
259 }
260
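/*
 * The legacy XFRMA_ALG_AUTH attribute carries a struct xfrm_algo with no
 * truncation length, so the ICV truncation is taken from the algorithm
 * table (icv_truncbits).  Newer userspace should send XFRMA_ALG_AUTH_TRUNC
 * instead (attach_auth_trunc() below) to choose the truncation length
 * explicitly.
 */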
261 static int attach_auth(struct xfrm_algo_auth **algpp, u8 *props,
262 struct nlattr *rta)
263 {
264 struct xfrm_algo *ualg;
265 struct xfrm_algo_auth *p;
266 struct xfrm_algo_desc *algo;
267
268 if (!rta)
269 return 0;
270
271 ualg = nla_data(rta);
272
273 algo = xfrm_aalg_get_byname(ualg->alg_name, 1);
274 if (!algo)
275 return -ENOSYS;
276 *props = algo->desc.sadb_alg_id;
277
278 p = kmalloc(sizeof(*p) + (ualg->alg_key_len + 7) / 8, GFP_KERNEL);
279 if (!p)
280 return -ENOMEM;
281
282 strcpy(p->alg_name, algo->name);
283 p->alg_key_len = ualg->alg_key_len;
284 p->alg_trunc_len = algo->uinfo.auth.icv_truncbits;
285 memcpy(p->alg_key, ualg->alg_key, (ualg->alg_key_len + 7) / 8);
286
287 *algpp = p;
288 return 0;
289 }
290
291 static int attach_auth_trunc(struct xfrm_algo_auth **algpp, u8 *props,
292 struct nlattr *rta)
293 {
294 struct xfrm_algo_auth *p, *ualg;
295 struct xfrm_algo_desc *algo;
296
297 if (!rta)
298 return 0;
299
300 ualg = nla_data(rta);
301
302 algo = xfrm_aalg_get_byname(ualg->alg_name, 1);
303 if (!algo)
304 return -ENOSYS;
305 if (ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits)
306 return -EINVAL;
307 *props = algo->desc.sadb_alg_id;
308
309 p = kmemdup(ualg, xfrm_alg_auth_len(ualg), GFP_KERNEL);
310 if (!p)
311 return -ENOMEM;
312
313 strcpy(p->alg_name, algo->name);
314 if (!p->alg_trunc_len)
315 p->alg_trunc_len = algo->uinfo.auth.icv_truncbits;
316
317 *algpp = p;
318 return 0;
319 }
320
321 static int attach_aead(struct xfrm_algo_aead **algpp, u8 *props,
322 struct nlattr *rta)
323 {
324 struct xfrm_algo_aead *p, *ualg;
325 struct xfrm_algo_desc *algo;
326
327 if (!rta)
328 return 0;
329
330 ualg = nla_data(rta);
331
332 algo = xfrm_aead_get_byname(ualg->alg_name, ualg->alg_icv_len, 1);
333 if (!algo)
334 return -ENOSYS;
335 *props = algo->desc.sadb_alg_id;
336
337 p = kmemdup(ualg, aead_len(ualg), GFP_KERNEL);
338 if (!p)
339 return -ENOMEM;
340
341 strcpy(p->alg_name, algo->name);
342 *algpp = p;
343 return 0;
344 }
345
346 static inline int xfrm_user_sec_ctx_size(struct xfrm_sec_ctx *xfrm_ctx)
347 {
348 int len = 0;
349
350 if (xfrm_ctx) {
351 len += sizeof(struct xfrm_user_sec_ctx);
352 len += xfrm_ctx->ctx_len;
353 }
354 return len;
355 }
356
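/*
 * Copy the fixed struct xfrm_usersa_info fields into a freshly allocated
 * xfrm_state.  Unless XFRM_STATE_AF_UNSPEC was requested, an empty
 * selector inherits the SA's address family so single-family lookups
 * keep working.
 */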
357 static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
358 {
359 memcpy(&x->id, &p->id, sizeof(x->id));
360 memcpy(&x->sel, &p->sel, sizeof(x->sel));
361 memcpy(&x->lft, &p->lft, sizeof(x->lft));
362 x->props.mode = p->mode;
363 x->props.replay_window = p->replay_window;
364 x->props.reqid = p->reqid;
365 x->props.family = p->family;
366 memcpy(&x->props.saddr, &p->saddr, sizeof(x->props.saddr));
367 x->props.flags = p->flags;
368
369 if (!x->sel.family && !(p->flags & XFRM_STATE_AF_UNSPEC))
370 x->sel.family = p->family;
371 }
372
373 /*
374 * someday when pfkey also has support, we could have the code
375 * somehow made shareable and move it to xfrm_state.c - JHS
376 *
377 */
378 static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs)
379 {
380 struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
381 struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
382 struct nlattr *et = attrs[XFRMA_ETIMER_THRESH];
383 struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];
384
385 if (rp) {
386 struct xfrm_replay_state *replay;
387 replay = nla_data(rp);
388 memcpy(&x->replay, replay, sizeof(*replay));
389 memcpy(&x->preplay, replay, sizeof(*replay));
390 }
391
392 if (lt) {
393 struct xfrm_lifetime_cur *ltime;
394 ltime = nla_data(lt);
395 x->curlft.bytes = ltime->bytes;
396 x->curlft.packets = ltime->packets;
397 x->curlft.add_time = ltime->add_time;
398 x->curlft.use_time = ltime->use_time;
399 }
400
401 if (et)
402 x->replay_maxage = nla_get_u32(et);
403
404 if (rt)
405 x->replay_maxdiff = nla_get_u32(rt);
406 }
407
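/*
 * Turn a validated XFRM_MSG_NEWSA/UPDSA request into an xfrm_state:
 * attach algorithm, NAT-T encapsulation, TFC padding and care-of address
 * attributes, initialise the state, seed the replay/lifetime bookkeeping
 * from the per-namespace aevent sysctls, and finally let explicit
 * XFRMA_REPLAY_VAL / XFRMA_LTIME_VAL attributes override those defaults.
 */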
408 static struct xfrm_state *xfrm_state_construct(struct net *net,
409 struct xfrm_usersa_info *p,
410 struct nlattr **attrs,
411 int *errp)
412 {
413 struct xfrm_state *x = xfrm_state_alloc(net);
414 int err = -ENOMEM;
415
416 if (!x)
417 goto error_no_put;
418
419 copy_from_user_state(x, p);
420
421 if ((err = attach_aead(&x->aead, &x->props.ealgo,
422 attrs[XFRMA_ALG_AEAD])))
423 goto error;
424 if ((err = attach_auth_trunc(&x->aalg, &x->props.aalgo,
425 attrs[XFRMA_ALG_AUTH_TRUNC])))
426 goto error;
427 if (!x->props.aalgo) {
428 if ((err = attach_auth(&x->aalg, &x->props.aalgo,
429 attrs[XFRMA_ALG_AUTH])))
430 goto error;
431 }
432 if ((err = attach_one_algo(&x->ealg, &x->props.ealgo,
433 xfrm_ealg_get_byname,
434 attrs[XFRMA_ALG_CRYPT])))
435 goto error;
436 if ((err = attach_one_algo(&x->calg, &x->props.calgo,
437 xfrm_calg_get_byname,
438 attrs[XFRMA_ALG_COMP])))
439 goto error;
440
441 if (attrs[XFRMA_ENCAP]) {
442 x->encap = kmemdup(nla_data(attrs[XFRMA_ENCAP]),
443 sizeof(*x->encap), GFP_KERNEL);
444 if (x->encap == NULL)
445 goto error;
446 }
447
448 if (attrs[XFRMA_TFCPAD])
449 x->tfcpad = nla_get_u32(attrs[XFRMA_TFCPAD]);
450
451 if (attrs[XFRMA_COADDR]) {
452 x->coaddr = kmemdup(nla_data(attrs[XFRMA_COADDR]),
453 sizeof(*x->coaddr), GFP_KERNEL);
454 if (x->coaddr == NULL)
455 goto error;
456 }
457
458 xfrm_mark_get(attrs, &x->mark);
459
460 err = xfrm_init_state(x);
461 if (err)
462 goto error;
463
464 if (attrs[XFRMA_SEC_CTX] &&
465 security_xfrm_state_alloc(x, nla_data(attrs[XFRMA_SEC_CTX])))
466 goto error;
467
468 x->km.seq = p->seq;
469 x->replay_maxdiff = net->xfrm.sysctl_aevent_rseqth;
470 /* sysctl_xfrm_aevent_etime is in 100ms units */
471 x->replay_maxage = (net->xfrm.sysctl_aevent_etime*HZ)/XFRM_AE_ETH_M;
472 x->preplay.bitmap = 0;
473 x->preplay.seq = x->replay.seq+x->replay_maxdiff;
474 x->preplay.oseq = x->replay.oseq +x->replay_maxdiff;
475
476 /* override default values from above */
477
478 xfrm_update_ae_params(x, attrs);
479
480 return x;
481
482 error:
483 x->km.state = XFRM_STATE_DEAD;
484 xfrm_state_put(x);
485 error_no_put:
486 *errp = err;
487 return NULL;
488 }
489
490 static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
491 struct nlattr **attrs)
492 {
493 struct net *net = sock_net(skb->sk);
494 struct xfrm_usersa_info *p = nlmsg_data(nlh);
495 struct xfrm_state *x;
496 int err;
497 struct km_event c;
498 uid_t loginuid = NETLINK_CB(skb).loginuid;
499 u32 sessionid = NETLINK_CB(skb).sessionid;
500 u32 sid = NETLINK_CB(skb).sid;
501
502 err = verify_newsa_info(p, attrs);
503 if (err)
504 return err;
505
506 x = xfrm_state_construct(net, p, attrs, &err);
507 if (!x)
508 return err;
509
510 xfrm_state_hold(x);
511 if (nlh->nlmsg_type == XFRM_MSG_NEWSA)
512 err = xfrm_state_add(x);
513 else
514 err = xfrm_state_update(x);
515
516 xfrm_audit_state_add(x, err ? 0 : 1, loginuid, sessionid, sid);
517
518 if (err < 0) {
519 x->km.state = XFRM_STATE_DEAD;
520 __xfrm_state_put(x);
521 goto out;
522 }
523
524 c.seq = nlh->nlmsg_seq;
525 c.pid = nlh->nlmsg_pid;
526 c.event = nlh->nlmsg_type;
527
528 km_state_notify(x, &c);
529 out:
530 xfrm_state_put(x);
531 return err;
532 }
533
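/*
 * Resolve a struct xfrm_usersa_id to a referenced xfrm_state.  AH, ESP
 * and IPCOMP states are looked up by (daddr, spi, proto); the other
 * protocols have no SPI, so the caller must also supply XFRMA_SRCADDR
 * and the lookup is done by address pair.
 */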
534 static struct xfrm_state *xfrm_user_state_lookup(struct net *net,
535 struct xfrm_usersa_id *p,
536 struct nlattr **attrs,
537 int *errp)
538 {
539 struct xfrm_state *x = NULL;
540 struct xfrm_mark m;
541 int err;
542 u32 mark = xfrm_mark_get(attrs, &m);
543
544 if (xfrm_id_proto_match(p->proto, IPSEC_PROTO_ANY)) {
545 err = -ESRCH;
546 x = xfrm_state_lookup(net, mark, &p->daddr, p->spi, p->proto, p->family);
547 } else {
548 xfrm_address_t *saddr = NULL;
549
550 verify_one_addr(attrs, XFRMA_SRCADDR, &saddr);
551 if (!saddr) {
552 err = -EINVAL;
553 goto out;
554 }
555
556 err = -ESRCH;
557 x = xfrm_state_lookup_byaddr(net, mark,
558 &p->daddr, saddr,
559 p->proto, p->family);
560 }
561
562 out:
563 if (!x && errp)
564 *errp = err;
565 return x;
566 }
567
568 static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
569 struct nlattr **attrs)
570 {
571 struct net *net = sock_net(skb->sk);
572 struct xfrm_state *x;
573 int err = -ESRCH;
574 struct km_event c;
575 struct xfrm_usersa_id *p = nlmsg_data(nlh);
576 uid_t loginuid = NETLINK_CB(skb).loginuid;
577 u32 sessionid = NETLINK_CB(skb).sessionid;
578 u32 sid = NETLINK_CB(skb).sid;
579
580 x = xfrm_user_state_lookup(net, p, attrs, &err);
581 if (x == NULL)
582 return err;
583
584 if ((err = security_xfrm_state_delete(x)) != 0)
585 goto out;
586
587 if (xfrm_state_kern(x)) {
588 err = -EPERM;
589 goto out;
590 }
591
592 err = xfrm_state_delete(x);
593
594 if (err < 0)
595 goto out;
596
597 c.seq = nlh->nlmsg_seq;
598 c.pid = nlh->nlmsg_pid;
599 c.event = nlh->nlmsg_type;
600 km_state_notify(x, &c);
601
602 out:
603 xfrm_audit_state_delete(x, err ? 0 : 1, loginuid, sessionid, sid);
604 xfrm_state_put(x);
605 return err;
606 }
607
608 static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
609 {
610 memcpy(&p->id, &x->id, sizeof(p->id));
611 memcpy(&p->sel, &x->sel, sizeof(p->sel));
612 memcpy(&p->lft, &x->lft, sizeof(p->lft));
613 memcpy(&p->curlft, &x->curlft, sizeof(p->curlft));
614 memcpy(&p->stats, &x->stats, sizeof(p->stats));
615 memcpy(&p->saddr, &x->props.saddr, sizeof(p->saddr));
616 p->mode = x->props.mode;
617 p->replay_window = x->props.replay_window;
618 p->reqid = x->props.reqid;
619 p->family = x->props.family;
620 p->flags = x->props.flags;
621 p->seq = x->km.seq;
622 }
623
624 struct xfrm_dump_info {
625 struct sk_buff *in_skb;
626 struct sk_buff *out_skb;
627 u32 nlmsg_seq;
628 u16 nlmsg_flags;
629 };
630
631 static int copy_sec_ctx(struct xfrm_sec_ctx *s, struct sk_buff *skb)
632 {
633 struct xfrm_user_sec_ctx *uctx;
634 struct nlattr *attr;
635 int ctx_size = sizeof(*uctx) + s->ctx_len;
636
637 attr = nla_reserve(skb, XFRMA_SEC_CTX, ctx_size);
638 if (attr == NULL)
639 return -EMSGSIZE;
640
641 uctx = nla_data(attr);
642 uctx->exttype = XFRMA_SEC_CTX;
643 uctx->len = ctx_size;
644 uctx->ctx_doi = s->ctx_doi;
645 uctx->ctx_alg = s->ctx_alg;
646 uctx->ctx_len = s->ctx_len;
647 memcpy(uctx + 1, s->ctx_str, s->ctx_len);
648
649 return 0;
650 }
651
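/*
 * For compatibility, dumps still emit the old-style XFRMA_ALG_AUTH
 * attribute (struct xfrm_algo, without a truncation length) alongside
 * XFRMA_ALG_AUTH_TRUNC, so older userspace keeps parsing the SA.
 */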
652 static int copy_to_user_auth(struct xfrm_algo_auth *auth, struct sk_buff *skb)
653 {
654 struct xfrm_algo *algo;
655 struct nlattr *nla;
656
657 nla = nla_reserve(skb, XFRMA_ALG_AUTH,
658 sizeof(*algo) + (auth->alg_key_len + 7) / 8);
659 if (!nla)
660 return -EMSGSIZE;
661
662 algo = nla_data(nla);
663 strcpy(algo->alg_name, auth->alg_name);
664 memcpy(algo->alg_key, auth->alg_key, (auth->alg_key_len + 7) / 8);
665 algo->alg_key_len = auth->alg_key_len;
666
667 return 0;
668 }
669
670 /* Don't change this without updating xfrm_sa_len! */
671 static int copy_to_user_state_extra(struct xfrm_state *x,
672 struct xfrm_usersa_info *p,
673 struct sk_buff *skb)
674 {
675 copy_to_user_state(x, p);
676
677 if (x->coaddr)
678 NLA_PUT(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr);
679
680 if (x->lastused)
681 NLA_PUT_U64(skb, XFRMA_LASTUSED, x->lastused);
682
683 if (x->aead)
684 NLA_PUT(skb, XFRMA_ALG_AEAD, aead_len(x->aead), x->aead);
685 if (x->aalg) {
686 if (copy_to_user_auth(x->aalg, skb))
687 goto nla_put_failure;
688
689 NLA_PUT(skb, XFRMA_ALG_AUTH_TRUNC,
690 xfrm_alg_auth_len(x->aalg), x->aalg);
691 }
692 if (x->ealg)
693 NLA_PUT(skb, XFRMA_ALG_CRYPT, xfrm_alg_len(x->ealg), x->ealg);
694 if (x->calg)
695 NLA_PUT(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg);
696
697 if (x->encap)
698 NLA_PUT(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap);
699
700 if (x->tfcpad)
701 NLA_PUT_U32(skb, XFRMA_TFCPAD, x->tfcpad);
702
703 if (xfrm_mark_put(skb, &x->mark))
704 goto nla_put_failure;
705
706 if (x->security && copy_sec_ctx(x->security, skb) < 0)
707 goto nla_put_failure;
708
709 return 0;
710
711 nla_put_failure:
712 return -EMSGSIZE;
713 }
714
715 static int dump_one_state(struct xfrm_state *x, int count, void *ptr)
716 {
717 struct xfrm_dump_info *sp = ptr;
718 struct sk_buff *in_skb = sp->in_skb;
719 struct sk_buff *skb = sp->out_skb;
720 struct xfrm_usersa_info *p;
721 struct nlmsghdr *nlh;
722 int err;
723
724 nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, sp->nlmsg_seq,
725 XFRM_MSG_NEWSA, sizeof(*p), sp->nlmsg_flags);
726 if (nlh == NULL)
727 return -EMSGSIZE;
728
729 p = nlmsg_data(nlh);
730
731 err = copy_to_user_state_extra(x, p, skb);
732 if (err)
733 goto nla_put_failure;
734
735 nlmsg_end(skb, nlh);
736 return 0;
737
738 nla_put_failure:
739 nlmsg_cancel(skb, nlh);
740 return err;
741 }
742
743 static int xfrm_dump_sa_done(struct netlink_callback *cb)
744 {
745 struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1];
746 xfrm_state_walk_done(walk);
747 return 0;
748 }
749
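/*
 * SA dumps are resumable: the xfrm_state_walk cursor lives directly in
 * the netlink callback's args[] scratch space (args[0] doubles as an
 * "initialised" flag), and the BUILD_BUG_ON below guards against the
 * walker outgrowing that space.
 */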
750 static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
751 {
752 struct net *net = sock_net(skb->sk);
753 struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1];
754 struct xfrm_dump_info info;
755
756 BUILD_BUG_ON(sizeof(struct xfrm_state_walk) >
757 sizeof(cb->args) - sizeof(cb->args[0]));
758
759 info.in_skb = cb->skb;
760 info.out_skb = skb;
761 info.nlmsg_seq = cb->nlh->nlmsg_seq;
762 info.nlmsg_flags = NLM_F_MULTI;
763
764 if (!cb->args[0]) {
765 cb->args[0] = 1;
766 xfrm_state_walk_init(walk, 0);
767 }
768
769 (void) xfrm_state_walk(net, walk, dump_one_state, &info);
770
771 return skb->len;
772 }
773
774 static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
775 struct xfrm_state *x, u32 seq)
776 {
777 struct xfrm_dump_info info;
778 struct sk_buff *skb;
779
780 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
781 if (!skb)
782 return ERR_PTR(-ENOMEM);
783
784 info.in_skb = in_skb;
785 info.out_skb = skb;
786 info.nlmsg_seq = seq;
787 info.nlmsg_flags = 0;
788
789 if (dump_one_state(x, 0, &info)) {
790 kfree_skb(skb);
791 return NULL;
792 }
793
794 return skb;
795 }
796
797 static inline size_t xfrm_spdinfo_msgsize(void)
798 {
799 return NLMSG_ALIGN(4)
800 + nla_total_size(sizeof(struct xfrmu_spdinfo))
801 + nla_total_size(sizeof(struct xfrmu_spdhinfo));
802 }
803
804 static int build_spdinfo(struct sk_buff *skb, struct net *net,
805 u32 pid, u32 seq, u32 flags)
806 {
807 struct xfrmk_spdinfo si;
808 struct xfrmu_spdinfo spc;
809 struct xfrmu_spdhinfo sph;
810 struct nlmsghdr *nlh;
811 u32 *f;
812
813 nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSPDINFO, sizeof(u32), 0);
814 if (nlh == NULL) /* shouldn't really happen ... */
815 return -EMSGSIZE;
816
817 f = nlmsg_data(nlh);
818 *f = flags;
819 xfrm_spd_getinfo(net, &si);
820 spc.incnt = si.incnt;
821 spc.outcnt = si.outcnt;
822 spc.fwdcnt = si.fwdcnt;
823 spc.inscnt = si.inscnt;
824 spc.outscnt = si.outscnt;
825 spc.fwdscnt = si.fwdscnt;
826 sph.spdhcnt = si.spdhcnt;
827 sph.spdhmcnt = si.spdhmcnt;
828
829 NLA_PUT(skb, XFRMA_SPD_INFO, sizeof(spc), &spc);
830 NLA_PUT(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph);
831
832 return nlmsg_end(skb, nlh);
833
834 nla_put_failure:
835 nlmsg_cancel(skb, nlh);
836 return -EMSGSIZE;
837 }
838
839 static int xfrm_get_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
840 struct nlattr **attrs)
841 {
842 struct net *net = sock_net(skb->sk);
843 struct sk_buff *r_skb;
844 u32 *flags = nlmsg_data(nlh);
845 u32 spid = NETLINK_CB(skb).pid;
846 u32 seq = nlh->nlmsg_seq;
847
848 r_skb = nlmsg_new(xfrm_spdinfo_msgsize(), GFP_ATOMIC);
849 if (r_skb == NULL)
850 return -ENOMEM;
851
852 if (build_spdinfo(r_skb, net, spid, seq, *flags) < 0)
853 BUG();
854
855 return nlmsg_unicast(net->xfrm.nlsk, r_skb, spid);
856 }
857
858 static inline size_t xfrm_sadinfo_msgsize(void)
859 {
860 return NLMSG_ALIGN(4)
861 + nla_total_size(sizeof(struct xfrmu_sadhinfo))
862 + nla_total_size(4); /* XFRMA_SAD_CNT */
863 }
864
865 static int build_sadinfo(struct sk_buff *skb, struct net *net,
866 u32 pid, u32 seq, u32 flags)
867 {
868 struct xfrmk_sadinfo si;
869 struct xfrmu_sadhinfo sh;
870 struct nlmsghdr *nlh;
871 u32 *f;
872
873 nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSADINFO, sizeof(u32), 0);
874 if (nlh == NULL) /* shouldn't really happen ... */
875 return -EMSGSIZE;
876
877 f = nlmsg_data(nlh);
878 *f = flags;
879 xfrm_sad_getinfo(net, &si);
880
881 sh.sadhmcnt = si.sadhmcnt;
882 sh.sadhcnt = si.sadhcnt;
883
884 NLA_PUT_U32(skb, XFRMA_SAD_CNT, si.sadcnt);
885 NLA_PUT(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh);
886
887 return nlmsg_end(skb, nlh);
888
889 nla_put_failure:
890 nlmsg_cancel(skb, nlh);
891 return -EMSGSIZE;
892 }
893
894 static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
895 struct nlattr **attrs)
896 {
897 struct net *net = sock_net(skb->sk);
898 struct sk_buff *r_skb;
899 u32 *flags = nlmsg_data(nlh);
900 u32 spid = NETLINK_CB(skb).pid;
901 u32 seq = nlh->nlmsg_seq;
902
903 r_skb = nlmsg_new(xfrm_sadinfo_msgsize(), GFP_ATOMIC);
904 if (r_skb == NULL)
905 return -ENOMEM;
906
907 if (build_sadinfo(r_skb, net, spid, seq, *flags) < 0)
908 BUG();
909
910 return nlmsg_unicast(net->xfrm.nlsk, r_skb, spid);
911 }
912
913 static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
914 struct nlattr **attrs)
915 {
916 struct net *net = sock_net(skb->sk);
917 struct xfrm_usersa_id *p = nlmsg_data(nlh);
918 struct xfrm_state *x;
919 struct sk_buff *resp_skb;
920 int err = -ESRCH;
921
922 x = xfrm_user_state_lookup(net, p, attrs, &err);
923 if (x == NULL)
924 goto out_noput;
925
926 resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
927 if (IS_ERR(resp_skb)) {
928 err = PTR_ERR(resp_skb);
929 } else {
930 err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).pid);
931 }
932 xfrm_state_put(x);
933 out_noput:
934 return err;
935 }
936
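/*
 * SPI allocation bounds: AH and ESP use the full 32-bit SPI space, but
 * IPCOMP CPIs are only 16 bits wide, so the requested maximum must stay
 * below 0x10000.
 */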
937 static int verify_userspi_info(struct xfrm_userspi_info *p)
938 {
939 switch (p->info.id.proto) {
940 case IPPROTO_AH:
941 case IPPROTO_ESP:
942 break;
943
944 case IPPROTO_COMP:
945 /* IPCOMP spi is 16-bits. */
946 if (p->max >= 0x10000)
947 return -EINVAL;
948 break;
949
950 default:
951 return -EINVAL;
952 }
953
954 if (p->min > p->max)
955 return -EINVAL;
956
957 return 0;
958 }
959
960 static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
961 struct nlattr **attrs)
962 {
963 struct net *net = sock_net(skb->sk);
964 struct xfrm_state *x;
965 struct xfrm_userspi_info *p;
966 struct sk_buff *resp_skb;
967 xfrm_address_t *daddr;
968 int family;
969 int err;
970 u32 mark;
971 struct xfrm_mark m;
972
973 p = nlmsg_data(nlh);
974 err = verify_userspi_info(p);
975 if (err)
976 goto out_noput;
977
978 family = p->info.family;
979 daddr = &p->info.id.daddr;
980
981 x = NULL;
982
983 mark = xfrm_mark_get(attrs, &m);
984 if (p->info.seq) {
985 x = xfrm_find_acq_byseq(net, mark, p->info.seq);
986 if (x && xfrm_addr_cmp(&x->id.daddr, daddr, family)) {
987 xfrm_state_put(x);
988 x = NULL;
989 }
990 }
991
992 if (!x)
993 x = xfrm_find_acq(net, &m, p->info.mode, p->info.reqid,
994 p->info.id.proto, daddr,
995 &p->info.saddr, 1,
996 family);
997 err = -ENOENT;
998 if (x == NULL)
999 goto out_noput;
1000
1001 err = xfrm_alloc_spi(x, p->min, p->max);
1002 if (err)
1003 goto out;
1004
1005 resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
1006 if (IS_ERR(resp_skb)) {
1007 err = PTR_ERR(resp_skb);
1008 goto out;
1009 }
1010
1011 err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).pid);
1012
1013 out:
1014 xfrm_state_put(x);
1015 out_noput:
1016 return err;
1017 }
1018
1019 static int verify_policy_dir(u8 dir)
1020 {
1021 switch (dir) {
1022 case XFRM_POLICY_IN:
1023 case XFRM_POLICY_OUT:
1024 case XFRM_POLICY_FWD:
1025 break;
1026
1027 default:
1028 return -EINVAL;
1029 }
1030
1031 return 0;
1032 }
1033
1034 static int verify_policy_type(u8 type)
1035 {
1036 switch (type) {
1037 case XFRM_POLICY_TYPE_MAIN:
1038 #ifdef CONFIG_XFRM_SUB_POLICY
1039 case XFRM_POLICY_TYPE_SUB:
1040 #endif
1041 break;
1042
1043 default:
1044 return -EINVAL;
1045 }
1046
1047 return 0;
1048 }
1049
1050 static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
1051 {
1052 switch (p->share) {
1053 case XFRM_SHARE_ANY:
1054 case XFRM_SHARE_SESSION:
1055 case XFRM_SHARE_USER:
1056 case XFRM_SHARE_UNIQUE:
1057 break;
1058
1059 default:
1060 return -EINVAL;
1061 }
1062
1063 switch (p->action) {
1064 case XFRM_POLICY_ALLOW:
1065 case XFRM_POLICY_BLOCK:
1066 break;
1067
1068 default:
1069 return -EINVAL;
1070 }
1071
1072 switch (p->sel.family) {
1073 case AF_INET:
1074 break;
1075
1076 case AF_INET6:
1077 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1078 break;
1079 #else
1080 return -EAFNOSUPPORT;
1081 #endif
1082
1083 default:
1084 return -EINVAL;
1085 }
1086
1087 return verify_policy_dir(p->dir);
1088 }
1089
1090 static int copy_from_user_sec_ctx(struct xfrm_policy *pol, struct nlattr **attrs)
1091 {
1092 struct nlattr *rt = attrs[XFRMA_SEC_CTX];
1093 struct xfrm_user_sec_ctx *uctx;
1094
1095 if (!rt)
1096 return 0;
1097
1098 uctx = nla_data(rt);
1099 return security_xfrm_policy_alloc(&pol->security, uctx);
1100 }
1101
1102 static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut,
1103 int nr)
1104 {
1105 int i;
1106
1107 xp->xfrm_nr = nr;
1108 for (i = 0; i < nr; i++, ut++) {
1109 struct xfrm_tmpl *t = &xp->xfrm_vec[i];
1110
1111 memcpy(&t->id, &ut->id, sizeof(struct xfrm_id));
1112 memcpy(&t->saddr, &ut->saddr,
1113 sizeof(xfrm_address_t));
1114 t->reqid = ut->reqid;
1115 t->mode = ut->mode;
1116 t->share = ut->share;
1117 t->optional = ut->optional;
1118 t->aalgos = ut->aalgos;
1119 t->ealgos = ut->ealgos;
1120 t->calgos = ut->calgos;
1121 /* If all masks are ~0, then we allow all algorithms. */
1122 t->allalgs = !~(t->aalgos & t->ealgos & t->calgos);
1123 t->encap_family = ut->family;
1124 }
1125 }
1126
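/*
 * Bound the template count by XFRM_MAX_DEPTH and give every template a
 * usable address family, defaulting to the policy's own family for old
 * userspace that leaves ut->family at zero.
 */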
1127 static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
1128 {
1129 int i;
1130
1131 if (nr > XFRM_MAX_DEPTH)
1132 return -EINVAL;
1133
1134 for (i = 0; i < nr; i++) {
1135 /* We never validated the ut->family value, so many
1136 * applications simply leave it at zero. The check was
1137 * never made and ut->family was ignored because all
1138 * templates could be assumed to have the same family as
1139 * the policy itself. Now that we will have ipv4-in-ipv6
1140 * and ipv6-in-ipv4 tunnels, this is no longer true.
1141 */
1142 if (!ut[i].family)
1143 ut[i].family = family;
1144
1145 switch (ut[i].family) {
1146 case AF_INET:
1147 break;
1148 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1149 case AF_INET6:
1150 break;
1151 #endif
1152 default:
1153 return -EINVAL;
1154 }
1155 }
1156
1157 return 0;
1158 }
1159
1160 static int copy_from_user_tmpl(struct xfrm_policy *pol, struct nlattr **attrs)
1161 {
1162 struct nlattr *rt = attrs[XFRMA_TMPL];
1163
1164 if (!rt) {
1165 pol->xfrm_nr = 0;
1166 } else {
1167 struct xfrm_user_tmpl *utmpl = nla_data(rt);
1168 int nr = nla_len(rt) / sizeof(*utmpl);
1169 int err;
1170
1171 err = validate_tmpl(nr, utmpl, pol->family);
1172 if (err)
1173 return err;
1174
1175 copy_templates(pol, utmpl, nr);
1176 }
1177 return 0;
1178 }
1179
1180 static int copy_from_user_policy_type(u8 *tp, struct nlattr **attrs)
1181 {
1182 struct nlattr *rt = attrs[XFRMA_POLICY_TYPE];
1183 struct xfrm_userpolicy_type *upt;
1184 u8 type = XFRM_POLICY_TYPE_MAIN;
1185 int err;
1186
1187 if (rt) {
1188 upt = nla_data(rt);
1189 type = upt->type;
1190 }
1191
1192 err = verify_policy_type(type);
1193 if (err)
1194 return err;
1195
1196 *tp = type;
1197 return 0;
1198 }
1199
1200 static void copy_from_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p)
1201 {
1202 xp->priority = p->priority;
1203 xp->index = p->index;
1204 memcpy(&xp->selector, &p->sel, sizeof(xp->selector));
1205 memcpy(&xp->lft, &p->lft, sizeof(xp->lft));
1206 xp->action = p->action;
1207 xp->flags = p->flags;
1208 xp->family = p->sel.family;
1209 /* XXX xp->share = p->share; */
1210 }
1211
1212 static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p, int dir)
1213 {
1214 memcpy(&p->sel, &xp->selector, sizeof(p->sel));
1215 memcpy(&p->lft, &xp->lft, sizeof(p->lft));
1216 memcpy(&p->curlft, &xp->curlft, sizeof(p->curlft));
1217 p->priority = xp->priority;
1218 p->index = xp->index;
1219 p->sel.family = xp->family;
1220 p->dir = dir;
1221 p->action = xp->action;
1222 p->flags = xp->flags;
1223 p->share = XFRM_SHARE_ANY; /* XXX xp->share */
1224 }
1225
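/*
 * Build an xfrm_policy from an XFRM_MSG_NEWPOLICY/UPDPOLICY request:
 * copy the fixed policy info, resolve the (optional) main/sub policy
 * type, then attach the templates, security context and mark.
 */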
1226 static struct xfrm_policy *xfrm_policy_construct(struct net *net, struct xfrm_userpolicy_info *p, struct nlattr **attrs, int *errp)
1227 {
1228 struct xfrm_policy *xp = xfrm_policy_alloc(net, GFP_KERNEL);
1229 int err;
1230
1231 if (!xp) {
1232 *errp = -ENOMEM;
1233 return NULL;
1234 }
1235
1236 copy_from_user_policy(xp, p);
1237
1238 err = copy_from_user_policy_type(&xp->type, attrs);
1239 if (err)
1240 goto error;
1241
1242 if (!(err = copy_from_user_tmpl(xp, attrs)))
1243 err = copy_from_user_sec_ctx(xp, attrs);
1244 if (err)
1245 goto error;
1246
1247 xfrm_mark_get(attrs, &xp->mark);
1248
1249 return xp;
1250 error:
1251 *errp = err;
1252 xp->walk.dead = 1;
1253 xfrm_policy_destroy(xp);
1254 return NULL;
1255 }
1256
1257 static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1258 struct nlattr **attrs)
1259 {
1260 struct net *net = sock_net(skb->sk);
1261 struct xfrm_userpolicy_info *p = nlmsg_data(nlh);
1262 struct xfrm_policy *xp;
1263 struct km_event c;
1264 int err;
1265 int excl;
1266 uid_t loginuid = NETLINK_CB(skb).loginuid;
1267 u32 sessionid = NETLINK_CB(skb).sessionid;
1268 u32 sid = NETLINK_CB(skb).sid;
1269
1270 err = verify_newpolicy_info(p);
1271 if (err)
1272 return err;
1273 err = verify_sec_ctx_len(attrs);
1274 if (err)
1275 return err;
1276
1277 xp = xfrm_policy_construct(net, p, attrs, &err);
1278 if (!xp)
1279 return err;
1280
1281 /* shouldn't excl be based on nlh flags??
1282 * Aha! this is anti-netlink really, i.e. more pfkey derived -
1283 * in netlink excl is a flag and you wouldn't need
1284 * a type XFRM_MSG_UPDPOLICY - JHS */
1285 excl = nlh->nlmsg_type == XFRM_MSG_NEWPOLICY;
1286 err = xfrm_policy_insert(p->dir, xp, excl);
1287 xfrm_audit_policy_add(xp, err ? 0 : 1, loginuid, sessionid, sid);
1288
1289 if (err) {
1290 security_xfrm_policy_free(xp->security);
1291 kfree(xp);
1292 return err;
1293 }
1294
1295 c.event = nlh->nlmsg_type;
1296 c.seq = nlh->nlmsg_seq;
1297 c.pid = nlh->nlmsg_pid;
1298 km_policy_notify(xp, p->dir, &c);
1299
1300 xfrm_pol_put(xp);
1301
1302 return 0;
1303 }
1304
1305 static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
1306 {
1307 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
1308 int i;
1309
1310 if (xp->xfrm_nr == 0)
1311 return 0;
1312
1313 for (i = 0; i < xp->xfrm_nr; i++) {
1314 struct xfrm_user_tmpl *up = &vec[i];
1315 struct xfrm_tmpl *kp = &xp->xfrm_vec[i];
1316
1317 memcpy(&up->id, &kp->id, sizeof(up->id));
1318 up->family = kp->encap_family;
1319 memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr));
1320 up->reqid = kp->reqid;
1321 up->mode = kp->mode;
1322 up->share = kp->share;
1323 up->optional = kp->optional;
1324 up->aalgos = kp->aalgos;
1325 up->ealgos = kp->ealgos;
1326 up->calgos = kp->calgos;
1327 }
1328
1329 return nla_put(skb, XFRMA_TMPL,
1330 sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr, vec);
1331 }
1332
1333 static inline int copy_to_user_state_sec_ctx(struct xfrm_state *x, struct sk_buff *skb)
1334 {
1335 if (x->security) {
1336 return copy_sec_ctx(x->security, skb);
1337 }
1338 return 0;
1339 }
1340
1341 static inline int copy_to_user_sec_ctx(struct xfrm_policy *xp, struct sk_buff *skb)
1342 {
1343 if (xp->security) {
1344 return copy_sec_ctx(xp->security, skb);
1345 }
1346 return 0;
1347 }
1348 static inline size_t userpolicy_type_attrsize(void)
1349 {
1350 #ifdef CONFIG_XFRM_SUB_POLICY
1351 return nla_total_size(sizeof(struct xfrm_userpolicy_type));
1352 #else
1353 return 0;
1354 #endif
1355 }
1356
1357 #ifdef CONFIG_XFRM_SUB_POLICY
1358 static int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
1359 {
1360 struct xfrm_userpolicy_type upt = {
1361 .type = type,
1362 };
1363
1364 return nla_put(skb, XFRMA_POLICY_TYPE, sizeof(upt), &upt);
1365 }
1366
1367 #else
1368 static inline int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
1369 {
1370 return 0;
1371 }
1372 #endif
1373
1374 static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr)
1375 {
1376 struct xfrm_dump_info *sp = ptr;
1377 struct xfrm_userpolicy_info *p;
1378 struct sk_buff *in_skb = sp->in_skb;
1379 struct sk_buff *skb = sp->out_skb;
1380 struct nlmsghdr *nlh;
1381
1382 nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, sp->nlmsg_seq,
1383 XFRM_MSG_NEWPOLICY, sizeof(*p), sp->nlmsg_flags);
1384 if (nlh == NULL)
1385 return -EMSGSIZE;
1386
1387 p = nlmsg_data(nlh);
1388 copy_to_user_policy(xp, p, dir);
1389 if (copy_to_user_tmpl(xp, skb) < 0)
1390 goto nlmsg_failure;
1391 if (copy_to_user_sec_ctx(xp, skb))
1392 goto nlmsg_failure;
1393 if (copy_to_user_policy_type(xp->type, skb) < 0)
1394 goto nlmsg_failure;
1395 if (xfrm_mark_put(skb, &xp->mark))
1396 goto nla_put_failure;
1397
1398 nlmsg_end(skb, nlh);
1399 return 0;
1400
1401 nla_put_failure:
1402 nlmsg_failure:
1403 nlmsg_cancel(skb, nlh);
1404 return -EMSGSIZE;
1405 }
1406
1407 static int xfrm_dump_policy_done(struct netlink_callback *cb)
1408 {
1409 struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
1410
1411 xfrm_policy_walk_done(walk);
1412 return 0;
1413 }
1414
1415 static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb)
1416 {
1417 struct net *net = sock_net(skb->sk);
1418 struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
1419 struct xfrm_dump_info info;
1420
1421 BUILD_BUG_ON(sizeof(struct xfrm_policy_walk) >
1422 sizeof(cb->args) - sizeof(cb->args[0]));
1423
1424 info.in_skb = cb->skb;
1425 info.out_skb = skb;
1426 info.nlmsg_seq = cb->nlh->nlmsg_seq;
1427 info.nlmsg_flags = NLM_F_MULTI;
1428
1429 if (!cb->args[0]) {
1430 cb->args[0] = 1;
1431 xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY);
1432 }
1433
1434 (void) xfrm_policy_walk(net, walk, dump_one_policy, &info);
1435
1436 return skb->len;
1437 }
1438
1439 static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb,
1440 struct xfrm_policy *xp,
1441 int dir, u32 seq)
1442 {
1443 struct xfrm_dump_info info;
1444 struct sk_buff *skb;
1445
1446 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1447 if (!skb)
1448 return ERR_PTR(-ENOMEM);
1449
1450 info.in_skb = in_skb;
1451 info.out_skb = skb;
1452 info.nlmsg_seq = seq;
1453 info.nlmsg_flags = 0;
1454
1455 if (dump_one_policy(xp, dir, 0, &info) < 0) {
1456 kfree_skb(skb);
1457 return NULL;
1458 }
1459
1460 return skb;
1461 }
1462
1463 static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1464 struct nlattr **attrs)
1465 {
1466 struct net *net = sock_net(skb->sk);
1467 struct xfrm_policy *xp;
1468 struct xfrm_userpolicy_id *p;
1469 u8 type = XFRM_POLICY_TYPE_MAIN;
1470 int err;
1471 struct km_event c;
1472 int delete;
1473 struct xfrm_mark m;
1474 u32 mark = xfrm_mark_get(attrs, &m);
1475
1476 p = nlmsg_data(nlh);
1477 delete = nlh->nlmsg_type == XFRM_MSG_DELPOLICY;
1478
1479 err = copy_from_user_policy_type(&type, attrs);
1480 if (err)
1481 return err;
1482
1483 err = verify_policy_dir(p->dir);
1484 if (err)
1485 return err;
1486
1487 if (p->index)
1488 xp = xfrm_policy_byid(net, mark, type, p->dir, p->index, delete, &err);
1489 else {
1490 struct nlattr *rt = attrs[XFRMA_SEC_CTX];
1491 struct xfrm_sec_ctx *ctx;
1492
1493 err = verify_sec_ctx_len(attrs);
1494 if (err)
1495 return err;
1496
1497 ctx = NULL;
1498 if (rt) {
1499 struct xfrm_user_sec_ctx *uctx = nla_data(rt);
1500
1501 err = security_xfrm_policy_alloc(&ctx, uctx);
1502 if (err)
1503 return err;
1504 }
1505 xp = xfrm_policy_bysel_ctx(net, mark, type, p->dir, &p->sel,
1506 ctx, delete, &err);
1507 security_xfrm_policy_free(ctx);
1508 }
1509 if (xp == NULL)
1510 return -ENOENT;
1511
1512 if (!delete) {
1513 struct sk_buff *resp_skb;
1514
1515 resp_skb = xfrm_policy_netlink(skb, xp, p->dir, nlh->nlmsg_seq);
1516 if (IS_ERR(resp_skb)) {
1517 err = PTR_ERR(resp_skb);
1518 } else {
1519 err = nlmsg_unicast(net->xfrm.nlsk, resp_skb,
1520 NETLINK_CB(skb).pid);
1521 }
1522 } else {
1523 uid_t loginuid = NETLINK_CB(skb).loginuid;
1524 u32 sessionid = NETLINK_CB(skb).sessionid;
1525 u32 sid = NETLINK_CB(skb).sid;
1526
1527 xfrm_audit_policy_delete(xp, err ? 0 : 1, loginuid, sessionid,
1528 sid);
1529
1530 if (err != 0)
1531 goto out;
1532
1533 c.data.byid = p->index;
1534 c.event = nlh->nlmsg_type;
1535 c.seq = nlh->nlmsg_seq;
1536 c.pid = nlh->nlmsg_pid;
1537 km_policy_notify(xp, p->dir, &c);
1538 }
1539
1540 out:
1541 xfrm_pol_put(xp);
1542 return err;
1543 }
1544
1545 static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
1546 struct nlattr **attrs)
1547 {
1548 struct net *net = sock_net(skb->sk);
1549 struct km_event c;
1550 struct xfrm_usersa_flush *p = nlmsg_data(nlh);
1551 struct xfrm_audit audit_info;
1552 int err;
1553
1554 audit_info.loginuid = NETLINK_CB(skb).loginuid;
1555 audit_info.sessionid = NETLINK_CB(skb).sessionid;
1556 audit_info.secid = NETLINK_CB(skb).sid;
1557 err = xfrm_state_flush(net, p->proto, &audit_info);
1558 if (err) {
1559 if (err == -ESRCH) /* empty table */
1560 return 0;
1561 return err;
1562 }
1563 c.data.proto = p->proto;
1564 c.event = nlh->nlmsg_type;
1565 c.seq = nlh->nlmsg_seq;
1566 c.pid = nlh->nlmsg_pid;
1567 c.net = net;
1568 km_state_notify(NULL, &c);
1569
1570 return 0;
1571 }
1572
1573 static inline size_t xfrm_aevent_msgsize(void)
1574 {
1575 return NLMSG_ALIGN(sizeof(struct xfrm_aevent_id))
1576 + nla_total_size(sizeof(struct xfrm_replay_state))
1577 + nla_total_size(sizeof(struct xfrm_lifetime_cur))
1578 + nla_total_size(sizeof(struct xfrm_mark))
1579 + nla_total_size(4) /* XFRM_AE_RTHR */
1580 + nla_total_size(4); /* XFRM_AE_ETHR */
1581 }
1582
1583 static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, struct km_event *c)
1584 {
1585 struct xfrm_aevent_id *id;
1586 struct nlmsghdr *nlh;
1587
1588 nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_NEWAE, sizeof(*id), 0);
1589 if (nlh == NULL)
1590 return -EMSGSIZE;
1591
1592 id = nlmsg_data(nlh);
1593 memcpy(&id->sa_id.daddr, &x->id.daddr,sizeof(x->id.daddr));
1594 id->sa_id.spi = x->id.spi;
1595 id->sa_id.family = x->props.family;
1596 id->sa_id.proto = x->id.proto;
1597 memcpy(&id->saddr, &x->props.saddr,sizeof(x->props.saddr));
1598 id->reqid = x->props.reqid;
1599 id->flags = c->data.aevent;
1600
1601 NLA_PUT(skb, XFRMA_REPLAY_VAL, sizeof(x->replay), &x->replay);
1602 NLA_PUT(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft);
1603
1604 if (id->flags & XFRM_AE_RTHR)
1605 NLA_PUT_U32(skb, XFRMA_REPLAY_THRESH, x->replay_maxdiff);
1606
1607 if (id->flags & XFRM_AE_ETHR)
1608 NLA_PUT_U32(skb, XFRMA_ETIMER_THRESH,
1609 x->replay_maxage * 10 / HZ);
1610
1611 if (xfrm_mark_put(skb, &x->mark))
1612 goto nla_put_failure;
1613
1614 return nlmsg_end(skb, nlh);
1615
1616 nla_put_failure:
1617 nlmsg_cancel(skb, nlh);
1618 return -EMSGSIZE;
1619 }
1620
1621 static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
1622 struct nlattr **attrs)
1623 {
1624 struct net *net = sock_net(skb->sk);
1625 struct xfrm_state *x;
1626 struct sk_buff *r_skb;
1627 int err;
1628 struct km_event c;
1629 u32 mark;
1630 struct xfrm_mark m;
1631 struct xfrm_aevent_id *p = nlmsg_data(nlh);
1632 struct xfrm_usersa_id *id = &p->sa_id;
1633
1634 r_skb = nlmsg_new(xfrm_aevent_msgsize(), GFP_ATOMIC);
1635 if (r_skb == NULL)
1636 return -ENOMEM;
1637
1638 mark = xfrm_mark_get(attrs, &m);
1639
1640 x = xfrm_state_lookup(net, mark, &id->daddr, id->spi, id->proto, id->family);
1641 if (x == NULL) {
1642 kfree_skb(r_skb);
1643 return -ESRCH;
1644 }
1645
1646 /*
1647 * XXX: is this lock really needed - none of the other
1648 * handlers take it (the concern is things getting updated
1649 * while we are still reading) - jhs
1650 */
1651 spin_lock_bh(&x->lock);
1652 c.data.aevent = p->flags;
1653 c.seq = nlh->nlmsg_seq;
1654 c.pid = nlh->nlmsg_pid;
1655
1656 if (build_aevent(r_skb, x, &c) < 0)
1657 BUG();
1658 err = nlmsg_unicast(net->xfrm.nlsk, r_skb, NETLINK_CB(skb).pid);
1659 spin_unlock_bh(&x->lock);
1660 xfrm_state_put(x);
1661 return err;
1662 }
1663
1664 static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
1665 struct nlattr **attrs)
1666 {
1667 struct net *net = sock_net(skb->sk);
1668 struct xfrm_state *x;
1669 struct km_event c;
1670 int err = - EINVAL;
1671 u32 mark = 0;
1672 struct xfrm_mark m;
1673 struct xfrm_aevent_id *p = nlmsg_data(nlh);
1674 struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
1675 struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
1676
1677 if (!lt && !rp)
1678 return err;
1679
1680 /* pedantic mode - thou shalt sayeth replaceth */
1681 if (!(nlh->nlmsg_flags&NLM_F_REPLACE))
1682 return err;
1683
1684 mark = xfrm_mark_get(attrs, &m);
1685
1686 x = xfrm_state_lookup(net, mark, &p->sa_id.daddr, p->sa_id.spi, p->sa_id.proto, p->sa_id.family);
1687 if (x == NULL)
1688 return -ESRCH;
1689
1690 if (x->km.state != XFRM_STATE_VALID)
1691 goto out;
1692
1693 spin_lock_bh(&x->lock);
1694 xfrm_update_ae_params(x, attrs);
1695 spin_unlock_bh(&x->lock);
1696
1697 c.event = nlh->nlmsg_type;
1698 c.seq = nlh->nlmsg_seq;
1699 c.pid = nlh->nlmsg_pid;
1700 c.data.aevent = XFRM_AE_CU;
1701 km_state_notify(x, &c);
1702 err = 0;
1703 out:
1704 xfrm_state_put(x);
1705 return err;
1706 }
1707
1708 static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1709 struct nlattr **attrs)
1710 {
1711 struct net *net = sock_net(skb->sk);
1712 struct km_event c;
1713 u8 type = XFRM_POLICY_TYPE_MAIN;
1714 int err;
1715 struct xfrm_audit audit_info;
1716
1717 err = copy_from_user_policy_type(&type, attrs);
1718 if (err)
1719 return err;
1720
1721 audit_info.loginuid = NETLINK_CB(skb).loginuid;
1722 audit_info.sessionid = NETLINK_CB(skb).sessionid;
1723 audit_info.secid = NETLINK_CB(skb).sid;
1724 err = xfrm_policy_flush(net, type, &audit_info);
1725 if (err) {
1726 if (err == -ESRCH) /* empty table */
1727 return 0;
1728 return err;
1729 }
1730
1731 c.data.type = type;
1732 c.event = nlh->nlmsg_type;
1733 c.seq = nlh->nlmsg_seq;
1734 c.pid = nlh->nlmsg_pid;
1735 c.net = net;
1736 km_policy_notify(NULL, 0, &c);
1737 return 0;
1738 }
1739
1740 static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
1741 struct nlattr **attrs)
1742 {
1743 struct net *net = sock_net(skb->sk);
1744 struct xfrm_policy *xp;
1745 struct xfrm_user_polexpire *up = nlmsg_data(nlh);
1746 struct xfrm_userpolicy_info *p = &up->pol;
1747 u8 type = XFRM_POLICY_TYPE_MAIN;
1748 int err = -ENOENT;
1749 struct xfrm_mark m;
1750 u32 mark = xfrm_mark_get(attrs, &m);
1751
1752 err = copy_from_user_policy_type(&type, attrs);
1753 if (err)
1754 return err;
1755
1756 err = verify_policy_dir(p->dir);
1757 if (err)
1758 return err;
1759
1760 if (p->index)
1761 xp = xfrm_policy_byid(net, mark, type, p->dir, p->index, 0, &err);
1762 else {
1763 struct nlattr *rt = attrs[XFRMA_SEC_CTX];
1764 struct xfrm_sec_ctx *ctx;
1765
1766 err = verify_sec_ctx_len(attrs);
1767 if (err)
1768 return err;
1769
1770 ctx = NULL;
1771 if (rt) {
1772 struct xfrm_user_sec_ctx *uctx = nla_data(rt);
1773
1774 err = security_xfrm_policy_alloc(&ctx, uctx);
1775 if (err)
1776 return err;
1777 }
1778 xp = xfrm_policy_bysel_ctx(net, mark, type, p->dir,
1779 &p->sel, ctx, 0, &err);
1780 security_xfrm_policy_free(ctx);
1781 }
1782 if (xp == NULL)
1783 return -ENOENT;
1784
1785 if (unlikely(xp->walk.dead))
1786 goto out;
1787
1788 err = 0;
1789 if (up->hard) {
1790 uid_t loginuid = NETLINK_CB(skb).loginuid;
1791 uid_t sessionid = NETLINK_CB(skb).sessionid;
1792 u32 sid = NETLINK_CB(skb).sid;
1793 xfrm_policy_delete(xp, p->dir);
1794 xfrm_audit_policy_delete(xp, 1, loginuid, sessionid, sid);
1795
1796 } else {
1797 // reset the timers here?
1798 WARN(1, "Dont know what to do with soft policy expire\n");
1799 }
1800 km_policy_expired(xp, p->dir, up->hard, current->pid);
1801
1802 out:
1803 xfrm_pol_put(xp);
1804 return err;
1805 }
1806
1807 static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
1808 struct nlattr **attrs)
1809 {
1810 struct net *net = sock_net(skb->sk);
1811 struct xfrm_state *x;
1812 int err;
1813 struct xfrm_user_expire *ue = nlmsg_data(nlh);
1814 struct xfrm_usersa_info *p = &ue->state;
1815 struct xfrm_mark m;
1816 u32 mark = xfrm_mark_get(attrs, &m);
1817
1818 x = xfrm_state_lookup(net, mark, &p->id.daddr, p->id.spi, p->id.proto, p->family);
1819
1820 err = -ENOENT;
1821 if (x == NULL)
1822 return err;
1823
1824 spin_lock_bh(&x->lock);
1825 err = -EINVAL;
1826 if (x->km.state != XFRM_STATE_VALID)
1827 goto out;
1828 km_state_expired(x, ue->hard, current->pid);
1829
1830 if (ue->hard) {
1831 uid_t loginuid = NETLINK_CB(skb).loginuid;
1832 uid_t sessionid = NETLINK_CB(skb).sessionid;
1833 u32 sid = NETLINK_CB(skb).sid;
1834 __xfrm_state_delete(x);
1835 xfrm_audit_state_delete(x, 1, loginuid, sessionid, sid);
1836 }
1837 err = 0;
1838 out:
1839 spin_unlock_bh(&x->lock);
1840 xfrm_state_put(x);
1841 return err;
1842 }
1843
1844 static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
1845 struct nlattr **attrs)
1846 {
1847 struct net *net = sock_net(skb->sk);
1848 struct xfrm_policy *xp;
1849 struct xfrm_user_tmpl *ut;
1850 int i;
1851 struct nlattr *rt = attrs[XFRMA_TMPL];
1852 struct xfrm_mark mark;
1853
1854 struct xfrm_user_acquire *ua = nlmsg_data(nlh);
1855 struct xfrm_state *x = xfrm_state_alloc(net);
1856 int err = -ENOMEM;
1857
1858 if (!x)
1859 goto nomem;
1860
1861 xfrm_mark_get(attrs, &mark);
1862
1863 err = verify_newpolicy_info(&ua->policy);
1864 if (err)
1865 goto bad_policy;
1866
1867 /* build an XP */
1868 xp = xfrm_policy_construct(net, &ua->policy, attrs, &err);
1869 if (!xp)
1870 goto free_state;
1871
1872 memcpy(&x->id, &ua->id, sizeof(ua->id));
1873 memcpy(&x->props.saddr, &ua->saddr, sizeof(ua->saddr));
1874 memcpy(&x->sel, &ua->sel, sizeof(ua->sel));
1875 xp->mark.m = x->mark.m = mark.m;
1876 xp->mark.v = x->mark.v = mark.v;
1877 ut = nla_data(rt);
1878 /* extract the templates and for each call km_key */
1879 for (i = 0; i < xp->xfrm_nr; i++, ut++) {
1880 struct xfrm_tmpl *t = &xp->xfrm_vec[i];
1881 memcpy(&x->id, &t->id, sizeof(x->id));
1882 x->props.mode = t->mode;
1883 x->props.reqid = t->reqid;
1884 x->props.family = ut->family;
1885 t->aalgos = ua->aalgos;
1886 t->ealgos = ua->ealgos;
1887 t->calgos = ua->calgos;
1888 err = km_query(x, t, xp);
1889
1890 }
1891
1892 kfree(x);
1893 kfree(xp);
1894
1895 return 0;
1896
1897 bad_policy:
1898 WARN(1, "BAD policy passed\n");
1899 free_state:
1900 kfree(x);
1901 nomem:
1902 return err;
1903 }
1904
1905 #ifdef CONFIG_XFRM_MIGRATE
1906 static int copy_from_user_migrate(struct xfrm_migrate *ma,
1907 struct xfrm_kmaddress *k,
1908 struct nlattr **attrs, int *num)
1909 {
1910 struct nlattr *rt = attrs[XFRMA_MIGRATE];
1911 struct xfrm_user_migrate *um;
1912 int i, num_migrate;
1913
1914 if (k != NULL) {
1915 struct xfrm_user_kmaddress *uk;
1916
1917 uk = nla_data(attrs[XFRMA_KMADDRESS]);
1918 memcpy(&k->local, &uk->local, sizeof(k->local));
1919 memcpy(&k->remote, &uk->remote, sizeof(k->remote));
1920 k->family = uk->family;
1921 k->reserved = uk->reserved;
1922 }
1923
1924 um = nla_data(rt);
1925 num_migrate = nla_len(rt) / sizeof(*um);
1926
1927 if (num_migrate <= 0 || num_migrate > XFRM_MAX_DEPTH)
1928 return -EINVAL;
1929
1930 for (i = 0; i < num_migrate; i++, um++, ma++) {
1931 memcpy(&ma->old_daddr, &um->old_daddr, sizeof(ma->old_daddr));
1932 memcpy(&ma->old_saddr, &um->old_saddr, sizeof(ma->old_saddr));
1933 memcpy(&ma->new_daddr, &um->new_daddr, sizeof(ma->new_daddr));
1934 memcpy(&ma->new_saddr, &um->new_saddr, sizeof(ma->new_saddr));
1935
1936 ma->proto = um->proto;
1937 ma->mode = um->mode;
1938 ma->reqid = um->reqid;
1939
1940 ma->old_family = um->old_family;
1941 ma->new_family = um->new_family;
1942 }
1943
1944 *num = i;
1945 return 0;
1946 }
1947
1948 static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
1949 struct nlattr **attrs)
1950 {
1951 struct xfrm_userpolicy_id *pi = nlmsg_data(nlh);
1952 struct xfrm_migrate m[XFRM_MAX_DEPTH];
1953 struct xfrm_kmaddress km, *kmp;
1954 u8 type;
1955 int err;
1956 int n = 0;
1957
1958 if (attrs[XFRMA_MIGRATE] == NULL)
1959 return -EINVAL;
1960
1961 kmp = attrs[XFRMA_KMADDRESS] ? &km : NULL;
1962
1963 err = copy_from_user_policy_type(&type, attrs);
1964 if (err)
1965 return err;
1966
1967 err = copy_from_user_migrate((struct xfrm_migrate *)m, kmp, attrs, &n);
1968 if (err)
1969 return err;
1970
1971 if (!n)
1972 return 0;
1973
1974 xfrm_migrate(&pi->sel, pi->dir, type, m, n, kmp);
1975
1976 return 0;
1977 }
1978 #else
1979 static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
1980 struct nlattr **attrs)
1981 {
1982 return -ENOPROTOOPT;
1983 }
1984 #endif
1985
1986 #ifdef CONFIG_XFRM_MIGRATE
1987 static int copy_to_user_migrate(struct xfrm_migrate *m, struct sk_buff *skb)
1988 {
1989 struct xfrm_user_migrate um;
1990
1991 memset(&um, 0, sizeof(um));
1992 um.proto = m->proto;
1993 um.mode = m->mode;
1994 um.reqid = m->reqid;
1995 um.old_family = m->old_family;
1996 memcpy(&um.old_daddr, &m->old_daddr, sizeof(um.old_daddr));
1997 memcpy(&um.old_saddr, &m->old_saddr, sizeof(um.old_saddr));
1998 um.new_family = m->new_family;
1999 memcpy(&um.new_daddr, &m->new_daddr, sizeof(um.new_daddr));
2000 memcpy(&um.new_saddr, &m->new_saddr, sizeof(um.new_saddr));
2001
2002 return nla_put(skb, XFRMA_MIGRATE, sizeof(um), &um);
2003 }
2004
2005 static int copy_to_user_kmaddress(struct xfrm_kmaddress *k, struct sk_buff *skb)
2006 {
2007 struct xfrm_user_kmaddress uk;
2008
2009 memset(&uk, 0, sizeof(uk));
2010 uk.family = k->family;
2011 uk.reserved = k->reserved;
2012 memcpy(&uk.local, &k->local, sizeof(uk.local));
2013 memcpy(&uk.remote, &k->remote, sizeof(uk.remote));
2014
2015 return nla_put(skb, XFRMA_KMADDRESS, sizeof(uk), &uk);
2016 }
2017
2018 static inline size_t xfrm_migrate_msgsize(int num_migrate, int with_kma)
2019 {
2020 return NLMSG_ALIGN(sizeof(struct xfrm_userpolicy_id))
2021 + (with_kma ? nla_total_size(sizeof(struct xfrm_kmaddress)) : 0)
2022 + nla_total_size(sizeof(struct xfrm_user_migrate) * num_migrate)
2023 + userpolicy_type_attrsize();
2024 }
2025
2026 static int build_migrate(struct sk_buff *skb, struct xfrm_migrate *m,
2027 int num_migrate, struct xfrm_kmaddress *k,
2028 struct xfrm_selector *sel, u8 dir, u8 type)
2029 {
2030 struct xfrm_migrate *mp;
2031 struct xfrm_userpolicy_id *pol_id;
2032 struct nlmsghdr *nlh;
2033 int i;
2034
2035 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MIGRATE, sizeof(*pol_id), 0);
2036 if (nlh == NULL)
2037 return -EMSGSIZE;
2038
2039 pol_id = nlmsg_data(nlh);
2040 /* copy data from selector, dir, and type to the pol_id */
2041 memset(pol_id, 0, sizeof(*pol_id));
2042 memcpy(&pol_id->sel, sel, sizeof(pol_id->sel));
2043 pol_id->dir = dir;
2044
2045 if (k != NULL && (copy_to_user_kmaddress(k, skb) < 0))
2046 goto nlmsg_failure;
2047
2048 if (copy_to_user_policy_type(type, skb) < 0)
2049 goto nlmsg_failure;
2050
2051 for (i = 0, mp = m ; i < num_migrate; i++, mp++) {
2052 if (copy_to_user_migrate(mp, skb) < 0)
2053 goto nlmsg_failure;
2054 }
2055
2056 return nlmsg_end(skb, nlh);
2057 nlmsg_failure:
2058 nlmsg_cancel(skb, nlh);
2059 return -EMSGSIZE;
2060 }
2061
2062 static int xfrm_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
2063 struct xfrm_migrate *m, int num_migrate,
2064 struct xfrm_kmaddress *k)
2065 {
2066 struct net *net = &init_net;
2067 struct sk_buff *skb;
2068
2069 skb = nlmsg_new(xfrm_migrate_msgsize(num_migrate, !!k), GFP_ATOMIC);
2070 if (skb == NULL)
2071 return -ENOMEM;
2072
2073 /* build migrate */
2074 if (build_migrate(skb, m, num_migrate, k, sel, dir, type) < 0)
2075 BUG();
2076
2077 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_MIGRATE, GFP_ATOMIC);
2078 }
2079 #else
2080 static int xfrm_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
2081 struct xfrm_migrate *m, int num_migrate,
2082 struct xfrm_kmaddress *k)
2083 {
2084 return -ENOPROTOOPT;
2085 }
2086 #endif
2087
2088 #define XMSGSIZE(type) sizeof(struct type)
2089
2090 static const int xfrm_msg_min[XFRM_NR_MSGTYPES] = {
2091 [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
2092 [XFRM_MSG_DELSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
2093 [XFRM_MSG_GETSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
2094 [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
2095 [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
2096 [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
2097 [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userspi_info),
2098 [XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_acquire),
2099 [XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_expire),
2100 [XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
2101 [XFRM_MSG_UPDSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
2102 [XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_polexpire),
2103 [XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_flush),
2104 [XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = 0,
2105 [XFRM_MSG_NEWAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
2106 [XFRM_MSG_GETAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
2107 [XFRM_MSG_REPORT - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_report),
2108 [XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
2109 [XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = sizeof(u32),
2110 [XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = sizeof(u32),
2111 };
2112
2113 #undef XMSGSIZE
2114
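/*
 * Minimum attribute sizes enforced by nlmsg_parse().  Variable-length
 * attributes (algorithms, templates, security contexts, migrate lists)
 * only have their fixed header checked here; the verify_* helpers above
 * check the variable part.
 */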
2115 static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
2116 [XFRMA_SA] = { .len = sizeof(struct xfrm_usersa_info)},
2117 [XFRMA_POLICY] = { .len = sizeof(struct xfrm_userpolicy_info)},
2118 [XFRMA_LASTUSED] = { .type = NLA_U64},
2119 [XFRMA_ALG_AUTH_TRUNC] = { .len = sizeof(struct xfrm_algo_auth)},
2120 [XFRMA_ALG_AEAD] = { .len = sizeof(struct xfrm_algo_aead) },
2121 [XFRMA_ALG_AUTH] = { .len = sizeof(struct xfrm_algo) },
2122 [XFRMA_ALG_CRYPT] = { .len = sizeof(struct xfrm_algo) },
2123 [XFRMA_ALG_COMP] = { .len = sizeof(struct xfrm_algo) },
2124 [XFRMA_ENCAP] = { .len = sizeof(struct xfrm_encap_tmpl) },
2125 [XFRMA_TMPL] = { .len = sizeof(struct xfrm_user_tmpl) },
2126 [XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_sec_ctx) },
2127 [XFRMA_LTIME_VAL] = { .len = sizeof(struct xfrm_lifetime_cur) },
2128 [XFRMA_REPLAY_VAL] = { .len = sizeof(struct xfrm_replay_state) },
2129 [XFRMA_REPLAY_THRESH] = { .type = NLA_U32 },
2130 [XFRMA_ETIMER_THRESH] = { .type = NLA_U32 },
2131 [XFRMA_SRCADDR] = { .len = sizeof(xfrm_address_t) },
2132 [XFRMA_COADDR] = { .len = sizeof(xfrm_address_t) },
2133 [XFRMA_POLICY_TYPE] = { .len = sizeof(struct xfrm_userpolicy_type)},
2134 [XFRMA_MIGRATE] = { .len = sizeof(struct xfrm_user_migrate) },
2135 [XFRMA_KMADDRESS] = { .len = sizeof(struct xfrm_user_kmaddress) },
2136 [XFRMA_MARK] = { .len = sizeof(struct xfrm_mark) },
2137 [XFRMA_TFCPAD] = { .type = NLA_U32 },
2138 };
2139
2140 static struct xfrm_link {
2141 int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **);
2142 int (*dump)(struct sk_buff *, struct netlink_callback *);
2143 int (*done)(struct netlink_callback *);
2144 } xfrm_dispatch[XFRM_NR_MSGTYPES] = {
2145 [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa },
2146 [XFRM_MSG_DELSA - XFRM_MSG_BASE] = { .doit = xfrm_del_sa },
2147 [XFRM_MSG_GETSA - XFRM_MSG_BASE] = { .doit = xfrm_get_sa,
2148 .dump = xfrm_dump_sa,
2149 .done = xfrm_dump_sa_done },
2150 [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
2151 [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy },
2152 [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy,
2153 .dump = xfrm_dump_policy,
2154 .done = xfrm_dump_policy_done },
2155 [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi },
2156 [XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_acquire },
2157 [XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_sa_expire },
2158 [XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
2159 [XFRM_MSG_UPDSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa },
2160 [XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_pol_expire},
2161 [XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = { .doit = xfrm_flush_sa },
2162 [XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_flush_policy },
2163 [XFRM_MSG_NEWAE - XFRM_MSG_BASE] = { .doit = xfrm_new_ae },
2164 [XFRM_MSG_GETAE - XFRM_MSG_BASE] = { .doit = xfrm_get_ae },
2165 [XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = { .doit = xfrm_do_migrate },
2166 [XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_sadinfo },
2167 [XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_spdinfo },
2168 };
2169
2170 static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
2171 {
2172 struct net *net = sock_net(skb->sk);
2173 struct nlattr *attrs[XFRMA_MAX+1];
2174 struct xfrm_link *link;
2175 int type, err;
2176
2177 type = nlh->nlmsg_type;
2178 if (type > XFRM_MSG_MAX)
2179 return -EINVAL;
2180
2181 type -= XFRM_MSG_BASE;
2182 link = &xfrm_dispatch[type];
2183
2184 /* All operations require privileges, even GET */
2185 if (security_netlink_recv(skb, CAP_NET_ADMIN))
2186 return -EPERM;
2187
2188 if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) ||
2189 type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) &&
2190 (nlh->nlmsg_flags & NLM_F_DUMP)) {
2191 if (link->dump == NULL)
2192 return -EINVAL;
2193
2194 return netlink_dump_start(net->xfrm.nlsk, skb, nlh, link->dump, link->done);
2195 }
2196
2197 err = nlmsg_parse(nlh, xfrm_msg_min[type], attrs, XFRMA_MAX,
2198 xfrma_policy);
2199 if (err < 0)
2200 return err;
2201
2202 if (link->doit == NULL)
2203 return -EINVAL;
2204
2205 return link->doit(skb, nlh, attrs);
2206 }
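/*
 * Illustration only (userspace, not part of this file): a minimal sketch,
 * assuming <sys/socket.h>, <linux/netlink.h> and <linux/xfrm.h>, of a dump
 * request that takes the NLM_F_DUMP branch above.  The caller needs
 * CAP_NET_ADMIN, and the dump replies are read back with recv() on the
 * same socket:
 *
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_XFRM);
 *	struct sockaddr_nl kern = { .nl_family = AF_NETLINK };
 *	struct nlmsghdr req = {
 *		.nlmsg_len   = NLMSG_LENGTH(0),
 *		.nlmsg_type  = XFRM_MSG_GETSA,
 *		.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
 *	};
 *
 *	sendto(fd, &req, req.nlmsg_len, 0,
 *	       (struct sockaddr *)&kern, sizeof(kern));
 */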
2207
2208 static void xfrm_netlink_rcv(struct sk_buff *skb)
2209 {
2210 mutex_lock(&xfrm_cfg_mutex);
2211 netlink_rcv_skb(skb, &xfrm_user_rcv_msg);
2212 mutex_unlock(&xfrm_cfg_mutex);
2213 }
2214
2215 static inline size_t xfrm_expire_msgsize(void)
2216 {
2217 return NLMSG_ALIGN(sizeof(struct xfrm_user_expire))
2218 + nla_total_size(sizeof(struct xfrm_mark));
2219 }
2220
2221 static int build_expire(struct sk_buff *skb, struct xfrm_state *x, struct km_event *c)
2222 {
2223 struct xfrm_user_expire *ue;
2224 struct nlmsghdr *nlh;
2225
2226 nlh = nlmsg_put(skb, c->pid, 0, XFRM_MSG_EXPIRE, sizeof(*ue), 0);
2227 if (nlh == NULL)
2228 return -EMSGSIZE;
2229
2230 ue = nlmsg_data(nlh);
2231 copy_to_user_state(x, &ue->state);
2232 ue->hard = (c->data.hard != 0) ? 1 : 0;
2233
2234 if (xfrm_mark_put(skb, &x->mark))
2235 goto nla_put_failure;
2236
2237 return nlmsg_end(skb, nlh);
2238
2239 nla_put_failure:
2240 return -EMSGSIZE;
2241 }
2242
2243 static int xfrm_exp_state_notify(struct xfrm_state *x, struct km_event *c)
2244 {
2245 struct net *net = xs_net(x);
2246 struct sk_buff *skb;
2247
2248 skb = nlmsg_new(xfrm_expire_msgsize(), GFP_ATOMIC);
2249 if (skb == NULL)
2250 return -ENOMEM;
2251
2252 if (build_expire(skb, x, c) < 0) {
2253 kfree_skb(skb);
2254 return -EMSGSIZE;
2255 }
2256
2257 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
2258 }
2259
2260 static int xfrm_aevent_state_notify(struct xfrm_state *x, struct km_event *c)
2261 {
2262 struct net *net = xs_net(x);
2263 struct sk_buff *skb;
2264
2265 skb = nlmsg_new(xfrm_aevent_msgsize(), GFP_ATOMIC);
2266 if (skb == NULL)
2267 return -ENOMEM;
2268
2269 if (build_aevent(skb, x, c) < 0)
2270 BUG();
2271
2272 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_AEVENTS, GFP_ATOMIC);
2273 }
2274
2275 static int xfrm_notify_sa_flush(struct km_event *c)
2276 {
2277 struct net *net = c->net;
2278 struct xfrm_usersa_flush *p;
2279 struct nlmsghdr *nlh;
2280 struct sk_buff *skb;
2281 int len = NLMSG_ALIGN(sizeof(struct xfrm_usersa_flush));
2282
2283 skb = nlmsg_new(len, GFP_ATOMIC);
2284 if (skb == NULL)
2285 return -ENOMEM;
2286
2287 nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_FLUSHSA, sizeof(*p), 0);
2288 if (nlh == NULL) {
2289 kfree_skb(skb);
2290 return -EMSGSIZE;
2291 }
2292
2293 p = nlmsg_data(nlh);
2294 p->proto = c->data.proto;
2295
2296 nlmsg_end(skb, nlh);
2297
2298 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC);
2299 }
2300
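/*
 * Upper bound on the attribute payload appended by copy_to_user_state_extra()
 * for this state; xfrm_notify_sa() sizes its skb from it, and a shortfall
 * trips the WARN_ON() there.
 */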
2301 static inline size_t xfrm_sa_len(struct xfrm_state *x)
2302 {
2303 size_t l = 0;
2304 if (x->aead)
2305 l += nla_total_size(aead_len(x->aead));
2306 if (x->aalg) {
2307 l += nla_total_size(sizeof(struct xfrm_algo) +
2308 (x->aalg->alg_key_len + 7) / 8);
2309 l += nla_total_size(xfrm_alg_auth_len(x->aalg));
2310 }
2311 if (x->ealg)
2312 l += nla_total_size(xfrm_alg_len(x->ealg));
2313 if (x->calg)
2314 l += nla_total_size(sizeof(*x->calg));
2315 if (x->encap)
2316 l += nla_total_size(sizeof(*x->encap));
2317 if (x->tfcpad)
2318 l += nla_total_size(sizeof(x->tfcpad));
2319 if (x->security)
2320 l += nla_total_size(sizeof(struct xfrm_user_sec_ctx) +
2321 x->security->ctx_len);
2322 if (x->coaddr)
2323 l += nla_total_size(sizeof(*x->coaddr));
2324
2325 /* Must count x->lastused as it may become non-zero behind our back. */
2326 l += nla_total_size(sizeof(u64));
2327
2328 return l;
2329 }
2330
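/*
 * SA change notification.  For XFRM_MSG_DELSA the fixed header is an
 * xfrm_usersa_id and the full xfrm_usersa_info is carried in an XFRMA_SA
 * attribute instead; for NEWSA/UPDSA the xfrm_usersa_info is the header.
 */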
2331 static int xfrm_notify_sa(struct xfrm_state *x, struct km_event *c)
2332 {
2333 struct net *net = xs_net(x);
2334 struct xfrm_usersa_info *p;
2335 struct xfrm_usersa_id *id;
2336 struct nlmsghdr *nlh;
2337 struct sk_buff *skb;
2338 int len = xfrm_sa_len(x);
2339 int headlen;
2340
2341 headlen = sizeof(*p);
2342 if (c->event == XFRM_MSG_DELSA) {
2343 len += nla_total_size(headlen);
2344 headlen = sizeof(*id);
2345 len += nla_total_size(sizeof(struct xfrm_mark));
2346 }
2347 len += NLMSG_ALIGN(headlen);
2348
2349 skb = nlmsg_new(len, GFP_ATOMIC);
2350 if (skb == NULL)
2351 return -ENOMEM;
2352
2353 nlh = nlmsg_put(skb, c->pid, c->seq, c->event, headlen, 0);
2354 if (nlh == NULL)
2355 goto nla_put_failure;
2356
2357 p = nlmsg_data(nlh);
2358 if (c->event == XFRM_MSG_DELSA) {
2359 struct nlattr *attr;
2360
2361 id = nlmsg_data(nlh);
2362 memcpy(&id->daddr, &x->id.daddr, sizeof(id->daddr));
2363 id->spi = x->id.spi;
2364 id->family = x->props.family;
2365 id->proto = x->id.proto;
2366
2367 attr = nla_reserve(skb, XFRMA_SA, sizeof(*p));
2368 if (attr == NULL)
2369 goto nla_put_failure;
2370
2371 p = nla_data(attr);
2372 }
2373
2374 if (copy_to_user_state_extra(x, p, skb))
2375 goto nla_put_failure;
2376
2377 nlmsg_end(skb, nlh);
2378
2379 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC);
2380
2381 nla_put_failure:
2382 /* xfrm_sa_len() computed a size that was too small -- this is a bug. */
2383 WARN_ON(1);
2384 kfree_skb(skb);
2385 return -1;
2386 }
2387
2388 static int xfrm_send_state_notify(struct xfrm_state *x, struct km_event *c)
2389 {
2390
2391 switch (c->event) {
2392 case XFRM_MSG_EXPIRE:
2393 return xfrm_exp_state_notify(x, c);
2394 case XFRM_MSG_NEWAE:
2395 return xfrm_aevent_state_notify(x, c);
2396 case XFRM_MSG_DELSA:
2397 case XFRM_MSG_UPDSA:
2398 case XFRM_MSG_NEWSA:
2399 return xfrm_notify_sa(x, c);
2400 case XFRM_MSG_FLUSHSA:
2401 return xfrm_notify_sa_flush(c);
2402 default:
2403 printk(KERN_NOTICE "xfrm_user: Unknown SA event %d\n",
2404 c->event);
2405 break;
2406 }
2407
2408 return 0;
2409
2410 }
2411
2412 static inline size_t xfrm_acquire_msgsize(struct xfrm_state *x,
2413 struct xfrm_policy *xp)
2414 {
2415 return NLMSG_ALIGN(sizeof(struct xfrm_user_acquire))
2416 + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
2417 + nla_total_size(sizeof(struct xfrm_mark))
2418 + nla_total_size(xfrm_user_sec_ctx_size(x->security))
2419 + userpolicy_type_attrsize();
2420 }
2421
2422 static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
2423 struct xfrm_tmpl *xt, struct xfrm_policy *xp,
2424 int dir)
2425 {
2426 struct xfrm_user_acquire *ua;
2427 struct nlmsghdr *nlh;
2428 __u32 seq = xfrm_get_acqseq();
2429
2430 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_ACQUIRE, sizeof(*ua), 0);
2431 if (nlh == NULL)
2432 return -EMSGSIZE;
2433
2434 ua = nlmsg_data(nlh);
2435 memcpy(&ua->id, &x->id, sizeof(ua->id));
2436 memcpy(&ua->saddr, &x->props.saddr, sizeof(ua->saddr));
2437 memcpy(&ua->sel, &x->sel, sizeof(ua->sel));
2438 copy_to_user_policy(xp, &ua->policy, dir);
2439 ua->aalgos = xt->aalgos;
2440 ua->ealgos = xt->ealgos;
2441 ua->calgos = xt->calgos;
2442 ua->seq = x->km.seq = seq;
2443
2444 if (copy_to_user_tmpl(xp, skb) < 0)
2445 goto nlmsg_failure;
2446 if (copy_to_user_state_sec_ctx(x, skb))
2447 goto nlmsg_failure;
2448 if (copy_to_user_policy_type(xp->type, skb) < 0)
2449 goto nlmsg_failure;
2450 if (xfrm_mark_put(skb, &xp->mark))
2451 goto nla_put_failure;
2452
2453 return nlmsg_end(skb, nlh);
2454
2455 nla_put_failure:
2456 nlmsg_failure:
2457 nlmsg_cancel(skb, nlh);
2458 return -EMSGSIZE;
2459 }
2460
2461 static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt,
2462 struct xfrm_policy *xp, int dir)
2463 {
2464 struct net *net = xs_net(x);
2465 struct sk_buff *skb;
2466
2467 skb = nlmsg_new(xfrm_acquire_msgsize(x, xp), GFP_ATOMIC);
2468 if (skb == NULL)
2469 return -ENOMEM;
2470
2471 if (build_acquire(skb, x, xt, xp, dir) < 0)
2472 BUG();
2473
2474 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_ACQUIRE, GFP_ATOMIC);
2475 }
2476
2477 /* User gives us a struct xfrm_userpolicy_info followed by an array of zero
2478 * or more struct xfrm_user_tmpl entries (see the layout sketch after this function).
2479 */
2480 static struct xfrm_policy *xfrm_compile_policy(struct sock *sk, int opt,
2481 u8 *data, int len, int *dir)
2482 {
2483 struct net *net = sock_net(sk);
2484 struct xfrm_userpolicy_info *p = (struct xfrm_userpolicy_info *)data;
2485 struct xfrm_user_tmpl *ut = (struct xfrm_user_tmpl *) (p + 1);
2486 struct xfrm_policy *xp;
2487 int nr;
2488
2489 switch (sk->sk_family) {
2490 case AF_INET:
2491 if (opt != IP_XFRM_POLICY) {
2492 *dir = -EOPNOTSUPP;
2493 return NULL;
2494 }
2495 break;
2496 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
2497 case AF_INET6:
2498 if (opt != IPV6_XFRM_POLICY) {
2499 *dir = -EOPNOTSUPP;
2500 return NULL;
2501 }
2502 break;
2503 #endif
2504 default:
2505 *dir = -EINVAL;
2506 return NULL;
2507 }
2508
2509 *dir = -EINVAL;
2510
2511 if (len < sizeof(*p) ||
2512 verify_newpolicy_info(p))
2513 return NULL;
2514
2515 nr = ((len - sizeof(*p)) / sizeof(*ut));
2516 if (validate_tmpl(nr, ut, p->sel.family))
2517 return NULL;
2518
2519 if (p->dir > XFRM_POLICY_OUT)
2520 return NULL;
2521
2522 xp = xfrm_policy_alloc(net, GFP_ATOMIC);
2523 if (xp == NULL) {
2524 *dir = -ENOBUFS;
2525 return NULL;
2526 }
2527
2528 copy_from_user_policy(xp, p);
2529 xp->type = XFRM_POLICY_TYPE_MAIN;
2530 copy_templates(xp, ut, nr);
2531
2532 *dir = p->dir;
2533
2534 return xp;
2535 }
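/*
 * Illustration only (userspace, not part of this file): a minimal sketch,
 * assuming <linux/xfrm.h>, <linux/in.h> and an already-created AF_INET
 * socket fd, of the flat buffer parsed above -- one xfrm_userpolicy_info
 * immediately followed by the template array.  The field values are
 * placeholders, not a usable policy:
 *
 *	struct {
 *		struct xfrm_userpolicy_info info;
 *		struct xfrm_user_tmpl tmpl[1];
 *	} req = {
 *		.info.dir	  = XFRM_POLICY_OUT,
 *		.tmpl[0].id.proto = IPPROTO_ESP,
 *	};
 *
 *	setsockopt(fd, IPPROTO_IP, IP_XFRM_POLICY, &req, sizeof(req));
 */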
2536
2537 static inline size_t xfrm_polexpire_msgsize(struct xfrm_policy *xp)
2538 {
2539 return NLMSG_ALIGN(sizeof(struct xfrm_user_polexpire))
2540 + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
2541 + nla_total_size(xfrm_user_sec_ctx_size(xp->security))
2542 + nla_total_size(sizeof(struct xfrm_mark))
2543 + userpolicy_type_attrsize();
2544 }
2545
2546 static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp,
2547 int dir, struct km_event *c)
2548 {
2549 struct xfrm_user_polexpire *upe;
2550 struct nlmsghdr *nlh;
2551 int hard = c->data.hard;
2552
2553 nlh = nlmsg_put(skb, c->pid, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe), 0);
2554 if (nlh == NULL)
2555 return -EMSGSIZE;
2556
2557 upe = nlmsg_data(nlh);
2558 copy_to_user_policy(xp, &upe->pol, dir);
2559 if (copy_to_user_tmpl(xp, skb) < 0)
2560 goto nlmsg_failure;
2561 if (copy_to_user_sec_ctx(xp, skb))
2562 goto nlmsg_failure;
2563 if (copy_to_user_policy_type(xp->type, skb) < 0)
2564 goto nlmsg_failure;
2565 if (xfrm_mark_put(skb, &xp->mark))
2566 goto nla_put_failure;
2567 upe->hard = !!hard;
2568
2569 return nlmsg_end(skb, nlh);
2570
2571 nla_put_failure:
2572 nlmsg_failure:
2573 nlmsg_cancel(skb, nlh);
2574 return -EMSGSIZE;
2575 }
2576
2577 static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
2578 {
2579 struct net *net = xp_net(xp);
2580 struct sk_buff *skb;
2581
2582 skb = nlmsg_new(xfrm_polexpire_msgsize(xp), GFP_ATOMIC);
2583 if (skb == NULL)
2584 return -ENOMEM;
2585
2586 if (build_polexpire(skb, xp, dir, c) < 0)
2587 BUG();
2588
2589 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
2590 }
2591
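/*
 * Policy change notification; as with SAs, XFRM_MSG_DELPOLICY switches the
 * fixed header to an xfrm_userpolicy_id and moves the xfrm_userpolicy_info
 * into an XFRMA_POLICY attribute.
 */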
2592 static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *c)
2593 {
2594 struct net *net = xp_net(xp);
2595 struct xfrm_userpolicy_info *p;
2596 struct xfrm_userpolicy_id *id;
2597 struct nlmsghdr *nlh;
2598 struct sk_buff *skb;
2599 int len = nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr);
2600 int headlen;
2601
2602 headlen = sizeof(*p);
2603 if (c->event == XFRM_MSG_DELPOLICY) {
2604 len += nla_total_size(headlen);
2605 headlen = sizeof(*id);
2606 }
2607 len += userpolicy_type_attrsize();
2608 len += nla_total_size(sizeof(struct xfrm_mark));
2609 len += NLMSG_ALIGN(headlen);
2610
2611 skb = nlmsg_new(len, GFP_ATOMIC);
2612 if (skb == NULL)
2613 return -ENOMEM;
2614
2615 nlh = nlmsg_put(skb, c->pid, c->seq, c->event, headlen, 0);
2616 if (nlh == NULL)
2617 goto nlmsg_failure;
2618
2619 p = nlmsg_data(nlh);
2620 if (c->event == XFRM_MSG_DELPOLICY) {
2621 struct nlattr *attr;
2622
2623 id = nlmsg_data(nlh);
2624 memset(id, 0, sizeof(*id));
2625 id->dir = dir;
2626 if (c->data.byid)
2627 id->index = xp->index;
2628 else
2629 memcpy(&id->sel, &xp->selector, sizeof(id->sel));
2630
2631 attr = nla_reserve(skb, XFRMA_POLICY, sizeof(*p));
2632 if (attr == NULL)
2633 goto nlmsg_failure;
2634
2635 p = nla_data(attr);
2636 }
2637
2638 copy_to_user_policy(xp, p, dir);
2639 if (copy_to_user_tmpl(xp, skb) < 0)
2640 goto nlmsg_failure;
2641 if (copy_to_user_policy_type(xp->type, skb) < 0)
2642 goto nlmsg_failure;
2643
2644 if (xfrm_mark_put(skb, &xp->mark))
2645 goto nla_put_failure;
2646
2647 nlmsg_end(skb, nlh);
2648
2649 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);
2650
2651 nla_put_failure:
2652 nlmsg_failure:
2653 kfree_skb(skb);
2654 return -1;
2655 }
2656
2657 static int xfrm_notify_policy_flush(struct km_event *c)
2658 {
2659 struct net *net = c->net;
2660 struct nlmsghdr *nlh;
2661 struct sk_buff *skb;
2662
2663 skb = nlmsg_new(userpolicy_type_attrsize(), GFP_ATOMIC);
2664 if (skb == NULL)
2665 return -ENOMEM;
2666
2667 nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_FLUSHPOLICY, 0, 0);
2668 if (nlh == NULL)
2669 goto nlmsg_failure;
2670 if (copy_to_user_policy_type(c->data.type, skb) < 0)
2671 goto nlmsg_failure;
2672
2673 nlmsg_end(skb, nlh);
2674
2675 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);
2676
2677 nlmsg_failure:
2678 kfree_skb(skb);
2679 return -1;
2680 }
2681
2682 static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
2683 {
2684
2685 switch (c->event) {
2686 case XFRM_MSG_NEWPOLICY:
2687 case XFRM_MSG_UPDPOLICY:
2688 case XFRM_MSG_DELPOLICY:
2689 return xfrm_notify_policy(xp, dir, c);
2690 case XFRM_MSG_FLUSHPOLICY:
2691 return xfrm_notify_policy_flush(c);
2692 case XFRM_MSG_POLEXPIRE:
2693 return xfrm_exp_policy_notify(xp, dir, c);
2694 default:
2695 printk(KERN_NOTICE "xfrm_user: Unknown Policy event %d\n",
2696 c->event);
2697 }
2698
2699 return 0;
2700
2701 }
2702
2703 static inline size_t xfrm_report_msgsize(void)
2704 {
2705 return NLMSG_ALIGN(sizeof(struct xfrm_user_report));
2706 }
2707
2708 static int build_report(struct sk_buff *skb, u8 proto,
2709 struct xfrm_selector *sel, xfrm_address_t *addr)
2710 {
2711 struct xfrm_user_report *ur;
2712 struct nlmsghdr *nlh;
2713
2714 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_REPORT, sizeof(*ur), 0);
2715 if (nlh == NULL)
2716 return -EMSGSIZE;
2717
2718 ur = nlmsg_data(nlh);
2719 ur->proto = proto;
2720 memcpy(&ur->sel, sel, sizeof(ur->sel));
2721
2722 if (addr)
2723 NLA_PUT(skb, XFRMA_COADDR, sizeof(*addr), addr);
2724
2725 return nlmsg_end(skb, nlh);
2726
2727 nla_put_failure:
2728 nlmsg_cancel(skb, nlh);
2729 return -EMSGSIZE;
2730 }
2731
2732 static int xfrm_send_report(struct net *net, u8 proto,
2733 struct xfrm_selector *sel, xfrm_address_t *addr)
2734 {
2735 struct sk_buff *skb;
2736
2737 skb = nlmsg_new(xfrm_report_msgsize(), GFP_ATOMIC);
2738 if (skb == NULL)
2739 return -ENOMEM;
2740
2741 if (build_report(skb, proto, sel, addr) < 0)
2742 BUG();
2743
2744 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_REPORT, GFP_ATOMIC);
2745 }
2746
2747 static inline size_t xfrm_mapping_msgsize(void)
2748 {
2749 return NLMSG_ALIGN(sizeof(struct xfrm_user_mapping));
2750 }
2751
2752 static int build_mapping(struct sk_buff *skb, struct xfrm_state *x,
2753 xfrm_address_t *new_saddr, __be16 new_sport)
2754 {
2755 struct xfrm_user_mapping *um;
2756 struct nlmsghdr *nlh;
2757
2758 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MAPPING, sizeof(*um), 0);
2759 if (nlh == NULL)
2760 return -EMSGSIZE;
2761
2762 um = nlmsg_data(nlh);
2763
2764 memcpy(&um->id.daddr, &x->id.daddr, sizeof(um->id.daddr));
2765 um->id.spi = x->id.spi;
2766 um->id.family = x->props.family;
2767 um->id.proto = x->id.proto;
2768 memcpy(&um->new_saddr, new_saddr, sizeof(um->new_saddr));
2769 memcpy(&um->old_saddr, &x->props.saddr, sizeof(um->old_saddr));
2770 um->new_sport = new_sport;
2771 um->old_sport = x->encap->encap_sport;
2772 um->reqid = x->props.reqid;
2773
2774 return nlmsg_end(skb, nlh);
2775 }
2776
2777 static int xfrm_send_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
2778 __be16 sport)
2779 {
2780 struct net *net = xs_net(x);
2781 struct sk_buff *skb;
2782
2783 if (x->id.proto != IPPROTO_ESP)
2784 return -EINVAL;
2785
2786 if (!x->encap)
2787 return -EINVAL;
2788
2789 skb = nlmsg_new(xfrm_mapping_msgsize(), GFP_ATOMIC);
2790 if (skb == NULL)
2791 return -ENOMEM;
2792
2793 if (build_mapping(skb, x, ipaddr, sport) < 0)
2794 BUG();
2795
2796 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_MAPPING, GFP_ATOMIC);
2797 }
2798
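/*
 * Key-manager hooks registered with the xfrm core below.  The core calls
 * them for state/policy events, ACQUIREs, MIGRATEs, NAT-T mapping changes
 * and per-socket policy parsing, and this file services them over
 * NETLINK_XFRM, mostly as multicasts on the matching XFRMNLGRP_* groups.
 */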
2799 static struct xfrm_mgr netlink_mgr = {
2800 .id = "netlink",
2801 .notify = xfrm_send_state_notify,
2802 .acquire = xfrm_send_acquire,
2803 .compile_policy = xfrm_compile_policy,
2804 .notify_policy = xfrm_send_policy_notify,
2805 .report = xfrm_send_report,
2806 .migrate = xfrm_send_migrate,
2807 .new_mapping = xfrm_send_mapping,
2808 };
2809
2810 static int __net_init xfrm_user_net_init(struct net *net)
2811 {
2812 struct sock *nlsk;
2813
2814 nlsk = netlink_kernel_create(net, NETLINK_XFRM, XFRMNLGRP_MAX,
2815 xfrm_netlink_rcv, NULL, THIS_MODULE);
2816 if (nlsk == NULL)
2817 return -ENOMEM;
2818 net->xfrm.nlsk_stash = nlsk; /* Don't set to NULL */
2819 rcu_assign_pointer(net->xfrm.nlsk, nlsk);
2820 return 0;
2821 }
2822
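/*
 * Teardown is two-pass: clear every per-net nlsk pointer first, wait for
 * in-flight receivers with synchronize_net(), then release the sockets
 * kept in nlsk_stash.
 */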
2823 static void __net_exit xfrm_user_net_exit(struct list_head *net_exit_list)
2824 {
2825 struct net *net;
2826 list_for_each_entry(net, net_exit_list, exit_list)
2827 rcu_assign_pointer(net->xfrm.nlsk, NULL);
2828 synchronize_net();
2829 list_for_each_entry(net, net_exit_list, exit_list)
2830 netlink_kernel_release(net->xfrm.nlsk_stash);
2831 }
2832
2833 static struct pernet_operations xfrm_user_net_ops = {
2834 .init = xfrm_user_net_init,
2835 .exit_batch = xfrm_user_net_exit,
2836 };
2837
2838 static int __init xfrm_user_init(void)
2839 {
2840 int rv;
2841
2842 printk(KERN_INFO "Initializing XFRM netlink socket\n");
2843
2844 rv = register_pernet_subsys(&xfrm_user_net_ops);
2845 if (rv < 0)
2846 return rv;
2847 rv = xfrm_register_km(&netlink_mgr);
2848 if (rv < 0)
2849 unregister_pernet_subsys(&xfrm_user_net_ops);
2850 return rv;
2851 }
2852
2853 static void __exit xfrm_user_exit(void)
2854 {
2855 xfrm_unregister_km(&netlink_mgr);
2856 unregister_pernet_subsys(&xfrm_user_net_ops);
2857 }
2858
2859 module_init(xfrm_user_init);
2860 module_exit(xfrm_user_exit);
2861 MODULE_LICENSE("GPL");
2862 MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_XFRM);
2863