1 /* xfrm_user.c: User interface to configure xfrm engine.
2 *
3 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
4 *
5 * Changes:
6 * Mitsuru KANDA @USAGI
7 * Kazunori MIYAZAWA @USAGI
8 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
9 * IPv6 support
10 *
11 */
12
13 #include <linux/crypto.h>
14 #include <linux/module.h>
15 #include <linux/kernel.h>
16 #include <linux/types.h>
17 #include <linux/slab.h>
18 #include <linux/socket.h>
19 #include <linux/string.h>
20 #include <linux/net.h>
21 #include <linux/skbuff.h>
22 #include <linux/rtnetlink.h>
23 #include <linux/pfkeyv2.h>
24 #include <linux/ipsec.h>
25 #include <linux/init.h>
26 #include <linux/security.h>
27 #include <net/sock.h>
28 #include <net/xfrm.h>
29 #include <net/netlink.h>
30 #include <asm/uaccess.h>
31 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
32 #include <linux/in6.h>
33 #endif
34 #include <linux/audit.h>
35
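/*
 * Sanity-check one optional algorithm attribute (XFRMA_ALG_AUTH/CRYPT/COMP).
 * The attribute payload must be large enough for struct xfrm_algo plus the
 * key material implied by alg_key_len (given in bits).  Zero-length keys are
 * only accepted for the null transforms and for compression.  The algorithm
 * name is forcibly NUL-terminated before it is used for lookups.
 */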
36 static int verify_one_alg(struct rtattr **xfrma, enum xfrm_attr_type_t type)
37 {
38 struct rtattr *rt = xfrma[type - 1];
39 struct xfrm_algo *algp;
40 int len;
41
42 if (!rt)
43 return 0;
44
45 len = (rt->rta_len - sizeof(*rt)) - sizeof(*algp);
46 if (len < 0)
47 return -EINVAL;
48
49 algp = RTA_DATA(rt);
50
51 len -= (algp->alg_key_len + 7U) / 8;
52 if (len < 0)
53 return -EINVAL;
54
55 switch (type) {
56 case XFRMA_ALG_AUTH:
57 if (!algp->alg_key_len &&
58 strcmp(algp->alg_name, "digest_null") != 0)
59 return -EINVAL;
60 break;
61
62 case XFRMA_ALG_CRYPT:
63 if (!algp->alg_key_len &&
64 strcmp(algp->alg_name, "cipher_null") != 0)
65 return -EINVAL;
66 break;
67
68 case XFRMA_ALG_COMP:
69 /* Zero length keys are legal. */
70 break;
71
72 default:
73 return -EINVAL;
74 }
75
76 algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0';
77 return 0;
78 }
79
80 static int verify_encap_tmpl(struct rtattr **xfrma)
81 {
82 struct rtattr *rt = xfrma[XFRMA_ENCAP - 1];
83 struct xfrm_encap_tmpl *encap;
84
85 if (!rt)
86 return 0;
87
88 if ((rt->rta_len - sizeof(*rt)) < sizeof(*encap))
89 return -EINVAL;
90
91 return 0;
92 }
93
94 static int verify_one_addr(struct rtattr **xfrma, enum xfrm_attr_type_t type,
95 xfrm_address_t **addrp)
96 {
97 struct rtattr *rt = xfrma[type - 1];
98
99 if (!rt)
100 return 0;
101
102 if ((rt->rta_len - sizeof(*rt)) < sizeof(**addrp))
103 return -EINVAL;
104
105 if (addrp)
106 *addrp = RTA_DATA(rt);
107
108 return 0;
109 }
110
111 static inline int verify_sec_ctx_len(struct rtattr **xfrma)
112 {
113 struct rtattr *rt = xfrma[XFRMA_SEC_CTX - 1];
114 struct xfrm_user_sec_ctx *uctx;
115 int len = 0;
116
117 if (!rt)
118 return 0;
119
120 if (rt->rta_len < sizeof(*uctx))
121 return -EINVAL;
122
123 uctx = RTA_DATA(rt);
124
125 len += sizeof(struct xfrm_user_sec_ctx);
126 len += uctx->ctx_len;
127
128 if (uctx->len != len)
129 return -EINVAL;
130
131 return 0;
132 }
133
134
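/*
 * Validate an incoming XFRM_MSG_NEWSA/UPDSA request: the address family,
 * the attribute mix required by the IPsec protocol (AH wants an auth
 * algorithm only, ESP wants auth and/or crypt, IPComp wants a compression
 * algorithm, and the MIPv6 DSTOPTS/ROUTING "protocols" want a care-of
 * address and nothing else), the individual attribute lengths, and the
 * SA mode.
 */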
135 static int verify_newsa_info(struct xfrm_usersa_info *p,
136 struct rtattr **xfrma)
137 {
138 int err;
139
140 err = -EINVAL;
141 switch (p->family) {
142 case AF_INET:
143 break;
144
145 case AF_INET6:
146 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
147 break;
148 #else
149 err = -EAFNOSUPPORT;
150 goto out;
151 #endif
152
153 default:
154 goto out;
155 }
156
157 err = -EINVAL;
158 switch (p->id.proto) {
159 case IPPROTO_AH:
160 if (!xfrma[XFRMA_ALG_AUTH-1] ||
161 xfrma[XFRMA_ALG_CRYPT-1] ||
162 xfrma[XFRMA_ALG_COMP-1])
163 goto out;
164 break;
165
166 case IPPROTO_ESP:
167 if ((!xfrma[XFRMA_ALG_AUTH-1] &&
168 !xfrma[XFRMA_ALG_CRYPT-1]) ||
169 xfrma[XFRMA_ALG_COMP-1])
170 goto out;
171 break;
172
173 case IPPROTO_COMP:
174 if (!xfrma[XFRMA_ALG_COMP-1] ||
175 xfrma[XFRMA_ALG_AUTH-1] ||
176 xfrma[XFRMA_ALG_CRYPT-1])
177 goto out;
178 break;
179
180 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
181 case IPPROTO_DSTOPTS:
182 case IPPROTO_ROUTING:
183 if (xfrma[XFRMA_ALG_COMP-1] ||
184 xfrma[XFRMA_ALG_AUTH-1] ||
185 xfrma[XFRMA_ALG_CRYPT-1] ||
186 xfrma[XFRMA_ENCAP-1] ||
187 xfrma[XFRMA_SEC_CTX-1] ||
188 !xfrma[XFRMA_COADDR-1])
189 goto out;
190 break;
191 #endif
192
193 default:
194 goto out;
195 }
196
197 if ((err = verify_one_alg(xfrma, XFRMA_ALG_AUTH)))
198 goto out;
199 if ((err = verify_one_alg(xfrma, XFRMA_ALG_CRYPT)))
200 goto out;
201 if ((err = verify_one_alg(xfrma, XFRMA_ALG_COMP)))
202 goto out;
203 if ((err = verify_encap_tmpl(xfrma)))
204 goto out;
205 if ((err = verify_sec_ctx_len(xfrma)))
206 goto out;
207 if ((err = verify_one_addr(xfrma, XFRMA_COADDR, NULL)))
208 goto out;
209
210 err = -EINVAL;
211 switch (p->mode) {
212 case XFRM_MODE_TRANSPORT:
213 case XFRM_MODE_TUNNEL:
214 case XFRM_MODE_ROUTEOPTIMIZATION:
215 case XFRM_MODE_BEET:
216 break;
217
218 default:
219 goto out;
220 }
221
222 err = 0;
223
224 out:
225 return err;
226 }
227
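/*
 * Copy one user-supplied algorithm into the state: look the algorithm up
 * by name with the supplied get_byname() helper, record its SADB algorithm
 * id in *props, and kmemdup() the struct xfrm_algo together with its key
 * into kernel memory.
 */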
228 static int attach_one_algo(struct xfrm_algo **algpp, u8 *props,
229 struct xfrm_algo_desc *(*get_byname)(char *, int),
230 struct rtattr *u_arg)
231 {
232 struct rtattr *rta = u_arg;
233 struct xfrm_algo *p, *ualg;
234 struct xfrm_algo_desc *algo;
235 int len;
236
237 if (!rta)
238 return 0;
239
240 ualg = RTA_DATA(rta);
241
242 algo = get_byname(ualg->alg_name, 1);
243 if (!algo)
244 return -ENOSYS;
245 *props = algo->desc.sadb_alg_id;
246
247 len = sizeof(*ualg) + (ualg->alg_key_len + 7U) / 8;
248 p = kmemdup(ualg, len, GFP_KERNEL);
249 if (!p)
250 return -ENOMEM;
251
252 strcpy(p->alg_name, algo->name);
253 *algpp = p;
254 return 0;
255 }
256
257 static int attach_encap_tmpl(struct xfrm_encap_tmpl **encapp, struct rtattr *u_arg)
258 {
259 struct rtattr *rta = u_arg;
260 struct xfrm_encap_tmpl *p, *uencap;
261
262 if (!rta)
263 return 0;
264
265 uencap = RTA_DATA(rta);
266 p = kmemdup(uencap, sizeof(*p), GFP_KERNEL);
267 if (!p)
268 return -ENOMEM;
269
270 *encapp = p;
271 return 0;
272 }
273
274
275 static inline int xfrm_user_sec_ctx_size(struct xfrm_sec_ctx *xfrm_ctx)
276 {
277 int len = 0;
278
279 if (xfrm_ctx) {
280 len += sizeof(struct xfrm_user_sec_ctx);
281 len += xfrm_ctx->ctx_len;
282 }
283 return len;
284 }
285
286 static int attach_sec_ctx(struct xfrm_state *x, struct rtattr *u_arg)
287 {
288 struct xfrm_user_sec_ctx *uctx;
289
290 if (!u_arg)
291 return 0;
292
293 uctx = RTA_DATA(u_arg);
294 return security_xfrm_state_alloc(x, uctx);
295 }
296
297 static int attach_one_addr(xfrm_address_t **addrpp, struct rtattr *u_arg)
298 {
299 struct rtattr *rta = u_arg;
300 xfrm_address_t *p, *uaddrp;
301
302 if (!rta)
303 return 0;
304
305 uaddrp = RTA_DATA(rta);
306 p = kmemdup(uaddrp, sizeof(*p), GFP_KERNEL);
307 if (!p)
308 return -ENOMEM;
309
310 *addrpp = p;
311 return 0;
312 }
313
314 static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
315 {
316 memcpy(&x->id, &p->id, sizeof(x->id));
317 memcpy(&x->sel, &p->sel, sizeof(x->sel));
318 memcpy(&x->lft, &p->lft, sizeof(x->lft));
319 x->props.mode = p->mode;
320 x->props.replay_window = p->replay_window;
321 x->props.reqid = p->reqid;
322 x->props.family = p->family;
323 memcpy(&x->props.saddr, &p->saddr, sizeof(x->props.saddr));
324 x->props.flags = p->flags;
325 }
326
327 /*
328 * someday when pfkey also has support, we could have the code
329 * somehow made shareable and move it to xfrm_state.c - JHS
330 *
331 */
332 static int xfrm_update_ae_params(struct xfrm_state *x, struct rtattr **xfrma)
333 {
334 int err = - EINVAL;
335 struct rtattr *rp = xfrma[XFRMA_REPLAY_VAL-1];
336 struct rtattr *lt = xfrma[XFRMA_LTIME_VAL-1];
337 struct rtattr *et = xfrma[XFRMA_ETIMER_THRESH-1];
338 struct rtattr *rt = xfrma[XFRMA_REPLAY_THRESH-1];
339
340 if (rp) {
341 struct xfrm_replay_state *replay;
342 if (RTA_PAYLOAD(rp) < sizeof(*replay))
343 goto error;
344 replay = RTA_DATA(rp);
345 memcpy(&x->replay, replay, sizeof(*replay));
346 memcpy(&x->preplay, replay, sizeof(*replay));
347 }
348
349 if (lt) {
350 struct xfrm_lifetime_cur *ltime;
351 if (RTA_PAYLOAD(lt) < sizeof(*ltime))
352 goto error;
353 ltime = RTA_DATA(lt);
354 x->curlft.bytes = ltime->bytes;
355 x->curlft.packets = ltime->packets;
356 x->curlft.add_time = ltime->add_time;
357 x->curlft.use_time = ltime->use_time;
358 }
359
360 if (et) {
361 if (RTA_PAYLOAD(et) < sizeof(u32))
362 goto error;
363 x->replay_maxage = *(u32*)RTA_DATA(et);
364 }
365
366 if (rt) {
367 if (RTA_PAYLOAD(rt) < sizeof(u32))
368 goto error;
369 x->replay_maxdiff = *(u32*)RTA_DATA(rt);
370 }
371
372 return 0;
373 error:
374 return err;
375 }
376
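/*
 * Build a fully initialised xfrm_state from an XFRM_MSG_NEWSA/UPDSA
 * request: copy the fixed part, attach the optional algorithm, NAT-T
 * encapsulation, care-of-address and security-context attributes, run
 * xfrm_init_state(), seed the replay/aevent defaults from the sysctls,
 * and finally let xfrm_update_ae_params() override those defaults with
 * any attributes the caller supplied.  On error the half-built state is
 * marked dead and released.
 */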
377 static struct xfrm_state *xfrm_state_construct(struct xfrm_usersa_info *p,
378 struct rtattr **xfrma,
379 int *errp)
380 {
381 struct xfrm_state *x = xfrm_state_alloc();
382 int err = -ENOMEM;
383
384 if (!x)
385 goto error_no_put;
386
387 copy_from_user_state(x, p);
388
389 if ((err = attach_one_algo(&x->aalg, &x->props.aalgo,
390 xfrm_aalg_get_byname,
391 xfrma[XFRMA_ALG_AUTH-1])))
392 goto error;
393 if ((err = attach_one_algo(&x->ealg, &x->props.ealgo,
394 xfrm_ealg_get_byname,
395 xfrma[XFRMA_ALG_CRYPT-1])))
396 goto error;
397 if ((err = attach_one_algo(&x->calg, &x->props.calgo,
398 xfrm_calg_get_byname,
399 xfrma[XFRMA_ALG_COMP-1])))
400 goto error;
401 if ((err = attach_encap_tmpl(&x->encap, xfrma[XFRMA_ENCAP-1])))
402 goto error;
403 if ((err = attach_one_addr(&x->coaddr, xfrma[XFRMA_COADDR-1])))
404 goto error;
405 err = xfrm_init_state(x);
406 if (err)
407 goto error;
408
409 if ((err = attach_sec_ctx(x, xfrma[XFRMA_SEC_CTX-1])))
410 goto error;
411
412 x->km.seq = p->seq;
413 x->replay_maxdiff = sysctl_xfrm_aevent_rseqth;
414 /* sysctl_xfrm_aevent_etime is in 100ms units */
415 x->replay_maxage = (sysctl_xfrm_aevent_etime*HZ)/XFRM_AE_ETH_M;
416 x->preplay.bitmap = 0;
417 x->preplay.seq = x->replay.seq+x->replay_maxdiff;
418 x->preplay.oseq = x->replay.oseq +x->replay_maxdiff;
419
420 /* override default values from above */
421
422 err = xfrm_update_ae_params(x, (struct rtattr **)xfrma);
423 if (err < 0)
424 goto error;
425
426 return x;
427
428 error:
429 x->km.state = XFRM_STATE_DEAD;
430 xfrm_state_put(x);
431 error_no_put:
432 *errp = err;
433 return NULL;
434 }
435
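/*
 * XFRM_MSG_NEWSA/XFRM_MSG_UPDSA handler: verify the request, construct
 * the state, insert it (NEWSA) or update the existing one (UPDSA), emit
 * an audit record, and broadcast a km_state_notify() event on success.
 */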
436 static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
437 struct rtattr **xfrma)
438 {
439 struct xfrm_usersa_info *p = NLMSG_DATA(nlh);
440 struct xfrm_state *x;
441 int err;
442 struct km_event c;
443
444 err = verify_newsa_info(p, xfrma);
445 if (err)
446 return err;
447
448 x = xfrm_state_construct(p, xfrma, &err);
449 if (!x)
450 return err;
451
452 xfrm_state_hold(x);
453 if (nlh->nlmsg_type == XFRM_MSG_NEWSA)
454 err = xfrm_state_add(x);
455 else
456 err = xfrm_state_update(x);
457
458 xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid,
459 AUDIT_MAC_IPSEC_ADDSA, err ? 0 : 1, NULL, x);
460
461 if (err < 0) {
462 x->km.state = XFRM_STATE_DEAD;
463 __xfrm_state_put(x);
464 goto out;
465 }
466
467 c.seq = nlh->nlmsg_seq;
468 c.pid = nlh->nlmsg_pid;
469 c.event = nlh->nlmsg_type;
470
471 km_state_notify(x, &c);
472 out:
473 xfrm_state_put(x);
474 return err;
475 }
476
477 static struct xfrm_state *xfrm_user_state_lookup(struct xfrm_usersa_id *p,
478 struct rtattr **xfrma,
479 int *errp)
480 {
481 struct xfrm_state *x = NULL;
482 int err;
483
484 if (xfrm_id_proto_match(p->proto, IPSEC_PROTO_ANY)) {
485 err = -ESRCH;
486 x = xfrm_state_lookup(&p->daddr, p->spi, p->proto, p->family);
487 } else {
488 xfrm_address_t *saddr = NULL;
489
490 err = verify_one_addr(xfrma, XFRMA_SRCADDR, &saddr);
491 if (err)
492 goto out;
493
494 if (!saddr) {
495 err = -EINVAL;
496 goto out;
497 }
498
499 err = -ESRCH;
500 x = xfrm_state_lookup_byaddr(&p->daddr, saddr, p->proto,
501 p->family);
502 }
503
504 out:
505 if (!x && errp)
506 *errp = err;
507 return x;
508 }
509
510 static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
511 struct rtattr **xfrma)
512 {
513 struct xfrm_state *x;
514 int err = -ESRCH;
515 struct km_event c;
516 struct xfrm_usersa_id *p = NLMSG_DATA(nlh);
517
518 x = xfrm_user_state_lookup(p, xfrma, &err);
519 if (x == NULL)
520 return err;
521
522 if ((err = security_xfrm_state_delete(x)) != 0)
523 goto out;
524
525 if (xfrm_state_kern(x)) {
526 err = -EPERM;
527 goto out;
528 }
529
530 err = xfrm_state_delete(x);
531
532 if (err < 0)
533 goto out;
534
535 c.seq = nlh->nlmsg_seq;
536 c.pid = nlh->nlmsg_pid;
537 c.event = nlh->nlmsg_type;
538 km_state_notify(x, &c);
539
540 out:
541 xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid,
542 AUDIT_MAC_IPSEC_DELSA, err ? 0 : 1, NULL, x);
543 xfrm_state_put(x);
544 return err;
545 }
546
547 static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
548 {
549 memcpy(&p->id, &x->id, sizeof(p->id));
550 memcpy(&p->sel, &x->sel, sizeof(p->sel));
551 memcpy(&p->lft, &x->lft, sizeof(p->lft));
552 memcpy(&p->curlft, &x->curlft, sizeof(p->curlft));
553 memcpy(&p->stats, &x->stats, sizeof(p->stats));
554 memcpy(&p->saddr, &x->props.saddr, sizeof(p->saddr));
555 p->mode = x->props.mode;
556 p->replay_window = x->props.replay_window;
557 p->reqid = x->props.reqid;
558 p->family = x->props.family;
559 p->flags = x->props.flags;
560 p->seq = x->km.seq;
561 }
562
563 struct xfrm_dump_info {
564 struct sk_buff *in_skb;
565 struct sk_buff *out_skb;
566 u32 nlmsg_seq;
567 u16 nlmsg_flags;
568 int start_idx;
569 int this_idx;
570 };
571
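/*
 * xfrm_state_walk() callback used by the GETSA dump (and by
 * xfrm_state_netlink() for single-state replies): emit one
 * XFRM_MSG_NEWSA message for the state, followed by its algorithm,
 * encapsulation, security-context, care-of-address and last-used
 * attributes.
 */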
572 static int dump_one_state(struct xfrm_state *x, int count, void *ptr)
573 {
574 struct xfrm_dump_info *sp = ptr;
575 struct sk_buff *in_skb = sp->in_skb;
576 struct sk_buff *skb = sp->out_skb;
577 struct xfrm_usersa_info *p;
578 struct nlmsghdr *nlh;
579 unsigned char *b = skb_tail_pointer(skb);
580
581 if (sp->this_idx < sp->start_idx)
582 goto out;
583
584 nlh = NLMSG_PUT(skb, NETLINK_CB(in_skb).pid,
585 sp->nlmsg_seq,
586 XFRM_MSG_NEWSA, sizeof(*p));
587 nlh->nlmsg_flags = sp->nlmsg_flags;
588
589 p = NLMSG_DATA(nlh);
590 copy_to_user_state(x, p);
591
592 if (x->aalg)
593 RTA_PUT(skb, XFRMA_ALG_AUTH,
594 sizeof(*(x->aalg))+(x->aalg->alg_key_len+7)/8, x->aalg);
595 if (x->ealg)
596 RTA_PUT(skb, XFRMA_ALG_CRYPT,
597 sizeof(*(x->ealg))+(x->ealg->alg_key_len+7)/8, x->ealg);
598 if (x->calg)
599 RTA_PUT(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg);
600
601 if (x->encap)
602 RTA_PUT(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap);
603
604 if (x->security) {
605 int ctx_size = sizeof(struct xfrm_sec_ctx) +
606 x->security->ctx_len;
607 struct rtattr *rt = __RTA_PUT(skb, XFRMA_SEC_CTX, ctx_size);
608 struct xfrm_user_sec_ctx *uctx = RTA_DATA(rt);
609
610 uctx->exttype = XFRMA_SEC_CTX;
611 uctx->len = ctx_size;
612 uctx->ctx_doi = x->security->ctx_doi;
613 uctx->ctx_alg = x->security->ctx_alg;
614 uctx->ctx_len = x->security->ctx_len;
615 memcpy(uctx + 1, x->security->ctx_str, x->security->ctx_len);
616 }
617
618 if (x->coaddr)
619 RTA_PUT(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr);
620
621 if (x->lastused)
622 RTA_PUT(skb, XFRMA_LASTUSED, sizeof(x->lastused), &x->lastused);
623
624 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
625 out:
626 sp->this_idx++;
627 return 0;
628
629 nlmsg_failure:
630 rtattr_failure:
631 nlmsg_trim(skb, b);
632 return -1;
633 }
634
635 static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
636 {
637 struct xfrm_dump_info info;
638
639 info.in_skb = cb->skb;
640 info.out_skb = skb;
641 info.nlmsg_seq = cb->nlh->nlmsg_seq;
642 info.nlmsg_flags = NLM_F_MULTI;
643 info.this_idx = 0;
644 info.start_idx = cb->args[0];
645 (void) xfrm_state_walk(0, dump_one_state, &info);
646 cb->args[0] = info.this_idx;
647
648 return skb->len;
649 }
650
651 static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
652 struct xfrm_state *x, u32 seq)
653 {
654 struct xfrm_dump_info info;
655 struct sk_buff *skb;
656
657 skb = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC);
658 if (!skb)
659 return ERR_PTR(-ENOMEM);
660
661 info.in_skb = in_skb;
662 info.out_skb = skb;
663 info.nlmsg_seq = seq;
664 info.nlmsg_flags = 0;
665 info.this_idx = info.start_idx = 0;
666
667 if (dump_one_state(x, 0, &info)) {
668 kfree_skb(skb);
669 return NULL;
670 }
671
672 return skb;
673 }
674
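/*
 * Build an XFRM_MSG_NEWSADINFO reply.  The fixed payload is the u32
 * flags word echoed back from the request; the counters selected by
 * those flags follow as u32 netlink attributes.  Rough reply layout:
 *
 *   nlmsghdr | u32 flags | [XFRMA_SADHMASK] [XFRMA_SADHMAX] [XFRMA_SADCNT]
 */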
675 static int build_sadinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags)
676 {
677 struct xfrm_sadinfo si;
678 struct nlmsghdr *nlh;
679 u32 *f;
680
681 nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSADINFO, sizeof(u32), 0);
682 if (nlh == NULL) /* shouldn't really happen ... */
683 return -EMSGSIZE;
684
685 f = nlmsg_data(nlh);
686 *f = flags;
687 xfrm_sad_getinfo(&si);
688
689 if (flags & XFRM_SAD_HMASK)
690 NLA_PUT_U32(skb, XFRMA_SADHMASK, si.sadhcnt);
691 if (flags & XFRM_SAD_HMAX)
692 NLA_PUT_U32(skb, XFRMA_SADHMAX, si.sadhmcnt);
693 if (flags & XFRM_SAD_CNT)
694 NLA_PUT_U32(skb, XFRMA_SADCNT, si.sadcnt);
695
696 return nlmsg_end(skb, nlh);
697
698 nla_put_failure:
699 nlmsg_cancel(skb, nlh);
700 return -EMSGSIZE;
701 }
702
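/*
 * XFRM_MSG_GETSADINFO handler: the request payload is a single u32 of
 * XFRM_SAD_* flags.  The reply size is computed up front from those
 * flags, so build_sadinfo() filling the skb is not expected to fail,
 * and the result is unicast back to the requesting socket.
 */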
703 static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
704 struct rtattr **xfrma)
705 {
706 struct sk_buff *r_skb;
707 u32 *flags = NLMSG_DATA(nlh);
708 u32 spid = NETLINK_CB(skb).pid;
709 u32 seq = nlh->nlmsg_seq;
710 int len = NLMSG_LENGTH(sizeof(u32));
711
712 if (*flags & XFRM_SAD_HMASK)
713 len += RTA_SPACE(sizeof(u32));
714 if (*flags & XFRM_SAD_HMAX)
715 len += RTA_SPACE(sizeof(u32));
716 if (*flags & XFRM_SAD_CNT)
717 len += RTA_SPACE(sizeof(u32));
718
719 r_skb = alloc_skb(len, GFP_ATOMIC);
720
721 if (r_skb == NULL)
722 return -ENOMEM;
723
724 if (build_sadinfo(r_skb, spid, seq, *flags) < 0)
725 BUG();
726
727 return nlmsg_unicast(xfrm_nl, r_skb, spid);
728 }
729
730 static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
731 struct rtattr **xfrma)
732 {
733 struct xfrm_usersa_id *p = NLMSG_DATA(nlh);
734 struct xfrm_state *x;
735 struct sk_buff *resp_skb;
736 int err = -ESRCH;
737
738 x = xfrm_user_state_lookup(p, xfrma, &err);
739 if (x == NULL)
740 goto out_noput;
741
742 resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
743 if (IS_ERR(resp_skb)) {
744 err = PTR_ERR(resp_skb);
745 } else {
746 err = netlink_unicast(xfrm_nl, resp_skb,
747 NETLINK_CB(skb).pid, MSG_DONTWAIT);
748 }
749 xfrm_state_put(x);
750 out_noput:
751 return err;
752 }
753
754 static int verify_userspi_info(struct xfrm_userspi_info *p)
755 {
756 switch (p->info.id.proto) {
757 case IPPROTO_AH:
758 case IPPROTO_ESP:
759 break;
760
761 case IPPROTO_COMP:
762 /* IPCOMP spi is 16-bits. */
763 if (p->max >= 0x10000)
764 return -EINVAL;
765 break;
766
767 default:
768 return -EINVAL;
769 }
770
771 if (p->min > p->max)
772 return -EINVAL;
773
774 return 0;
775 }
776
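/*
 * XFRM_MSG_ALLOCSPI handler: find the larval (acquire) state - by the
 * acquire sequence number if one was given, otherwise via
 * xfrm_find_acq() - then allocate an SPI in the requested [min, max]
 * range and unicast the resulting SA back to the caller.
 */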
777 static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
778 struct rtattr **xfrma)
779 {
780 struct xfrm_state *x;
781 struct xfrm_userspi_info *p;
782 struct sk_buff *resp_skb;
783 xfrm_address_t *daddr;
784 int family;
785 int err;
786
787 p = NLMSG_DATA(nlh);
788 err = verify_userspi_info(p);
789 if (err)
790 goto out_noput;
791
792 family = p->info.family;
793 daddr = &p->info.id.daddr;
794
795 x = NULL;
796 if (p->info.seq) {
797 x = xfrm_find_acq_byseq(p->info.seq);
798 if (x && xfrm_addr_cmp(&x->id.daddr, daddr, family)) {
799 xfrm_state_put(x);
800 x = NULL;
801 }
802 }
803
804 if (!x)
805 x = xfrm_find_acq(p->info.mode, p->info.reqid,
806 p->info.id.proto, daddr,
807 &p->info.saddr, 1,
808 family);
809 err = -ENOENT;
810 if (x == NULL)
811 goto out_noput;
812
813 resp_skb = ERR_PTR(-ENOENT);
814
815 spin_lock_bh(&x->lock);
816 if (x->km.state != XFRM_STATE_DEAD) {
817 xfrm_alloc_spi(x, htonl(p->min), htonl(p->max));
818 if (x->id.spi)
819 resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
820 }
821 spin_unlock_bh(&x->lock);
822
823 if (IS_ERR(resp_skb)) {
824 err = PTR_ERR(resp_skb);
825 goto out;
826 }
827
828 err = netlink_unicast(xfrm_nl, resp_skb,
829 NETLINK_CB(skb).pid, MSG_DONTWAIT);
830
831 out:
832 xfrm_state_put(x);
833 out_noput:
834 return err;
835 }
836
837 static int verify_policy_dir(u8 dir)
838 {
839 switch (dir) {
840 case XFRM_POLICY_IN:
841 case XFRM_POLICY_OUT:
842 case XFRM_POLICY_FWD:
843 break;
844
845 default:
846 return -EINVAL;
847 }
848
849 return 0;
850 }
851
852 static int verify_policy_type(u8 type)
853 {
854 switch (type) {
855 case XFRM_POLICY_TYPE_MAIN:
856 #ifdef CONFIG_XFRM_SUB_POLICY
857 case XFRM_POLICY_TYPE_SUB:
858 #endif
859 break;
860
861 default:
862 return -EINVAL;
863 }
864
865 return 0;
866 }
867
868 static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
869 {
870 switch (p->share) {
871 case XFRM_SHARE_ANY:
872 case XFRM_SHARE_SESSION:
873 case XFRM_SHARE_USER:
874 case XFRM_SHARE_UNIQUE:
875 break;
876
877 default:
878 return -EINVAL;
879 }
880
881 switch (p->action) {
882 case XFRM_POLICY_ALLOW:
883 case XFRM_POLICY_BLOCK:
884 break;
885
886 default:
887 return -EINVAL;
888 }
889
890 switch (p->sel.family) {
891 case AF_INET:
892 break;
893
894 case AF_INET6:
895 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
896 break;
897 #else
898 return -EAFNOSUPPORT;
899 #endif
900
901 default:
902 return -EINVAL;
903 }
904
905 return verify_policy_dir(p->dir);
906 }
907
908 static int copy_from_user_sec_ctx(struct xfrm_policy *pol, struct rtattr **xfrma)
909 {
910 struct rtattr *rt = xfrma[XFRMA_SEC_CTX-1];
911 struct xfrm_user_sec_ctx *uctx;
912
913 if (!rt)
914 return 0;
915
916 uctx = RTA_DATA(rt);
917 return security_xfrm_policy_alloc(pol, uctx);
918 }
919
920 static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut,
921 int nr)
922 {
923 int i;
924
925 xp->xfrm_nr = nr;
926 for (i = 0; i < nr; i++, ut++) {
927 struct xfrm_tmpl *t = &xp->xfrm_vec[i];
928
929 memcpy(&t->id, &ut->id, sizeof(struct xfrm_id));
930 memcpy(&t->saddr, &ut->saddr,
931 sizeof(xfrm_address_t));
932 t->reqid = ut->reqid;
933 t->mode = ut->mode;
934 t->share = ut->share;
935 t->optional = ut->optional;
936 t->aalgos = ut->aalgos;
937 t->ealgos = ut->ealgos;
938 t->calgos = ut->calgos;
939 t->encap_family = ut->family;
940 }
941 }
942
943 static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
944 {
945 int i;
946
947 if (nr > XFRM_MAX_DEPTH)
948 return -EINVAL;
949
950 for (i = 0; i < nr; i++) {
951 /* We never validated the ut->family value, so many
952 * applications simply leave it at zero. The check was
953 * never made and ut->family was ignored because all
954 * templates could be assumed to have the same family as
955 * the policy itself. Now that we will have ipv4-in-ipv6
956 * and ipv6-in-ipv4 tunnels, this is no longer true.
957 */
958 if (!ut[i].family)
959 ut[i].family = family;
960
961 switch (ut[i].family) {
962 case AF_INET:
963 break;
964 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
965 case AF_INET6:
966 break;
967 #endif
968 default:
969 return -EINVAL;
970 }
971 }
972
973 return 0;
974 }
975
976 static int copy_from_user_tmpl(struct xfrm_policy *pol, struct rtattr **xfrma)
977 {
978 struct rtattr *rt = xfrma[XFRMA_TMPL-1];
979
980 if (!rt) {
981 pol->xfrm_nr = 0;
982 } else {
983 struct xfrm_user_tmpl *utmpl = RTA_DATA(rt);
984 int nr = (rt->rta_len - sizeof(*rt)) / sizeof(*utmpl);
985 int err;
986
987 err = validate_tmpl(nr, utmpl, pol->family);
988 if (err)
989 return err;
990
991 copy_templates(pol, RTA_DATA(rt), nr);
992 }
993 return 0;
994 }
995
996 static int copy_from_user_policy_type(u8 *tp, struct rtattr **xfrma)
997 {
998 struct rtattr *rt = xfrma[XFRMA_POLICY_TYPE-1];
999 struct xfrm_userpolicy_type *upt;
1000 u8 type = XFRM_POLICY_TYPE_MAIN;
1001 int err;
1002
1003 if (rt) {
1004 if (rt->rta_len < sizeof(*upt))
1005 return -EINVAL;
1006
1007 upt = RTA_DATA(rt);
1008 type = upt->type;
1009 }
1010
1011 err = verify_policy_type(type);
1012 if (err)
1013 return err;
1014
1015 *tp = type;
1016 return 0;
1017 }
1018
1019 static void copy_from_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p)
1020 {
1021 xp->priority = p->priority;
1022 xp->index = p->index;
1023 memcpy(&xp->selector, &p->sel, sizeof(xp->selector));
1024 memcpy(&xp->lft, &p->lft, sizeof(xp->lft));
1025 xp->action = p->action;
1026 xp->flags = p->flags;
1027 xp->family = p->sel.family;
1028 /* XXX xp->share = p->share; */
1029 }
1030
1031 static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p, int dir)
1032 {
1033 memcpy(&p->sel, &xp->selector, sizeof(p->sel));
1034 memcpy(&p->lft, &xp->lft, sizeof(p->lft));
1035 memcpy(&p->curlft, &xp->curlft, sizeof(p->curlft));
1036 p->priority = xp->priority;
1037 p->index = xp->index;
1038 p->sel.family = xp->family;
1039 p->dir = dir;
1040 p->action = xp->action;
1041 p->flags = xp->flags;
1042 p->share = XFRM_SHARE_ANY; /* XXX xp->share */
1043 }
1044
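/*
 * Allocate an xfrm_policy and fill it from an XFRM_MSG_NEWPOLICY/
 * UPDPOLICY request: the fixed policy info, the (optional) policy type,
 * the template vector and the security context.
 */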
1045 static struct xfrm_policy *xfrm_policy_construct(struct xfrm_userpolicy_info *p, struct rtattr **xfrma, int *errp)
1046 {
1047 struct xfrm_policy *xp = xfrm_policy_alloc(GFP_KERNEL);
1048 int err;
1049
1050 if (!xp) {
1051 *errp = -ENOMEM;
1052 return NULL;
1053 }
1054
1055 copy_from_user_policy(xp, p);
1056
1057 err = copy_from_user_policy_type(&xp->type, xfrma);
1058 if (err)
1059 goto error;
1060
1061 if (!(err = copy_from_user_tmpl(xp, xfrma)))
1062 err = copy_from_user_sec_ctx(xp, xfrma);
1063 if (err)
1064 goto error;
1065
1066 return xp;
1067 error:
1068 *errp = err;
1069 kfree(xp);
1070 return NULL;
1071 }
1072
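/*
 * XFRM_MSG_NEWPOLICY/XFRM_MSG_UPDPOLICY handler: validate, construct
 * the policy, insert it (exclusively for NEWPOLICY), audit the result
 * and broadcast a km_policy_notify() event.
 */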
1073 static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1074 struct rtattr **xfrma)
1075 {
1076 struct xfrm_userpolicy_info *p = NLMSG_DATA(nlh);
1077 struct xfrm_policy *xp;
1078 struct km_event c;
1079 int err;
1080 int excl;
1081
1082 err = verify_newpolicy_info(p);
1083 if (err)
1084 return err;
1085 err = verify_sec_ctx_len(xfrma);
1086 if (err)
1087 return err;
1088
1089 xp = xfrm_policy_construct(p, xfrma, &err);
1090 if (!xp)
1091 return err;
1092
1093 /* shouldn't excl be based on nlh flags??
1094 * Aha! this is anti-netlink really, i.e. more pfkey derived;
1095 * in netlink excl is a flag and you wouldn't need
1096 * a type XFRM_MSG_UPDPOLICY - JHS */
1097 excl = nlh->nlmsg_type == XFRM_MSG_NEWPOLICY;
1098 err = xfrm_policy_insert(p->dir, xp, excl);
1099 xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid,
1100 AUDIT_MAC_IPSEC_ADDSPD, err ? 0 : 1, xp, NULL);
1101
1102 if (err) {
1103 security_xfrm_policy_free(xp);
1104 kfree(xp);
1105 return err;
1106 }
1107
1108 c.event = nlh->nlmsg_type;
1109 c.seq = nlh->nlmsg_seq;
1110 c.pid = nlh->nlmsg_pid;
1111 km_policy_notify(xp, p->dir, &c);
1112
1113 xfrm_pol_put(xp);
1114
1115 return 0;
1116 }
1117
1118 static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
1119 {
1120 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
1121 int i;
1122
1123 if (xp->xfrm_nr == 0)
1124 return 0;
1125
1126 for (i = 0; i < xp->xfrm_nr; i++) {
1127 struct xfrm_user_tmpl *up = &vec[i];
1128 struct xfrm_tmpl *kp = &xp->xfrm_vec[i];
1129
1130 memcpy(&up->id, &kp->id, sizeof(up->id));
1131 up->family = kp->encap_family;
1132 memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr));
1133 up->reqid = kp->reqid;
1134 up->mode = kp->mode;
1135 up->share = kp->share;
1136 up->optional = kp->optional;
1137 up->aalgos = kp->aalgos;
1138 up->ealgos = kp->ealgos;
1139 up->calgos = kp->calgos;
1140 }
1141 RTA_PUT(skb, XFRMA_TMPL,
1142 (sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr),
1143 vec);
1144
1145 return 0;
1146
1147 rtattr_failure:
1148 return -1;
1149 }
1150
1151 static int copy_sec_ctx(struct xfrm_sec_ctx *s, struct sk_buff *skb)
1152 {
1153 int ctx_size = sizeof(struct xfrm_sec_ctx) + s->ctx_len;
1154 struct rtattr *rt = __RTA_PUT(skb, XFRMA_SEC_CTX, ctx_size);
1155 struct xfrm_user_sec_ctx *uctx = RTA_DATA(rt);
1156
1157 uctx->exttype = XFRMA_SEC_CTX;
1158 uctx->len = ctx_size;
1159 uctx->ctx_doi = s->ctx_doi;
1160 uctx->ctx_alg = s->ctx_alg;
1161 uctx->ctx_len = s->ctx_len;
1162 memcpy(uctx + 1, s->ctx_str, s->ctx_len);
1163 return 0;
1164
1165 rtattr_failure:
1166 return -1;
1167 }
1168
1169 static inline int copy_to_user_state_sec_ctx(struct xfrm_state *x, struct sk_buff *skb)
1170 {
1171 if (x->security) {
1172 return copy_sec_ctx(x->security, skb);
1173 }
1174 return 0;
1175 }
1176
1177 static inline int copy_to_user_sec_ctx(struct xfrm_policy *xp, struct sk_buff *skb)
1178 {
1179 if (xp->security) {
1180 return copy_sec_ctx(xp->security, skb);
1181 }
1182 return 0;
1183 }
1184
1185 #ifdef CONFIG_XFRM_SUB_POLICY
1186 static int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
1187 {
1188 struct xfrm_userpolicy_type upt;
1189
1190 memset(&upt, 0, sizeof(upt));
1191 upt.type = type;
1192
1193 RTA_PUT(skb, XFRMA_POLICY_TYPE, sizeof(upt), &upt);
1194
1195 return 0;
1196
1197 rtattr_failure:
1198 return -1;
1199 }
1200
1201 #else
1202 static inline int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
1203 {
1204 return 0;
1205 }
1206 #endif
1207
1208 static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr)
1209 {
1210 struct xfrm_dump_info *sp = ptr;
1211 struct xfrm_userpolicy_info *p;
1212 struct sk_buff *in_skb = sp->in_skb;
1213 struct sk_buff *skb = sp->out_skb;
1214 struct nlmsghdr *nlh;
1215 unsigned char *b = skb_tail_pointer(skb);
1216
1217 if (sp->this_idx < sp->start_idx)
1218 goto out;
1219
1220 nlh = NLMSG_PUT(skb, NETLINK_CB(in_skb).pid,
1221 sp->nlmsg_seq,
1222 XFRM_MSG_NEWPOLICY, sizeof(*p));
1223 p = NLMSG_DATA(nlh);
1224 nlh->nlmsg_flags = sp->nlmsg_flags;
1225
1226 copy_to_user_policy(xp, p, dir);
1227 if (copy_to_user_tmpl(xp, skb) < 0)
1228 goto nlmsg_failure;
1229 if (copy_to_user_sec_ctx(xp, skb))
1230 goto nlmsg_failure;
1231 if (copy_to_user_policy_type(xp->type, skb) < 0)
1232 goto nlmsg_failure;
1233
1234 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1235 out:
1236 sp->this_idx++;
1237 return 0;
1238
1239 nlmsg_failure:
1240 nlmsg_trim(skb, b);
1241 return -1;
1242 }
1243
1244 static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb)
1245 {
1246 struct xfrm_dump_info info;
1247
1248 info.in_skb = cb->skb;
1249 info.out_skb = skb;
1250 info.nlmsg_seq = cb->nlh->nlmsg_seq;
1251 info.nlmsg_flags = NLM_F_MULTI;
1252 info.this_idx = 0;
1253 info.start_idx = cb->args[0];
1254 (void) xfrm_policy_walk(XFRM_POLICY_TYPE_MAIN, dump_one_policy, &info);
1255 #ifdef CONFIG_XFRM_SUB_POLICY
1256 (void) xfrm_policy_walk(XFRM_POLICY_TYPE_SUB, dump_one_policy, &info);
1257 #endif
1258 cb->args[0] = info.this_idx;
1259
1260 return skb->len;
1261 }
1262
1263 static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb,
1264 struct xfrm_policy *xp,
1265 int dir, u32 seq)
1266 {
1267 struct xfrm_dump_info info;
1268 struct sk_buff *skb;
1269
1270 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1271 if (!skb)
1272 return ERR_PTR(-ENOMEM);
1273
1274 info.in_skb = in_skb;
1275 info.out_skb = skb;
1276 info.nlmsg_seq = seq;
1277 info.nlmsg_flags = 0;
1278 info.this_idx = info.start_idx = 0;
1279
1280 if (dump_one_policy(xp, dir, 0, &info) < 0) {
1281 kfree_skb(skb);
1282 return NULL;
1283 }
1284
1285 return skb;
1286 }
1287
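/*
 * Handler shared by XFRM_MSG_GETPOLICY and XFRM_MSG_DELPOLICY: look the
 * policy up by index or by selector plus security context, delete it if
 * requested, and either unicast the policy back to the caller (GET) or
 * audit and broadcast the deletion (DEL).
 */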
1288 static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1289 struct rtattr **xfrma)
1290 {
1291 struct xfrm_policy *xp;
1292 struct xfrm_userpolicy_id *p;
1293 u8 type = XFRM_POLICY_TYPE_MAIN;
1294 int err;
1295 struct km_event c;
1296 int delete;
1297
1298 p = NLMSG_DATA(nlh);
1299 delete = nlh->nlmsg_type == XFRM_MSG_DELPOLICY;
1300
1301 err = copy_from_user_policy_type(&type, xfrma);
1302 if (err)
1303 return err;
1304
1305 err = verify_policy_dir(p->dir);
1306 if (err)
1307 return err;
1308
1309 if (p->index)
1310 xp = xfrm_policy_byid(type, p->dir, p->index, delete, &err);
1311 else {
1312 struct rtattr *rt = xfrma[XFRMA_SEC_CTX-1];
1313 struct xfrm_policy tmp;
1314
1315 err = verify_sec_ctx_len(xfrma);
1316 if (err)
1317 return err;
1318
1319 memset(&tmp, 0, sizeof(struct xfrm_policy));
1320 if (rt) {
1321 struct xfrm_user_sec_ctx *uctx = RTA_DATA(rt);
1322
1323 if ((err = security_xfrm_policy_alloc(&tmp, uctx)))
1324 return err;
1325 }
1326 xp = xfrm_policy_bysel_ctx(type, p->dir, &p->sel, tmp.security,
1327 delete, &err);
1328 security_xfrm_policy_free(&tmp);
1329 }
1330 if (xp == NULL)
1331 return -ENOENT;
1332
1333 if (!delete) {
1334 struct sk_buff *resp_skb;
1335
1336 resp_skb = xfrm_policy_netlink(skb, xp, p->dir, nlh->nlmsg_seq);
1337 if (IS_ERR(resp_skb)) {
1338 err = PTR_ERR(resp_skb);
1339 } else {
1340 err = netlink_unicast(xfrm_nl, resp_skb,
1341 NETLINK_CB(skb).pid,
1342 MSG_DONTWAIT);
1343 }
1344 } else {
1345 xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid,
1346 AUDIT_MAC_IPSEC_DELSPD, err ? 0 : 1, xp, NULL);
1347
1348 if (err != 0)
1349 goto out;
1350
1351 c.data.byid = p->index;
1352 c.event = nlh->nlmsg_type;
1353 c.seq = nlh->nlmsg_seq;
1354 c.pid = nlh->nlmsg_pid;
1355 km_policy_notify(xp, p->dir, &c);
1356 }
1357
1358 out:
1359 xfrm_pol_put(xp);
1360 return err;
1361 }
1362
1363 static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
1364 struct rtattr **xfrma)
1365 {
1366 struct km_event c;
1367 struct xfrm_usersa_flush *p = NLMSG_DATA(nlh);
1368 struct xfrm_audit audit_info;
1369
1370 audit_info.loginuid = NETLINK_CB(skb).loginuid;
1371 audit_info.secid = NETLINK_CB(skb).sid;
1372 xfrm_state_flush(p->proto, &audit_info);
1373 c.data.proto = p->proto;
1374 c.event = nlh->nlmsg_type;
1375 c.seq = nlh->nlmsg_seq;
1376 c.pid = nlh->nlmsg_pid;
1377 km_state_notify(NULL, &c);
1378
1379 return 0;
1380 }
1381
1382
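/*
 * Build an XFRM_MSG_NEWAE message for the given state: the fixed
 * xfrm_aevent_id, the current replay state and lifetime counters, and -
 * if the caller asked for them - the replay and expiry thresholds.
 */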
1383 static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, struct km_event *c)
1384 {
1385 struct xfrm_aevent_id *id;
1386 struct nlmsghdr *nlh;
1387 struct xfrm_lifetime_cur ltime;
1388 unsigned char *b = skb_tail_pointer(skb);
1389
1390 nlh = NLMSG_PUT(skb, c->pid, c->seq, XFRM_MSG_NEWAE, sizeof(*id));
1391 id = NLMSG_DATA(nlh);
1392 nlh->nlmsg_flags = 0;
1393
1394 memcpy(&id->sa_id.daddr, &x->id.daddr,sizeof(x->id.daddr));
1395 id->sa_id.spi = x->id.spi;
1396 id->sa_id.family = x->props.family;
1397 id->sa_id.proto = x->id.proto;
1398 memcpy(&id->saddr, &x->props.saddr,sizeof(x->props.saddr));
1399 id->reqid = x->props.reqid;
1400 id->flags = c->data.aevent;
1401
1402 RTA_PUT(skb, XFRMA_REPLAY_VAL, sizeof(x->replay), &x->replay);
1403
1404 ltime.bytes = x->curlft.bytes;
1405 ltime.packets = x->curlft.packets;
1406 ltime.add_time = x->curlft.add_time;
1407 ltime.use_time = x->curlft.use_time;
1408
1409 RTA_PUT(skb, XFRMA_LTIME_VAL, sizeof(struct xfrm_lifetime_cur), &ltime);
1410
1411 if (id->flags&XFRM_AE_RTHR) {
1412 RTA_PUT(skb,XFRMA_REPLAY_THRESH,sizeof(u32),&x->replay_maxdiff);
1413 }
1414
1415 if (id->flags&XFRM_AE_ETHR) {
1416 u32 etimer = x->replay_maxage*10/HZ;
1417 RTA_PUT(skb,XFRMA_ETIMER_THRESH,sizeof(u32),&etimer);
1418 }
1419
1420 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1421 return skb->len;
1422
1423 rtattr_failure:
1424 nlmsg_failure:
1425 nlmsg_trim(skb, b);
1426 return -1;
1427 }
1428
1429 static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
1430 struct rtattr **xfrma)
1431 {
1432 struct xfrm_state *x;
1433 struct sk_buff *r_skb;
1434 int err;
1435 struct km_event c;
1436 struct xfrm_aevent_id *p = NLMSG_DATA(nlh);
1437 int len = NLMSG_LENGTH(sizeof(struct xfrm_aevent_id));
1438 struct xfrm_usersa_id *id = &p->sa_id;
1439
1440 len += RTA_SPACE(sizeof(struct xfrm_replay_state));
1441 len += RTA_SPACE(sizeof(struct xfrm_lifetime_cur));
1442
1443 if (p->flags&XFRM_AE_RTHR)
1444 len+=RTA_SPACE(sizeof(u32));
1445
1446 if (p->flags&XFRM_AE_ETHR)
1447 len+=RTA_SPACE(sizeof(u32));
1448
1449 r_skb = alloc_skb(len, GFP_ATOMIC);
1450 if (r_skb == NULL)
1451 return -ENOMEM;
1452
1453 x = xfrm_state_lookup(&id->daddr, id->spi, id->proto, id->family);
1454 if (x == NULL) {
1455 kfree_skb(r_skb);
1456 return -ESRCH;
1457 }
1458
1459 /*
1460 * XXX: is this lock really needed - none of the other
1461 * get handlers take the lock (the concern is things getting
1462 * updated while we are still reading) - jhs
1463 */
1464 spin_lock_bh(&x->lock);
1465 c.data.aevent = p->flags;
1466 c.seq = nlh->nlmsg_seq;
1467 c.pid = nlh->nlmsg_pid;
1468
1469 if (build_aevent(r_skb, x, &c) < 0)
1470 BUG();
1471 err = netlink_unicast(xfrm_nl, r_skb,
1472 NETLINK_CB(skb).pid, MSG_DONTWAIT);
1473 spin_unlock_bh(&x->lock);
1474 xfrm_state_put(x);
1475 return err;
1476 }
1477
1478 static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
1479 struct rtattr **xfrma)
1480 {
1481 struct xfrm_state *x;
1482 struct km_event c;
1483 int err = - EINVAL;
1484 struct xfrm_aevent_id *p = NLMSG_DATA(nlh);
1485 struct rtattr *rp = xfrma[XFRMA_REPLAY_VAL-1];
1486 struct rtattr *lt = xfrma[XFRMA_LTIME_VAL-1];
1487
1488 if (!lt && !rp)
1489 return err;
1490
1491 /* pedantic mode - thou shalt sayeth replaceth */
1492 if (!(nlh->nlmsg_flags&NLM_F_REPLACE))
1493 return err;
1494
1495 x = xfrm_state_lookup(&p->sa_id.daddr, p->sa_id.spi, p->sa_id.proto, p->sa_id.family);
1496 if (x == NULL)
1497 return -ESRCH;
1498
1499 if (x->km.state != XFRM_STATE_VALID)
1500 goto out;
1501
1502 spin_lock_bh(&x->lock);
1503 err = xfrm_update_ae_params(x, xfrma);
1504 spin_unlock_bh(&x->lock);
1505 if (err < 0)
1506 goto out;
1507
1508 c.event = nlh->nlmsg_type;
1509 c.seq = nlh->nlmsg_seq;
1510 c.pid = nlh->nlmsg_pid;
1511 c.data.aevent = XFRM_AE_CU;
1512 km_state_notify(x, &c);
1513 err = 0;
1514 out:
1515 xfrm_state_put(x);
1516 return err;
1517 }
1518
1519 static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1520 struct rtattr **xfrma)
1521 {
1522 struct km_event c;
1523 u8 type = XFRM_POLICY_TYPE_MAIN;
1524 int err;
1525 struct xfrm_audit audit_info;
1526
1527 err = copy_from_user_policy_type(&type, xfrma);
1528 if (err)
1529 return err;
1530
1531 audit_info.loginuid = NETLINK_CB(skb).loginuid;
1532 audit_info.secid = NETLINK_CB(skb).sid;
1533 xfrm_policy_flush(type, &audit_info);
1534 c.data.type = type;
1535 c.event = nlh->nlmsg_type;
1536 c.seq = nlh->nlmsg_seq;
1537 c.pid = nlh->nlmsg_pid;
1538 km_policy_notify(NULL, 0, &c);
1539 return 0;
1540 }
1541
1542 static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
1543 struct rtattr **xfrma)
1544 {
1545 struct xfrm_policy *xp;
1546 struct xfrm_user_polexpire *up = NLMSG_DATA(nlh);
1547 struct xfrm_userpolicy_info *p = &up->pol;
1548 u8 type = XFRM_POLICY_TYPE_MAIN;
1549 int err = -ENOENT;
1550
1551 err = copy_from_user_policy_type(&type, xfrma);
1552 if (err)
1553 return err;
1554
1555 if (p->index)
1556 xp = xfrm_policy_byid(type, p->dir, p->index, 0, &err);
1557 else {
1558 struct rtattr *rt = xfrma[XFRMA_SEC_CTX-1];
1559 struct xfrm_policy tmp;
1560
1561 err = verify_sec_ctx_len(xfrma);
1562 if (err)
1563 return err;
1564
1565 memset(&tmp, 0, sizeof(struct xfrm_policy));
1566 if (rt) {
1567 struct xfrm_user_sec_ctx *uctx = RTA_DATA(rt);
1568
1569 if ((err = security_xfrm_policy_alloc(&tmp, uctx)))
1570 return err;
1571 }
1572 xp = xfrm_policy_bysel_ctx(type, p->dir, &p->sel, tmp.security,
1573 0, &err);
1574 security_xfrm_policy_free(&tmp);
1575 }
1576
1577 if (xp == NULL)
1578 return -ENOENT;
1579 read_lock(&xp->lock);
1580 if (xp->dead) {
1581 read_unlock(&xp->lock);
1582 goto out;
1583 }
1584
1585 read_unlock(&xp->lock);
1586 err = 0;
1587 if (up->hard) {
1588 xfrm_policy_delete(xp, p->dir);
1589 xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid,
1590 AUDIT_MAC_IPSEC_DELSPD, 1, xp, NULL);
1591
1592 } else {
1593 // reset the timers here?
1594 printk("Dont know what to do with soft policy expire\n");
1595 }
1596 km_policy_expired(xp, p->dir, up->hard, current->pid);
1597
1598 out:
1599 xfrm_pol_put(xp);
1600 return err;
1601 }
1602
1603 static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
1604 struct rtattr **xfrma)
1605 {
1606 struct xfrm_state *x;
1607 int err;
1608 struct xfrm_user_expire *ue = NLMSG_DATA(nlh);
1609 struct xfrm_usersa_info *p = &ue->state;
1610
1611 x = xfrm_state_lookup(&p->id.daddr, p->id.spi, p->id.proto, p->family);
1612
1613 err = -ENOENT;
1614 if (x == NULL)
1615 return err;
1616
1617 spin_lock_bh(&x->lock);
1618 err = -EINVAL;
1619 if (x->km.state != XFRM_STATE_VALID)
1620 goto out;
1621 km_state_expired(x, ue->hard, current->pid);
1622
1623 if (ue->hard) {
1624 __xfrm_state_delete(x);
1625 xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid,
1626 AUDIT_MAC_IPSEC_DELSA, 1, NULL, x);
1627 }
1628 err = 0;
1629 out:
1630 spin_unlock_bh(&x->lock);
1631 xfrm_state_put(x);
1632 return err;
1633 }
1634
1635 static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
1636 struct rtattr **xfrma)
1637 {
1638 struct xfrm_policy *xp;
1639 struct xfrm_user_tmpl *ut;
1640 int i;
1641 struct rtattr *rt = xfrma[XFRMA_TMPL-1];
1642
1643 struct xfrm_user_acquire *ua = NLMSG_DATA(nlh);
1644 struct xfrm_state *x = xfrm_state_alloc();
1645 int err = -ENOMEM;
1646
1647 if (!x)
1648 return err;
1649
1650 err = verify_newpolicy_info(&ua->policy);
1651 if (err) {
1652 printk("BAD policy passed\n");
1653 kfree(x);
1654 return err;
1655 }
1656
1657 /* build an XP */
1658 xp = xfrm_policy_construct(&ua->policy, (struct rtattr **) xfrma, &err);
1659 if (!xp) {
1660 kfree(x);
1661 return err;
1662 }
1663
1664 memcpy(&x->id, &ua->id, sizeof(ua->id));
1665 memcpy(&x->props.saddr, &ua->saddr, sizeof(ua->saddr));
1666 memcpy(&x->sel, &ua->sel, sizeof(ua->sel));
1667
1668 ut = RTA_DATA(rt);
1669 /* extract the templates and for each call km_key */
1670 for (i = 0; i < xp->xfrm_nr; i++, ut++) {
1671 struct xfrm_tmpl *t = &xp->xfrm_vec[i];
1672 memcpy(&x->id, &t->id, sizeof(x->id));
1673 x->props.mode = t->mode;
1674 x->props.reqid = t->reqid;
1675 x->props.family = ut->family;
1676 t->aalgos = ua->aalgos;
1677 t->ealgos = ua->ealgos;
1678 t->calgos = ua->calgos;
1679 err = km_query(x, t, xp);
1680
1681 }
1682
1683 kfree(x);
1684 kfree(xp);
1685
1686 return 0;
1687 }
1688
1689 #ifdef CONFIG_XFRM_MIGRATE
1690 static int verify_user_migrate(struct rtattr **xfrma)
1691 {
1692 struct rtattr *rt = xfrma[XFRMA_MIGRATE-1];
1693 struct xfrm_user_migrate *um;
1694
1695 if (!rt)
1696 return -EINVAL;
1697
1698 if ((rt->rta_len - sizeof(*rt)) < sizeof(*um))
1699 return -EINVAL;
1700
1701 return 0;
1702 }
1703
1704 static int copy_from_user_migrate(struct xfrm_migrate *ma,
1705 struct rtattr **xfrma, int *num)
1706 {
1707 struct rtattr *rt = xfrma[XFRMA_MIGRATE-1];
1708 struct xfrm_user_migrate *um;
1709 int i, num_migrate;
1710
1711 um = RTA_DATA(rt);
1712 num_migrate = (rt->rta_len - sizeof(*rt)) / sizeof(*um);
1713
1714 if (num_migrate <= 0 || num_migrate > XFRM_MAX_DEPTH)
1715 return -EINVAL;
1716
1717 for (i = 0; i < num_migrate; i++, um++, ma++) {
1718 memcpy(&ma->old_daddr, &um->old_daddr, sizeof(ma->old_daddr));
1719 memcpy(&ma->old_saddr, &um->old_saddr, sizeof(ma->old_saddr));
1720 memcpy(&ma->new_daddr, &um->new_daddr, sizeof(ma->new_daddr));
1721 memcpy(&ma->new_saddr, &um->new_saddr, sizeof(ma->new_saddr));
1722
1723 ma->proto = um->proto;
1724 ma->mode = um->mode;
1725 ma->reqid = um->reqid;
1726
1727 ma->old_family = um->old_family;
1728 ma->new_family = um->new_family;
1729 }
1730
1731 *num = i;
1732 return 0;
1733 }
1734
1735 static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
1736 struct rtattr **xfrma)
1737 {
1738 struct xfrm_userpolicy_id *pi = NLMSG_DATA(nlh);
1739 struct xfrm_migrate m[XFRM_MAX_DEPTH];
1740 u8 type;
1741 int err;
1742 int n = 0;
1743
1744 err = verify_user_migrate((struct rtattr **)xfrma);
1745 if (err)
1746 return err;
1747
1748 err = copy_from_user_policy_type(&type, (struct rtattr **)xfrma);
1749 if (err)
1750 return err;
1751
1752 err = copy_from_user_migrate((struct xfrm_migrate *)m,
1753 (struct rtattr **)xfrma, &n);
1754 if (err)
1755 return err;
1756
1757 if (!n)
1758 return 0;
1759
1760 xfrm_migrate(&pi->sel, pi->dir, type, m, n);
1761
1762 return 0;
1763 }
1764 #else
1765 static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
1766 struct rtattr **xfrma)
1767 {
1768 return -ENOPROTOOPT;
1769 }
1770 #endif
1771
1772 #ifdef CONFIG_XFRM_MIGRATE
1773 static int copy_to_user_migrate(struct xfrm_migrate *m, struct sk_buff *skb)
1774 {
1775 struct xfrm_user_migrate um;
1776
1777 memset(&um, 0, sizeof(um));
1778 um.proto = m->proto;
1779 um.mode = m->mode;
1780 um.reqid = m->reqid;
1781 um.old_family = m->old_family;
1782 memcpy(&um.old_daddr, &m->old_daddr, sizeof(um.old_daddr));
1783 memcpy(&um.old_saddr, &m->old_saddr, sizeof(um.old_saddr));
1784 um.new_family = m->new_family;
1785 memcpy(&um.new_daddr, &m->new_daddr, sizeof(um.new_daddr));
1786 memcpy(&um.new_saddr, &m->new_saddr, sizeof(um.new_saddr));
1787
1788 RTA_PUT(skb, XFRMA_MIGRATE, sizeof(um), &um);
1789 return 0;
1790
1791 rtattr_failure:
1792 return -1;
1793 }
1794
1795 static int build_migrate(struct sk_buff *skb, struct xfrm_migrate *m,
1796 int num_migrate, struct xfrm_selector *sel,
1797 u8 dir, u8 type)
1798 {
1799 struct xfrm_migrate *mp;
1800 struct xfrm_userpolicy_id *pol_id;
1801 struct nlmsghdr *nlh;
1802 unsigned char *b = skb_tail_pointer(skb);
1803 int i;
1804
1805 nlh = NLMSG_PUT(skb, 0, 0, XFRM_MSG_MIGRATE, sizeof(*pol_id));
1806 pol_id = NLMSG_DATA(nlh);
1807 nlh->nlmsg_flags = 0;
1808
1809 /* copy data from selector, dir, and type to the pol_id */
1810 memset(pol_id, 0, sizeof(*pol_id));
1811 memcpy(&pol_id->sel, sel, sizeof(pol_id->sel));
1812 pol_id->dir = dir;
1813
1814 if (copy_to_user_policy_type(type, skb) < 0)
1815 goto nlmsg_failure;
1816
1817 for (i = 0, mp = m ; i < num_migrate; i++, mp++) {
1818 if (copy_to_user_migrate(mp, skb) < 0)
1819 goto nlmsg_failure;
1820 }
1821
1822 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1823 return skb->len;
1824 nlmsg_failure:
1825 nlmsg_trim(skb, b);
1826 return -1;
1827 }
1828
1829 static int xfrm_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
1830 struct xfrm_migrate *m, int num_migrate)
1831 {
1832 struct sk_buff *skb;
1833 size_t len;
1834
1835 len = RTA_SPACE(sizeof(struct xfrm_user_migrate) * num_migrate);
1836 len += NLMSG_SPACE(sizeof(struct xfrm_userpolicy_id));
1837 #ifdef CONFIG_XFRM_SUB_POLICY
1838 len += RTA_SPACE(sizeof(struct xfrm_userpolicy_type));
1839 #endif
1840 skb = alloc_skb(len, GFP_ATOMIC);
1841 if (skb == NULL)
1842 return -ENOMEM;
1843
1844 /* build migrate */
1845 if (build_migrate(skb, m, num_migrate, sel, dir, type) < 0)
1846 BUG();
1847
1848 NETLINK_CB(skb).dst_group = XFRMNLGRP_MIGRATE;
1849 return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_MIGRATE,
1850 GFP_ATOMIC);
1851 }
1852 #else
1853 static int xfrm_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
1854 struct xfrm_migrate *m, int num_migrate)
1855 {
1856 return -ENOPROTOOPT;
1857 }
1858 #endif
1859
1860 #define XMSGSIZE(type) NLMSG_LENGTH(sizeof(struct type))
1861
1862 static const int xfrm_msg_min[XFRM_NR_MSGTYPES] = {
1863 [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
1864 [XFRM_MSG_DELSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
1865 [XFRM_MSG_GETSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
1866 [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
1867 [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
1868 [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
1869 [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userspi_info),
1870 [XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_acquire),
1871 [XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_expire),
1872 [XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
1873 [XFRM_MSG_UPDSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
1874 [XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_polexpire),
1875 [XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_flush),
1876 [XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = NLMSG_LENGTH(0),
1877 [XFRM_MSG_NEWAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
1878 [XFRM_MSG_GETAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
1879 [XFRM_MSG_REPORT - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_report),
1880 [XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
1881 };
1882
1883 #undef XMSGSIZE
1884
1885 static struct xfrm_link {
1886 int (*doit)(struct sk_buff *, struct nlmsghdr *, struct rtattr **);
1887 int (*dump)(struct sk_buff *, struct netlink_callback *);
1888 } xfrm_dispatch[XFRM_NR_MSGTYPES] = {
1889 [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa },
1890 [XFRM_MSG_DELSA - XFRM_MSG_BASE] = { .doit = xfrm_del_sa },
1891 [XFRM_MSG_GETSA - XFRM_MSG_BASE] = { .doit = xfrm_get_sa,
1892 .dump = xfrm_dump_sa },
1893 [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
1894 [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy },
1895 [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy,
1896 .dump = xfrm_dump_policy },
1897 [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi },
1898 [XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_acquire },
1899 [XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_sa_expire },
1900 [XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
1901 [XFRM_MSG_UPDSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa },
1902 [XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_pol_expire},
1903 [XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = { .doit = xfrm_flush_sa },
1904 [XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_flush_policy },
1905 [XFRM_MSG_NEWAE - XFRM_MSG_BASE] = { .doit = xfrm_new_ae },
1906 [XFRM_MSG_GETAE - XFRM_MSG_BASE] = { .doit = xfrm_get_ae },
1907 [XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = { .doit = xfrm_do_migrate },
1908 [XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_sadinfo },
1909 };
1910
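/*
 * Demultiplex one netlink request: bounds-check the message type,
 * require CAP_NET_ADMIN for every operation, start a dump for
 * GETSA/GETPOLICY requests carrying NLM_F_DUMP, otherwise parse the
 * trailing rtattrs into xfrma[] (indexed by attribute type minus one)
 * and hand off to the per-type ->doit() handler from xfrm_dispatch[].
 */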
1911 static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
1912 {
1913 struct rtattr *xfrma[XFRMA_MAX];
1914 struct xfrm_link *link;
1915 int type, min_len;
1916
1917 type = nlh->nlmsg_type;
1918 if (type > XFRM_MSG_MAX)
1919 return -EINVAL;
1920
1921 type -= XFRM_MSG_BASE;
1922 link = &xfrm_dispatch[type];
1923
1924 /* All operations require privileges, even GET */
1925 if (security_netlink_recv(skb, CAP_NET_ADMIN))
1926 return -EPERM;
1927
1928 if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) ||
1929 type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) &&
1930 (nlh->nlmsg_flags & NLM_F_DUMP)) {
1931 if (link->dump == NULL)
1932 return -EINVAL;
1933
1934 return netlink_dump_start(xfrm_nl, skb, nlh, link->dump, NULL);
1935 }
1936
1937 memset(xfrma, 0, sizeof(xfrma));
1938
1939 if (nlh->nlmsg_len < (min_len = xfrm_msg_min[type]))
1940 return -EINVAL;
1941
1942 if (nlh->nlmsg_len > min_len) {
1943 int attrlen = nlh->nlmsg_len - NLMSG_ALIGN(min_len);
1944 struct rtattr *attr = (void *) nlh + NLMSG_ALIGN(min_len);
1945
1946 while (RTA_OK(attr, attrlen)) {
1947 unsigned short flavor = attr->rta_type;
1948 if (flavor) {
1949 if (flavor > XFRMA_MAX)
1950 return -EINVAL;
1951 xfrma[flavor - 1] = attr;
1952 }
1953 attr = RTA_NEXT(attr, attrlen);
1954 }
1955 }
1956
1957 if (link->doit == NULL)
1958 return -EINVAL;
1959
1960 return link->doit(skb, nlh, xfrma);
1961 }
1962
1963 static void xfrm_netlink_rcv(struct sock *sk, int len)
1964 {
1965 unsigned int qlen = 0;
1966
1967 do {
1968 mutex_lock(&xfrm_cfg_mutex);
1969 netlink_run_queue(sk, &qlen, &xfrm_user_rcv_msg);
1970 mutex_unlock(&xfrm_cfg_mutex);
1971
1972 } while (qlen);
1973 }
1974
1975 static int build_expire(struct sk_buff *skb, struct xfrm_state *x, struct km_event *c)
1976 {
1977 struct xfrm_user_expire *ue;
1978 struct nlmsghdr *nlh;
1979 unsigned char *b = skb_tail_pointer(skb);
1980
1981 nlh = NLMSG_PUT(skb, c->pid, 0, XFRM_MSG_EXPIRE,
1982 sizeof(*ue));
1983 ue = NLMSG_DATA(nlh);
1984 nlh->nlmsg_flags = 0;
1985
1986 copy_to_user_state(x, &ue->state);
1987 ue->hard = (c->data.hard != 0) ? 1 : 0;
1988
1989 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1990 return skb->len;
1991
1992 nlmsg_failure:
1993 nlmsg_trim(skb, b);
1994 return -1;
1995 }
1996
1997 static int xfrm_exp_state_notify(struct xfrm_state *x, struct km_event *c)
1998 {
1999 struct sk_buff *skb;
2000 int len = NLMSG_LENGTH(sizeof(struct xfrm_user_expire));
2001
2002 skb = alloc_skb(len, GFP_ATOMIC);
2003 if (skb == NULL)
2004 return -ENOMEM;
2005
2006 if (build_expire(skb, x, c) < 0)
2007 BUG();
2008
2009 NETLINK_CB(skb).dst_group = XFRMNLGRP_EXPIRE;
2010 return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
2011 }
2012
2013 static int xfrm_aevent_state_notify(struct xfrm_state *x, struct km_event *c)
2014 {
2015 struct sk_buff *skb;
2016 int len = NLMSG_LENGTH(sizeof(struct xfrm_aevent_id));
2017
2018 len += RTA_SPACE(sizeof(struct xfrm_replay_state));
2019 len += RTA_SPACE(sizeof(struct xfrm_lifetime_cur));
2020 skb = alloc_skb(len, GFP_ATOMIC);
2021 if (skb == NULL)
2022 return -ENOMEM;
2023
2024 if (build_aevent(skb, x, c) < 0)
2025 BUG();
2026
2027 NETLINK_CB(skb).dst_group = XFRMNLGRP_AEVENTS;
2028 return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_AEVENTS, GFP_ATOMIC);
2029 }
2030
2031 static int xfrm_notify_sa_flush(struct km_event *c)
2032 {
2033 struct xfrm_usersa_flush *p;
2034 struct nlmsghdr *nlh;
2035 struct sk_buff *skb;
2036 sk_buff_data_t b;
2037 int len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_flush));
2038
2039 skb = alloc_skb(len, GFP_ATOMIC);
2040 if (skb == NULL)
2041 return -ENOMEM;
2042 b = skb->tail;
2043
2044 nlh = NLMSG_PUT(skb, c->pid, c->seq,
2045 XFRM_MSG_FLUSHSA, sizeof(*p));
2046 nlh->nlmsg_flags = 0;
2047
2048 p = NLMSG_DATA(nlh);
2049 p->proto = c->data.proto;
2050
2051 nlh->nlmsg_len = skb->tail - b;
2052
2053 NETLINK_CB(skb).dst_group = XFRMNLGRP_SA;
2054 return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC);
2055
2056 nlmsg_failure:
2057 kfree_skb(skb);
2058 return -1;
2059 }
2060
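/*
 * Attribute space needed when broadcasting this SA in a notification;
 * the fixed xfrm_usersa_info/xfrm_usersa_id header is accounted for
 * separately by xfrm_notify_sa().
 */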
2061 static inline int xfrm_sa_len(struct xfrm_state *x)
2062 {
2063 int l = 0;
2064 if (x->aalg)
2065 l += RTA_SPACE(sizeof(*x->aalg) + (x->aalg->alg_key_len+7)/8);
2066 if (x->ealg)
2067 l += RTA_SPACE(sizeof(*x->ealg) + (x->ealg->alg_key_len+7)/8);
2068 if (x->calg)
2069 l += RTA_SPACE(sizeof(*x->calg));
2070 if (x->encap)
2071 l += RTA_SPACE(sizeof(*x->encap));
2072
2073 return l;
2074 }
2075
2076 static int xfrm_notify_sa(struct xfrm_state *x, struct km_event *c)
2077 {
2078 struct xfrm_usersa_info *p;
2079 struct xfrm_usersa_id *id;
2080 struct nlmsghdr *nlh;
2081 struct sk_buff *skb;
2082 sk_buff_data_t b;
2083 int len = xfrm_sa_len(x);
2084 int headlen;
2085
2086 headlen = sizeof(*p);
2087 if (c->event == XFRM_MSG_DELSA) {
2088 len += RTA_SPACE(headlen);
2089 headlen = sizeof(*id);
2090 }
2091 len += NLMSG_SPACE(headlen);
2092
2093 skb = alloc_skb(len, GFP_ATOMIC);
2094 if (skb == NULL)
2095 return -ENOMEM;
2096 b = skb->tail;
2097
2098 nlh = NLMSG_PUT(skb, c->pid, c->seq, c->event, headlen);
2099 nlh->nlmsg_flags = 0;
2100
2101 p = NLMSG_DATA(nlh);
2102 if (c->event == XFRM_MSG_DELSA) {
2103 id = NLMSG_DATA(nlh);
2104 memcpy(&id->daddr, &x->id.daddr, sizeof(id->daddr));
2105 id->spi = x->id.spi;
2106 id->family = x->props.family;
2107 id->proto = x->id.proto;
2108
2109 p = RTA_DATA(__RTA_PUT(skb, XFRMA_SA, sizeof(*p)));
2110 }
2111
2112 copy_to_user_state(x, p);
2113
2114 if (x->aalg)
2115 RTA_PUT(skb, XFRMA_ALG_AUTH,
2116 sizeof(*(x->aalg))+(x->aalg->alg_key_len+7)/8, x->aalg);
2117 if (x->ealg)
2118 RTA_PUT(skb, XFRMA_ALG_CRYPT,
2119 sizeof(*(x->ealg))+(x->ealg->alg_key_len+7)/8, x->ealg);
2120 if (x->calg)
2121 RTA_PUT(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg);
2122
2123 if (x->encap)
2124 RTA_PUT(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap);
2125
2126 nlh->nlmsg_len = skb->tail - b;
2127
2128 NETLINK_CB(skb).dst_group = XFRMNLGRP_SA;
2129 return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC);
2130
2131 nlmsg_failure:
2132 rtattr_failure:
2133 kfree_skb(skb);
2134 return -1;
2135 }
2136
2137 static int xfrm_send_state_notify(struct xfrm_state *x, struct km_event *c)
2138 {
2139
2140 switch (c->event) {
2141 case XFRM_MSG_EXPIRE:
2142 return xfrm_exp_state_notify(x, c);
2143 case XFRM_MSG_NEWAE:
2144 return xfrm_aevent_state_notify(x, c);
2145 case XFRM_MSG_DELSA:
2146 case XFRM_MSG_UPDSA:
2147 case XFRM_MSG_NEWSA:
2148 return xfrm_notify_sa(x, c);
2149 case XFRM_MSG_FLUSHSA:
2150 return xfrm_notify_sa_flush(c);
2151 default:
2152 printk("xfrm_user: Unknown SA event %d\n", c->event);
2153 break;
2154 }
2155
2156 return 0;
2157
2158 }
2159
2160 static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
2161 struct xfrm_tmpl *xt, struct xfrm_policy *xp,
2162 int dir)
2163 {
2164 struct xfrm_user_acquire *ua;
2165 struct nlmsghdr *nlh;
2166 unsigned char *b = skb_tail_pointer(skb);
2167 __u32 seq = xfrm_get_acqseq();
2168
2169 nlh = NLMSG_PUT(skb, 0, 0, XFRM_MSG_ACQUIRE,
2170 sizeof(*ua));
2171 ua = NLMSG_DATA(nlh);
2172 nlh->nlmsg_flags = 0;
2173
2174 memcpy(&ua->id, &x->id, sizeof(ua->id));
2175 memcpy(&ua->saddr, &x->props.saddr, sizeof(ua->saddr));
2176 memcpy(&ua->sel, &x->sel, sizeof(ua->sel));
2177 copy_to_user_policy(xp, &ua->policy, dir);
2178 ua->aalgos = xt->aalgos;
2179 ua->ealgos = xt->ealgos;
2180 ua->calgos = xt->calgos;
2181 ua->seq = x->km.seq = seq;
2182
2183 if (copy_to_user_tmpl(xp, skb) < 0)
2184 goto nlmsg_failure;
2185 if (copy_to_user_state_sec_ctx(x, skb))
2186 goto nlmsg_failure;
2187 if (copy_to_user_policy_type(xp->type, skb) < 0)
2188 goto nlmsg_failure;
2189
2190 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2191 return skb->len;
2192
2193 nlmsg_failure:
2194 nlmsg_trim(skb, b);
2195 return -1;
2196 }
2197
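/* Ask key-manager listeners on XFRMNLGRP_ACQUIRE (typically an IKE
 * daemon) to negotiate an SA matching the given policy and template.
 */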
2198 static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt,
2199 struct xfrm_policy *xp, int dir)
2200 {
2201 struct sk_buff *skb;
2202 size_t len;
2203
2204 len = RTA_SPACE(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr);
2205 len += NLMSG_SPACE(sizeof(struct xfrm_user_acquire));
2206 len += RTA_SPACE(xfrm_user_sec_ctx_size(x->security));
2207 #ifdef CONFIG_XFRM_SUB_POLICY
2208 len += RTA_SPACE(sizeof(struct xfrm_userpolicy_type));
2209 #endif
2210 skb = alloc_skb(len, GFP_ATOMIC);
2211 if (skb == NULL)
2212 return -ENOMEM;
2213
2214 if (build_acquire(skb, x, xt, xp, dir) < 0)
2215 BUG();
2216
2217 NETLINK_CB(skb).dst_group = XFRMNLGRP_ACQUIRE;
2218 return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_ACQUIRE, GFP_ATOMIC);
2219 }
2220
2221 /* User gives us xfrm_user_policy_info followed by an array of 0
2222 * or more templates.
2223 */
2224 static struct xfrm_policy *xfrm_compile_policy(struct sock *sk, int opt,
2225 u8 *data, int len, int *dir)
2226 {
2227 struct xfrm_userpolicy_info *p = (struct xfrm_userpolicy_info *)data;
2228 struct xfrm_user_tmpl *ut = (struct xfrm_user_tmpl *) (p + 1);
2229 struct xfrm_policy *xp;
2230 int nr;
2231
2232 switch (sk->sk_family) {
2233 case AF_INET:
2234 if (opt != IP_XFRM_POLICY) {
2235 *dir = -EOPNOTSUPP;
2236 return NULL;
2237 }
2238 break;
2239 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
2240 case AF_INET6:
2241 if (opt != IPV6_XFRM_POLICY) {
2242 *dir = -EOPNOTSUPP;
2243 return NULL;
2244 }
2245 break;
2246 #endif
2247 default:
2248 *dir = -EINVAL;
2249 return NULL;
2250 }
2251
2252 *dir = -EINVAL;
2253
2254 if (len < sizeof(*p) ||
2255 verify_newpolicy_info(p))
2256 return NULL;
2257
2258 nr = ((len - sizeof(*p)) / sizeof(*ut));
2259 if (validate_tmpl(nr, ut, p->sel.family))
2260 return NULL;
2261
2262 if (p->dir > XFRM_POLICY_OUT)
2263 return NULL;
2264
2265 xp = xfrm_policy_alloc(GFP_KERNEL);
2266 if (xp == NULL) {
2267 *dir = -ENOBUFS;
2268 return NULL;
2269 }
2270
2271 copy_from_user_policy(xp, p);
2272 xp->type = XFRM_POLICY_TYPE_MAIN;
2273 copy_templates(xp, ut, nr);
2274
2275 *dir = p->dir;
2276
2277 return xp;
2278 }
2279
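/* Fill an XFRM_MSG_POLEXPIRE message for a policy whose soft or hard
 * lifetime (c->data.hard) has been reached.
 */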
2280 static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp,
2281 int dir, struct km_event *c)
2282 {
2283 struct xfrm_user_polexpire *upe;
2284 struct nlmsghdr *nlh;
2285 int hard = c->data.hard;
2286 unsigned char *b = skb_tail_pointer(skb);
2287
2288 nlh = NLMSG_PUT(skb, c->pid, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe));
2289 upe = NLMSG_DATA(nlh);
2290 nlh->nlmsg_flags = 0;
2291
2292 copy_to_user_policy(xp, &upe->pol, dir);
2293 if (copy_to_user_tmpl(xp, skb) < 0)
2294 goto nlmsg_failure;
2295 if (copy_to_user_sec_ctx(xp, skb))
2296 goto nlmsg_failure;
2297 if (copy_to_user_policy_type(xp->type, skb) < 0)
2298 goto nlmsg_failure;
2299 upe->hard = !!hard;
2300
2301 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2302 return skb->len;
2303
2304 nlmsg_failure:
2305 nlmsg_trim(skb, b);
2306 return -1;
2307 }
2308
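/* Broadcast a policy expiration to XFRMNLGRP_EXPIRE listeners. */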
2309 static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
2310 {
2311 struct sk_buff *skb;
2312 size_t len;
2313
2314 len = RTA_SPACE(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr);
2315 len += NLMSG_SPACE(sizeof(struct xfrm_user_polexpire));
2316 len += RTA_SPACE(xfrm_user_sec_ctx_size(xp->security));
2317 #ifdef CONFIG_XFRM_SUB_POLICY
2318 len += RTA_SPACE(sizeof(struct xfrm_userpolicy_type));
2319 #endif
2320 skb = alloc_skb(len, GFP_ATOMIC);
2321 if (skb == NULL)
2322 return -ENOMEM;
2323
2324 if (build_polexpire(skb, xp, dir, c) < 0)
2325 BUG();
2326
2327 NETLINK_CB(skb).dst_group = XFRMNLGRP_EXPIRE;
2328 return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
2329 }
2330
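/* Broadcast a policy add/update/delete event to XFRMNLGRP_POLICY
 * listeners.  As with SAs, DELPOLICY uses the compact
 * xfrm_userpolicy_id header and nests the full xfrm_userpolicy_info
 * in an XFRMA_POLICY attribute.
 */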
2331 static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *c)
2332 {
2333 struct xfrm_userpolicy_info *p;
2334 struct xfrm_userpolicy_id *id;
2335 struct nlmsghdr *nlh;
2336 struct sk_buff *skb;
2337 sk_buff_data_t b;
2338 int len = RTA_SPACE(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr);
2339 int headlen;
2340
2341 headlen = sizeof(*p);
2342 if (c->event == XFRM_MSG_DELPOLICY) {
2343 len += RTA_SPACE(headlen);
2344 headlen = sizeof(*id);
2345 }
2346 #ifdef CONFIG_XFRM_SUB_POLICY
2347 len += RTA_SPACE(sizeof(struct xfrm_userpolicy_type));
2348 #endif
2349 len += NLMSG_SPACE(headlen);
2350
2351 skb = alloc_skb(len, GFP_ATOMIC);
2352 if (skb == NULL)
2353 return -ENOMEM;
2354 b = skb->tail;
2355
2356 nlh = NLMSG_PUT(skb, c->pid, c->seq, c->event, headlen);
2357
2358 p = NLMSG_DATA(nlh);
2359 if (c->event == XFRM_MSG_DELPOLICY) {
2360 id = NLMSG_DATA(nlh);
2361 memset(id, 0, sizeof(*id));
2362 id->dir = dir;
2363 if (c->data.byid)
2364 id->index = xp->index;
2365 else
2366 memcpy(&id->sel, &xp->selector, sizeof(id->sel));
2367
2368 p = RTA_DATA(__RTA_PUT(skb, XFRMA_POLICY, sizeof(*p)));
2369 }
2370
2371 nlh->nlmsg_flags = 0;
2372
2373 copy_to_user_policy(xp, p, dir);
2374 if (copy_to_user_tmpl(xp, skb) < 0)
2375 goto nlmsg_failure;
2376 if (copy_to_user_policy_type(xp->type, skb) < 0)
2377 goto nlmsg_failure;
2378
2379 nlh->nlmsg_len = skb->tail - b;
2380
2381 NETLINK_CB(skb).dst_group = XFRMNLGRP_POLICY;
2382 return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);
2383
2384 nlmsg_failure:
2385 rtattr_failure:
2386 kfree_skb(skb);
2387 return -1;
2388 }
2389
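/* Broadcast an XFRM_MSG_FLUSHPOLICY event; apart from the optional
 * policy type attribute the message carries no payload.
 */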
2390 static int xfrm_notify_policy_flush(struct km_event *c)
2391 {
2392 struct nlmsghdr *nlh;
2393 struct sk_buff *skb;
2394 sk_buff_data_t b;
2395 int len = 0;
2396 #ifdef CONFIG_XFRM_SUB_POLICY
2397 len += RTA_SPACE(sizeof(struct xfrm_userpolicy_type));
2398 #endif
2399 len += NLMSG_LENGTH(0);
2400
2401 skb = alloc_skb(len, GFP_ATOMIC);
2402 if (skb == NULL)
2403 return -ENOMEM;
2404 b = skb->tail;
2405
2406
2407 nlh = NLMSG_PUT(skb, c->pid, c->seq, XFRM_MSG_FLUSHPOLICY, 0);
2408 nlh->nlmsg_flags = 0;
2409 if (copy_to_user_policy_type(c->data.type, skb) < 0)
2410 goto nlmsg_failure;
2411
2412 nlh->nlmsg_len = skb->tail - b;
2413
2414 NETLINK_CB(skb).dst_group = XFRMNLGRP_POLICY;
2415 return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);
2416
2417 nlmsg_failure:
2418 kfree_skb(skb);
2419 return -1;
2420 }
2421
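/* Dispatch a policy event from the xfrm core to the matching notifier
 * above.
 */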
2422 static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
2423 {
2424
2425 switch (c->event) {
2426 case XFRM_MSG_NEWPOLICY:
2427 case XFRM_MSG_UPDPOLICY:
2428 case XFRM_MSG_DELPOLICY:
2429 return xfrm_notify_policy(xp, dir, c);
2430 case XFRM_MSG_FLUSHPOLICY:
2431 return xfrm_notify_policy_flush(c);
2432 case XFRM_MSG_POLEXPIRE:
2433 return xfrm_exp_policy_notify(xp, dir, c);
2434 default:
2435 printk(KERN_NOTICE "xfrm_user: Unknown Policy event %d\n", c->event);
2436 }
2437
2438 return 0;
2439
2440 }
2441
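/* Fill an XFRM_MSG_REPORT message carrying a protocol, a selector and
 * an optional XFRMA_COADDR address.
 */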
2442 static int build_report(struct sk_buff *skb, u8 proto,
2443 struct xfrm_selector *sel, xfrm_address_t *addr)
2444 {
2445 struct xfrm_user_report *ur;
2446 struct nlmsghdr *nlh;
2447 unsigned char *b = skb_tail_pointer(skb);
2448
2449 nlh = NLMSG_PUT(skb, 0, 0, XFRM_MSG_REPORT, sizeof(*ur));
2450 ur = NLMSG_DATA(nlh);
2451 nlh->nlmsg_flags = 0;
2452
2453 ur->proto = proto;
2454 memcpy(&ur->sel, sel, sizeof(ur->sel));
2455
2456 if (addr)
2457 RTA_PUT(skb, XFRMA_COADDR, sizeof(*addr), addr);
2458
2459 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2460 return skb->len;
2461
2462 nlmsg_failure:
2463 rtattr_failure:
2464 nlmsg_trim(skb, b);
2465 return -1;
2466 }
2467
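/* Broadcast an XFRM_MSG_REPORT to XFRMNLGRP_REPORT listeners. */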
2468 static int xfrm_send_report(u8 proto, struct xfrm_selector *sel,
2469 xfrm_address_t *addr)
2470 {
2471 struct sk_buff *skb;
2472 size_t len;
2473
2474 len = NLMSG_ALIGN(NLMSG_LENGTH(sizeof(struct xfrm_user_report)));
/* leave room for the optional XFRMA_COADDR attribute put by build_report() */
if (addr)
len += RTA_SPACE(sizeof(*addr));
2475 skb = alloc_skb(len, GFP_ATOMIC);
2476 if (skb == NULL)
2477 return -ENOMEM;
2478
2479 if (build_report(skb, proto, sel, addr) < 0)
2480 BUG();
2481
2482 NETLINK_CB(skb).dst_group = XFRMNLGRP_REPORT;
2483 return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_REPORT, GFP_ATOMIC);
2484 }
2485
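/* Key manager callbacks registered with the xfrm core below. */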
2486 static struct xfrm_mgr netlink_mgr = {
2487 .id = "netlink",
2488 .notify = xfrm_send_state_notify,
2489 .acquire = xfrm_send_acquire,
2490 .compile_policy = xfrm_compile_policy,
2491 .notify_policy = xfrm_send_policy_notify,
2492 .report = xfrm_send_report,
2493 .migrate = xfrm_send_migrate,
2494 };
2495
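/* Create the NETLINK_XFRM kernel socket and register the netlink key
 * manager with the xfrm core.
 */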
2496 static int __init xfrm_user_init(void)
2497 {
2498 struct sock *nlsk;
2499
2500 printk(KERN_INFO "Initializing XFRM netlink socket\n");
2501
2502 nlsk = netlink_kernel_create(NETLINK_XFRM, XFRMNLGRP_MAX,
2503 xfrm_netlink_rcv, NULL, THIS_MODULE);
2504 if (nlsk == NULL)
2505 return -ENOMEM;
2506 rcu_assign_pointer(xfrm_nl, nlsk);
2507
2508 xfrm_register_km(&netlink_mgr);
2509
2510 return 0;
2511 }
2512
2513 static void __exit xfrm_user_exit(void)
2514 {
2515 struct sock *nlsk = xfrm_nl;
2516
2517 xfrm_unregister_km(&netlink_mgr);
2518 rcu_assign_pointer(xfrm_nl, NULL);
2519 synchronize_rcu();
2520 sock_release(nlsk->sk_socket);
2521 }
2522
2523 module_init(xfrm_user_init);
2524 module_exit(xfrm_user_exit);
2525 MODULE_LICENSE("GPL");
2526 MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_XFRM);
2527