1 /* xfrm_user.c: User interface to configure xfrm engine.
2 *
3 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
4 *
5 * Changes:
6 * Mitsuru KANDA @USAGI
7 * Kazunori MIYAZAWA @USAGI
8 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
9 * IPv6 support
10 *
11 */
12
13 #include <linux/crypto.h>
14 #include <linux/module.h>
15 #include <linux/kernel.h>
16 #include <linux/types.h>
17 #include <linux/slab.h>
18 #include <linux/socket.h>
19 #include <linux/string.h>
20 #include <linux/net.h>
21 #include <linux/skbuff.h>
22 #include <linux/rtnetlink.h>
23 #include <linux/pfkeyv2.h>
24 #include <linux/ipsec.h>
25 #include <linux/init.h>
26 #include <linux/security.h>
27 #include <net/sock.h>
28 #include <net/xfrm.h>
29 #include <net/netlink.h>
30 #include <asm/uaccess.h>
31 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
32 #include <linux/in6.h>
33 #endif
34 #include <linux/audit.h>
35
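/*
 * Validate a single XFRMA_ALG_* attribute: the payload must be large
 * enough for struct xfrm_algo plus the key it claims to carry, AUTH and
 * CRYPT algorithms need a key unless they are the null transforms, and
 * alg_name is forcibly NUL-terminated before later lookup/strcmp() use.
 */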
36 static int verify_one_alg(struct rtattr **xfrma, enum xfrm_attr_type_t type)
37 {
38 struct rtattr *rt = xfrma[type - 1];
39 struct xfrm_algo *algp;
40 int len;
41
42 if (!rt)
43 return 0;
44
45 len = (rt->rta_len - sizeof(*rt)) - sizeof(*algp);
46 if (len < 0)
47 return -EINVAL;
48
49 algp = RTA_DATA(rt);
50
51 len -= (algp->alg_key_len + 7U) / 8;
52 if (len < 0)
53 return -EINVAL;
54
55 switch (type) {
56 case XFRMA_ALG_AUTH:
57 if (!algp->alg_key_len &&
58 strcmp(algp->alg_name, "digest_null") != 0)
59 return -EINVAL;
60 break;
61
62 case XFRMA_ALG_CRYPT:
63 if (!algp->alg_key_len &&
64 strcmp(algp->alg_name, "cipher_null") != 0)
65 return -EINVAL;
66 break;
67
68 case XFRMA_ALG_COMP:
69 /* Zero length keys are legal. */
70 break;
71
72 default:
73 return -EINVAL;
74 }
75
76 algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0';
77 return 0;
78 }
79
80 static int verify_encap_tmpl(struct rtattr **xfrma)
81 {
82 struct rtattr *rt = xfrma[XFRMA_ENCAP - 1];
83 struct xfrm_encap_tmpl *encap;
84
85 if (!rt)
86 return 0;
87
88 if ((rt->rta_len - sizeof(*rt)) < sizeof(*encap))
89 return -EINVAL;
90
91 return 0;
92 }
93
94 static int verify_one_addr(struct rtattr **xfrma, enum xfrm_attr_type_t type,
95 xfrm_address_t **addrp)
96 {
97 struct rtattr *rt = xfrma[type - 1];
98
99 if (!rt)
100 return 0;
101
102 if ((rt->rta_len - sizeof(*rt)) < sizeof(**addrp))
103 return -EINVAL;
104
105 if (addrp)
106 *addrp = RTA_DATA(rt);
107
108 return 0;
109 }
110
111 static inline int verify_sec_ctx_len(struct rtattr **xfrma)
112 {
113 struct rtattr *rt = xfrma[XFRMA_SEC_CTX - 1];
114 struct xfrm_user_sec_ctx *uctx;
115 int len = 0;
116
117 if (!rt)
118 return 0;
119
120 if (rt->rta_len < sizeof(*uctx))
121 return -EINVAL;
122
123 uctx = RTA_DATA(rt);
124
125 len += sizeof(struct xfrm_user_sec_ctx);
126 len += uctx->ctx_len;
127
128 if (uctx->len != len)
129 return -EINVAL;
130
131 return 0;
132 }
133
134
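/*
 * Sanity-check a new SA request before construction: the address family
 * must be supported, the attribute mix must match the IPsec protocol
 * (e.g. ESP needs an auth and/or crypto algorithm but no compression),
 * every supplied attribute must validate, and the mode must be known.
 */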
135 static int verify_newsa_info(struct xfrm_usersa_info *p,
136 struct rtattr **xfrma)
137 {
138 int err;
139
140 err = -EINVAL;
141 switch (p->family) {
142 case AF_INET:
143 break;
144
145 case AF_INET6:
146 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
147 break;
148 #else
149 err = -EAFNOSUPPORT;
150 goto out;
151 #endif
152
153 default:
154 goto out;
155 }
156
157 err = -EINVAL;
158 switch (p->id.proto) {
159 case IPPROTO_AH:
160 if (!xfrma[XFRMA_ALG_AUTH-1] ||
161 xfrma[XFRMA_ALG_CRYPT-1] ||
162 xfrma[XFRMA_ALG_COMP-1])
163 goto out;
164 break;
165
166 case IPPROTO_ESP:
167 if ((!xfrma[XFRMA_ALG_AUTH-1] &&
168 !xfrma[XFRMA_ALG_CRYPT-1]) ||
169 xfrma[XFRMA_ALG_COMP-1])
170 goto out;
171 break;
172
173 case IPPROTO_COMP:
174 if (!xfrma[XFRMA_ALG_COMP-1] ||
175 xfrma[XFRMA_ALG_AUTH-1] ||
176 xfrma[XFRMA_ALG_CRYPT-1])
177 goto out;
178 break;
179
180 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
181 case IPPROTO_DSTOPTS:
182 case IPPROTO_ROUTING:
183 if (xfrma[XFRMA_ALG_COMP-1] ||
184 xfrma[XFRMA_ALG_AUTH-1] ||
185 xfrma[XFRMA_ALG_CRYPT-1] ||
186 xfrma[XFRMA_ENCAP-1] ||
187 xfrma[XFRMA_SEC_CTX-1] ||
188 !xfrma[XFRMA_COADDR-1])
189 goto out;
190 break;
191 #endif
192
193 default:
194 goto out;
195 }
196
197 if ((err = verify_one_alg(xfrma, XFRMA_ALG_AUTH)))
198 goto out;
199 if ((err = verify_one_alg(xfrma, XFRMA_ALG_CRYPT)))
200 goto out;
201 if ((err = verify_one_alg(xfrma, XFRMA_ALG_COMP)))
202 goto out;
203 if ((err = verify_encap_tmpl(xfrma)))
204 goto out;
205 if ((err = verify_sec_ctx_len(xfrma)))
206 goto out;
207 if ((err = verify_one_addr(xfrma, XFRMA_COADDR, NULL)))
208 goto out;
209
210 err = -EINVAL;
211 switch (p->mode) {
212 case XFRM_MODE_TRANSPORT:
213 case XFRM_MODE_TUNNEL:
214 case XFRM_MODE_ROUTEOPTIMIZATION:
215 case XFRM_MODE_BEET:
216 break;
217
218 default:
219 goto out;
220 }
221
222 err = 0;
223
224 out:
225 return err;
226 }
227
228 static int attach_one_algo(struct xfrm_algo **algpp, u8 *props,
229 struct xfrm_algo_desc *(*get_byname)(char *, int),
230 struct rtattr *u_arg)
231 {
232 struct rtattr *rta = u_arg;
233 struct xfrm_algo *p, *ualg;
234 struct xfrm_algo_desc *algo;
235 int len;
236
237 if (!rta)
238 return 0;
239
240 ualg = RTA_DATA(rta);
241
242 algo = get_byname(ualg->alg_name, 1);
243 if (!algo)
244 return -ENOSYS;
245 *props = algo->desc.sadb_alg_id;
246
247 len = sizeof(*ualg) + (ualg->alg_key_len + 7U) / 8;
248 p = kmemdup(ualg, len, GFP_KERNEL);
249 if (!p)
250 return -ENOMEM;
251
252 strcpy(p->alg_name, algo->name);
253 *algpp = p;
254 return 0;
255 }
256
257 static int attach_encap_tmpl(struct xfrm_encap_tmpl **encapp, struct rtattr *u_arg)
258 {
259 struct rtattr *rta = u_arg;
260 struct xfrm_encap_tmpl *p, *uencap;
261
262 if (!rta)
263 return 0;
264
265 uencap = RTA_DATA(rta);
266 p = kmemdup(uencap, sizeof(*p), GFP_KERNEL);
267 if (!p)
268 return -ENOMEM;
269
270 *encapp = p;
271 return 0;
272 }
273
274
275 static inline int xfrm_user_sec_ctx_size(struct xfrm_sec_ctx *xfrm_ctx)
276 {
277 int len = 0;
278
279 if (xfrm_ctx) {
280 len += sizeof(struct xfrm_user_sec_ctx);
281 len += xfrm_ctx->ctx_len;
282 }
283 return len;
284 }
285
286 static int attach_sec_ctx(struct xfrm_state *x, struct rtattr *u_arg)
287 {
288 struct xfrm_user_sec_ctx *uctx;
289
290 if (!u_arg)
291 return 0;
292
293 uctx = RTA_DATA(u_arg);
294 return security_xfrm_state_alloc(x, uctx);
295 }
296
297 static int attach_one_addr(xfrm_address_t **addrpp, struct rtattr *u_arg)
298 {
299 struct rtattr *rta = u_arg;
300 xfrm_address_t *p, *uaddrp;
301
302 if (!rta)
303 return 0;
304
305 uaddrp = RTA_DATA(rta);
306 p = kmemdup(uaddrp, sizeof(*p), GFP_KERNEL);
307 if (!p)
308 return -ENOMEM;
309
310 *addrpp = p;
311 return 0;
312 }
313
314 static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
315 {
316 memcpy(&x->id, &p->id, sizeof(x->id));
317 memcpy(&x->sel, &p->sel, sizeof(x->sel));
318 memcpy(&x->lft, &p->lft, sizeof(x->lft));
319 x->props.mode = p->mode;
320 x->props.replay_window = p->replay_window;
321 x->props.reqid = p->reqid;
322 x->props.family = p->family;
323 memcpy(&x->props.saddr, &p->saddr, sizeof(x->props.saddr));
324 x->props.flags = p->flags;
325 }
326
327 /*
328 * Someday, when pfkey also has support, this code could be made
329 * shareable and moved to xfrm_state.c. - JHS
330 *
331 */
332 static int xfrm_update_ae_params(struct xfrm_state *x, struct rtattr **xfrma)
333 {
334 int err = - EINVAL;
335 struct rtattr *rp = xfrma[XFRMA_REPLAY_VAL-1];
336 struct rtattr *lt = xfrma[XFRMA_LTIME_VAL-1];
337 struct rtattr *et = xfrma[XFRMA_ETIMER_THRESH-1];
338 struct rtattr *rt = xfrma[XFRMA_REPLAY_THRESH-1];
339
340 if (rp) {
341 struct xfrm_replay_state *replay;
342 if (RTA_PAYLOAD(rp) < sizeof(*replay))
343 goto error;
344 replay = RTA_DATA(rp);
345 memcpy(&x->replay, replay, sizeof(*replay));
346 memcpy(&x->preplay, replay, sizeof(*replay));
347 }
348
349 if (lt) {
350 struct xfrm_lifetime_cur *ltime;
351 if (RTA_PAYLOAD(lt) < sizeof(*ltime))
352 goto error;
353 ltime = RTA_DATA(lt);
354 x->curlft.bytes = ltime->bytes;
355 x->curlft.packets = ltime->packets;
356 x->curlft.add_time = ltime->add_time;
357 x->curlft.use_time = ltime->use_time;
358 }
359
360 if (et) {
361 if (RTA_PAYLOAD(et) < sizeof(u32))
362 goto error;
363 x->replay_maxage = *(u32*)RTA_DATA(et);
364 }
365
366 if (rt) {
367 if (RTA_PAYLOAD(rt) < sizeof(u32))
368 goto error;
369 x->replay_maxdiff = *(u32*)RTA_DATA(rt);
370 }
371
372 return 0;
373 error:
374 return err;
375 }
376
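/*
 * Build an xfrm_state from the userspace SA description: copy the base
 * fields, attach any algorithms, encapsulation template, care-of address
 * and security context, initialise the state, seed the replay/aevent
 * defaults from the sysctls, then let the optional XFRMA_* attributes
 * override those defaults via xfrm_update_ae_params().
 */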
377 static struct xfrm_state *xfrm_state_construct(struct xfrm_usersa_info *p,
378 struct rtattr **xfrma,
379 int *errp)
380 {
381 struct xfrm_state *x = xfrm_state_alloc();
382 int err = -ENOMEM;
383
384 if (!x)
385 goto error_no_put;
386
387 copy_from_user_state(x, p);
388
389 if ((err = attach_one_algo(&x->aalg, &x->props.aalgo,
390 xfrm_aalg_get_byname,
391 xfrma[XFRMA_ALG_AUTH-1])))
392 goto error;
393 if ((err = attach_one_algo(&x->ealg, &x->props.ealgo,
394 xfrm_ealg_get_byname,
395 xfrma[XFRMA_ALG_CRYPT-1])))
396 goto error;
397 if ((err = attach_one_algo(&x->calg, &x->props.calgo,
398 xfrm_calg_get_byname,
399 xfrma[XFRMA_ALG_COMP-1])))
400 goto error;
401 if ((err = attach_encap_tmpl(&x->encap, xfrma[XFRMA_ENCAP-1])))
402 goto error;
403 if ((err = attach_one_addr(&x->coaddr, xfrma[XFRMA_COADDR-1])))
404 goto error;
405 err = xfrm_init_state(x);
406 if (err)
407 goto error;
408
409 if ((err = attach_sec_ctx(x, xfrma[XFRMA_SEC_CTX-1])))
410 goto error;
411
412 x->km.seq = p->seq;
413 x->replay_maxdiff = sysctl_xfrm_aevent_rseqth;
414 /* sysctl_xfrm_aevent_etime is in 100ms units */
415 x->replay_maxage = (sysctl_xfrm_aevent_etime*HZ)/XFRM_AE_ETH_M;
416 x->preplay.bitmap = 0;
417 x->preplay.seq = x->replay.seq+x->replay_maxdiff;
418 x->preplay.oseq = x->replay.oseq +x->replay_maxdiff;
419
420 /* override default values from above */
421
422 err = xfrm_update_ae_params(x, (struct rtattr **)xfrma);
423 if (err < 0)
424 goto error;
425
426 return x;
427
428 error:
429 x->km.state = XFRM_STATE_DEAD;
430 xfrm_state_put(x);
431 error_no_put:
432 *errp = err;
433 return NULL;
434 }
435
436 static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
437 struct rtattr **xfrma)
438 {
439 struct xfrm_usersa_info *p = NLMSG_DATA(nlh);
440 struct xfrm_state *x;
441 int err;
442 struct km_event c;
443
444 err = verify_newsa_info(p, xfrma);
445 if (err)
446 return err;
447
448 x = xfrm_state_construct(p, xfrma, &err);
449 if (!x)
450 return err;
451
452 xfrm_state_hold(x);
453 if (nlh->nlmsg_type == XFRM_MSG_NEWSA)
454 err = xfrm_state_add(x);
455 else
456 err = xfrm_state_update(x);
457
458 xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid,
459 AUDIT_MAC_IPSEC_ADDSA, err ? 0 : 1, NULL, x);
460
461 if (err < 0) {
462 x->km.state = XFRM_STATE_DEAD;
463 __xfrm_state_put(x);
464 goto out;
465 }
466
467 c.seq = nlh->nlmsg_seq;
468 c.pid = nlh->nlmsg_pid;
469 c.event = nlh->nlmsg_type;
470
471 km_state_notify(x, &c);
472 out:
473 xfrm_state_put(x);
474 return err;
475 }
476
477 static struct xfrm_state *xfrm_user_state_lookup(struct xfrm_usersa_id *p,
478 struct rtattr **xfrma,
479 int *errp)
480 {
481 struct xfrm_state *x = NULL;
482 int err;
483
484 if (xfrm_id_proto_match(p->proto, IPSEC_PROTO_ANY)) {
485 err = -ESRCH;
486 x = xfrm_state_lookup(&p->daddr, p->spi, p->proto, p->family);
487 } else {
488 xfrm_address_t *saddr = NULL;
489
490 err = verify_one_addr(xfrma, XFRMA_SRCADDR, &saddr);
491 if (err)
492 goto out;
493
494 if (!saddr) {
495 err = -EINVAL;
496 goto out;
497 }
498
499 err = -ESRCH;
500 x = xfrm_state_lookup_byaddr(&p->daddr, saddr, p->proto,
501 p->family);
502 }
503
504 out:
505 if (!x && errp)
506 *errp = err;
507 return x;
508 }
509
510 static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
511 struct rtattr **xfrma)
512 {
513 struct xfrm_state *x;
514 int err = -ESRCH;
515 struct km_event c;
516 struct xfrm_usersa_id *p = NLMSG_DATA(nlh);
517
518 x = xfrm_user_state_lookup(p, xfrma, &err);
519 if (x == NULL)
520 return err;
521
522 if ((err = security_xfrm_state_delete(x)) != 0)
523 goto out;
524
525 if (xfrm_state_kern(x)) {
526 err = -EPERM;
527 goto out;
528 }
529
530 err = xfrm_state_delete(x);
531
532 if (err < 0)
533 goto out;
534
535 c.seq = nlh->nlmsg_seq;
536 c.pid = nlh->nlmsg_pid;
537 c.event = nlh->nlmsg_type;
538 km_state_notify(x, &c);
539
540 out:
541 xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid,
542 AUDIT_MAC_IPSEC_DELSA, err ? 0 : 1, NULL, x);
543 xfrm_state_put(x);
544 return err;
545 }
546
547 static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
548 {
549 memcpy(&p->id, &x->id, sizeof(p->id));
550 memcpy(&p->sel, &x->sel, sizeof(p->sel));
551 memcpy(&p->lft, &x->lft, sizeof(p->lft));
552 memcpy(&p->curlft, &x->curlft, sizeof(p->curlft));
553 memcpy(&p->stats, &x->stats, sizeof(p->stats));
554 memcpy(&p->saddr, &x->props.saddr, sizeof(p->saddr));
555 p->mode = x->props.mode;
556 p->replay_window = x->props.replay_window;
557 p->reqid = x->props.reqid;
558 p->family = x->props.family;
559 p->flags = x->props.flags;
560 p->seq = x->km.seq;
561 }
562
563 struct xfrm_dump_info {
564 struct sk_buff *in_skb;
565 struct sk_buff *out_skb;
566 u32 nlmsg_seq;
567 u16 nlmsg_flags;
568 int start_idx;
569 int this_idx;
570 };
571
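/*
 * xfrm_state_walk() callback: emit one XFRM_MSG_NEWSA message for each
 * state past start_idx, appending algorithm, encapsulation, security
 * context, care-of address and lastused attributes when present.
 */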
572 static int dump_one_state(struct xfrm_state *x, int count, void *ptr)
573 {
574 struct xfrm_dump_info *sp = ptr;
575 struct sk_buff *in_skb = sp->in_skb;
576 struct sk_buff *skb = sp->out_skb;
577 struct xfrm_usersa_info *p;
578 struct nlmsghdr *nlh;
579 unsigned char *b = skb_tail_pointer(skb);
580
581 if (sp->this_idx < sp->start_idx)
582 goto out;
583
584 nlh = NLMSG_PUT(skb, NETLINK_CB(in_skb).pid,
585 sp->nlmsg_seq,
586 XFRM_MSG_NEWSA, sizeof(*p));
587 nlh->nlmsg_flags = sp->nlmsg_flags;
588
589 p = NLMSG_DATA(nlh);
590 copy_to_user_state(x, p);
591
592 if (x->aalg)
593 RTA_PUT(skb, XFRMA_ALG_AUTH,
594 sizeof(*(x->aalg))+(x->aalg->alg_key_len+7)/8, x->aalg);
595 if (x->ealg)
596 RTA_PUT(skb, XFRMA_ALG_CRYPT,
597 sizeof(*(x->ealg))+(x->ealg->alg_key_len+7)/8, x->ealg);
598 if (x->calg)
599 RTA_PUT(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg);
600
601 if (x->encap)
602 RTA_PUT(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap);
603
604 if (x->security) {
605 int ctx_size = sizeof(struct xfrm_sec_ctx) +
606 x->security->ctx_len;
607 struct rtattr *rt = __RTA_PUT(skb, XFRMA_SEC_CTX, ctx_size);
608 struct xfrm_user_sec_ctx *uctx = RTA_DATA(rt);
609
610 uctx->exttype = XFRMA_SEC_CTX;
611 uctx->len = ctx_size;
612 uctx->ctx_doi = x->security->ctx_doi;
613 uctx->ctx_alg = x->security->ctx_alg;
614 uctx->ctx_len = x->security->ctx_len;
615 memcpy(uctx + 1, x->security->ctx_str, x->security->ctx_len);
616 }
617
618 if (x->coaddr)
619 RTA_PUT(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr);
620
621 if (x->lastused)
622 RTA_PUT(skb, XFRMA_LASTUSED, sizeof(x->lastused), &x->lastused);
623
624 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
625 out:
626 sp->this_idx++;
627 return 0;
628
629 nlmsg_failure:
630 rtattr_failure:
631 nlmsg_trim(skb, b);
632 return -1;
633 }
634
635 static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
636 {
637 struct xfrm_dump_info info;
638
639 info.in_skb = cb->skb;
640 info.out_skb = skb;
641 info.nlmsg_seq = cb->nlh->nlmsg_seq;
642 info.nlmsg_flags = NLM_F_MULTI;
643 info.this_idx = 0;
644 info.start_idx = cb->args[0];
645 (void) xfrm_state_walk(0, dump_one_state, &info);
646 cb->args[0] = info.this_idx;
647
648 return skb->len;
649 }
650
651 static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
652 struct xfrm_state *x, u32 seq)
653 {
654 struct xfrm_dump_info info;
655 struct sk_buff *skb;
656
657 skb = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC);
658 if (!skb)
659 return ERR_PTR(-ENOMEM);
660
661 info.in_skb = in_skb;
662 info.out_skb = skb;
663 info.nlmsg_seq = seq;
664 info.nlmsg_flags = 0;
665 info.this_idx = info.start_idx = 0;
666
667 if (dump_one_state(x, 0, &info)) {
668 kfree_skb(skb);
669 return NULL;
670 }
671
672 return skb;
673 }
674
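/*
 * Build an XFRM_MSG_NEWSPDINFO reply: the payload echoes the request
 * flags, followed by one u32 attribute per counter the caller asked for.
 */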
675 static int build_spdinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags)
676 {
677 struct xfrm_spdinfo si;
678 struct nlmsghdr *nlh;
679 u32 *f;
680
681 nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSPDINFO, sizeof(u32), 0);
682 if (nlh == NULL) /* shouldn't really happen ... */
683 return -EMSGSIZE;
684
685 f = nlmsg_data(nlh);
686 *f = flags;
687 xfrm_spd_getinfo(&si);
688
689 if (flags & XFRM_SPD_HMASK)
690 NLA_PUT_U32(skb, XFRMA_SPDHMASK, si.spdhcnt);
691 if (flags & XFRM_SPD_HMAX)
692 NLA_PUT_U32(skb, XFRMA_SPDHMAX, si.spdhmcnt);
693 if (flags & XFRM_SPD_ICNT)
694 NLA_PUT_U32(skb, XFRMA_SPDICNT, si.incnt);
695 if (flags & XFRM_SPD_OCNT)
696 NLA_PUT_U32(skb, XFRMA_SPDOCNT, si.outcnt);
697 if (flags & XFRM_SPD_FCNT)
698 NLA_PUT_U32(skb, XFRMA_SPDFCNT, si.fwdcnt);
699 if (flags & XFRM_SPD_ISCNT)
700 NLA_PUT_U32(skb, XFRMA_SPDISCNT, si.inscnt);
701 if (flags & XFRM_SPD_OSCNT)
702 NLA_PUT_U32(skb, XFRMA_SPDOSCNT, si.outscnt);
703 if (flags & XFRM_SPD_FSCNT)
704 NLA_PUT_U32(skb, XFRMA_SPDFSCNT, si.fwdscnt);
705
706 return nlmsg_end(skb, nlh);
707
708 nla_put_failure:
709 nlmsg_cancel(skb, nlh);
710 return -EMSGSIZE;
711 }
712
713 static int xfrm_get_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
714 struct rtattr **xfrma)
715 {
716 struct sk_buff *r_skb;
717 u32 *flags = NLMSG_DATA(nlh);
718 u32 spid = NETLINK_CB(skb).pid;
719 u32 seq = nlh->nlmsg_seq;
720 int len = NLMSG_LENGTH(sizeof(u32));
721
722
723 if (*flags & XFRM_SPD_HMASK)
724 len += RTA_SPACE(sizeof(u32));
725 if (*flags & XFRM_SPD_HMAX)
726 len += RTA_SPACE(sizeof(u32));
727 if (*flags & XFRM_SPD_ICNT)
728 len += RTA_SPACE(sizeof(u32));
729 if (*flags & XFRM_SPD_OCNT)
730 len += RTA_SPACE(sizeof(u32));
731 if (*flags & XFRM_SPD_FCNT)
732 len += RTA_SPACE(sizeof(u32));
733 if (*flags & XFRM_SPD_ISCNT)
734 len += RTA_SPACE(sizeof(u32));
735 if (*flags & XFRM_SPD_OSCNT)
736 len += RTA_SPACE(sizeof(u32));
737 if (*flags & XFRM_SPD_FSCNT)
738 len += RTA_SPACE(sizeof(u32));
739
740 r_skb = alloc_skb(len, GFP_ATOMIC);
741 if (r_skb == NULL)
742 return -ENOMEM;
743
744 if (build_spdinfo(r_skb, spid, seq, *flags) < 0)
745 BUG();
746
747 return nlmsg_unicast(xfrm_nl, r_skb, spid);
748 }
749
750 static int build_sadinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags)
751 {
752 struct xfrm_sadinfo si;
753 struct nlmsghdr *nlh;
754 u32 *f;
755
756 nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSADINFO, sizeof(u32), 0);
757 if (nlh == NULL) /* shouldn't really happen ... */
758 return -EMSGSIZE;
759
760 f = nlmsg_data(nlh);
761 *f = flags;
762 xfrm_sad_getinfo(&si);
763
764 if (flags & XFRM_SAD_HMASK)
765 NLA_PUT_U32(skb, XFRMA_SADHMASK, si.sadhcnt);
766 if (flags & XFRM_SAD_HMAX)
767 NLA_PUT_U32(skb, XFRMA_SADHMAX, si.sadhmcnt);
768 if (flags & XFRM_SAD_CNT)
769 NLA_PUT_U32(skb, XFRMA_SADCNT, si.sadcnt);
770
771 return nlmsg_end(skb, nlh);
772
773 nla_put_failure:
774 nlmsg_cancel(skb, nlh);
775 return -EMSGSIZE;
776 }
777
778 static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
779 struct rtattr **xfrma)
780 {
781 struct sk_buff *r_skb;
782 u32 *flags = NLMSG_DATA(nlh);
783 u32 spid = NETLINK_CB(skb).pid;
784 u32 seq = nlh->nlmsg_seq;
785 int len = NLMSG_LENGTH(sizeof(u32));
786
787 if (*flags & XFRM_SAD_HMASK)
788 len += RTA_SPACE(sizeof(u32));
789 if (*flags & XFRM_SAD_HMAX)
790 len += RTA_SPACE(sizeof(u32));
791 if (*flags & XFRM_SAD_CNT)
792 len += RTA_SPACE(sizeof(u32));
793
794 r_skb = alloc_skb(len, GFP_ATOMIC);
795
796 if (r_skb == NULL)
797 return -ENOMEM;
798
799 if (build_sadinfo(r_skb, spid, seq, *flags) < 0)
800 BUG();
801
802 return nlmsg_unicast(xfrm_nl, r_skb, spid);
803 }
804
805 static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
806 struct rtattr **xfrma)
807 {
808 struct xfrm_usersa_id *p = NLMSG_DATA(nlh);
809 struct xfrm_state *x;
810 struct sk_buff *resp_skb;
811 int err = -ESRCH;
812
813 x = xfrm_user_state_lookup(p, xfrma, &err);
814 if (x == NULL)
815 goto out_noput;
816
817 resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
818 if (IS_ERR(resp_skb)) {
819 err = PTR_ERR(resp_skb);
820 } else {
821 err = netlink_unicast(xfrm_nl, resp_skb,
822 NETLINK_CB(skb).pid, MSG_DONTWAIT);
823 }
824 xfrm_state_put(x);
825 out_noput:
826 return err;
827 }
828
829 static int verify_userspi_info(struct xfrm_userspi_info *p)
830 {
831 switch (p->info.id.proto) {
832 case IPPROTO_AH:
833 case IPPROTO_ESP:
834 break;
835
836 case IPPROTO_COMP:
837 /* IPCOMP SPI is 16 bits. */
838 if (p->max >= 0x10000)
839 return -EINVAL;
840 break;
841
842 default:
843 return -EINVAL;
844 }
845
846 if (p->min > p->max)
847 return -EINVAL;
848
849 return 0;
850 }
851
852 static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
853 struct rtattr **xfrma)
854 {
855 struct xfrm_state *x;
856 struct xfrm_userspi_info *p;
857 struct sk_buff *resp_skb;
858 xfrm_address_t *daddr;
859 int family;
860 int err;
861
862 p = NLMSG_DATA(nlh);
863 err = verify_userspi_info(p);
864 if (err)
865 goto out_noput;
866
867 family = p->info.family;
868 daddr = &p->info.id.daddr;
869
870 x = NULL;
871 if (p->info.seq) {
872 x = xfrm_find_acq_byseq(p->info.seq);
873 if (x && xfrm_addr_cmp(&x->id.daddr, daddr, family)) {
874 xfrm_state_put(x);
875 x = NULL;
876 }
877 }
878
879 if (!x)
880 x = xfrm_find_acq(p->info.mode, p->info.reqid,
881 p->info.id.proto, daddr,
882 &p->info.saddr, 1,
883 family);
884 err = -ENOENT;
885 if (x == NULL)
886 goto out_noput;
887
888 resp_skb = ERR_PTR(-ENOENT);
889
890 spin_lock_bh(&x->lock);
891 if (x->km.state != XFRM_STATE_DEAD) {
892 xfrm_alloc_spi(x, htonl(p->min), htonl(p->max));
893 if (x->id.spi)
894 resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
895 }
896 spin_unlock_bh(&x->lock);
897
898 if (IS_ERR(resp_skb)) {
899 err = PTR_ERR(resp_skb);
900 goto out;
901 }
902
903 err = netlink_unicast(xfrm_nl, resp_skb,
904 NETLINK_CB(skb).pid, MSG_DONTWAIT);
905
906 out:
907 xfrm_state_put(x);
908 out_noput:
909 return err;
910 }
911
912 static int verify_policy_dir(u8 dir)
913 {
914 switch (dir) {
915 case XFRM_POLICY_IN:
916 case XFRM_POLICY_OUT:
917 case XFRM_POLICY_FWD:
918 break;
919
920 default:
921 return -EINVAL;
922 }
923
924 return 0;
925 }
926
927 static int verify_policy_type(u8 type)
928 {
929 switch (type) {
930 case XFRM_POLICY_TYPE_MAIN:
931 #ifdef CONFIG_XFRM_SUB_POLICY
932 case XFRM_POLICY_TYPE_SUB:
933 #endif
934 break;
935
936 default:
937 return -EINVAL;
938 }
939
940 return 0;
941 }
942
943 static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
944 {
945 switch (p->share) {
946 case XFRM_SHARE_ANY:
947 case XFRM_SHARE_SESSION:
948 case XFRM_SHARE_USER:
949 case XFRM_SHARE_UNIQUE:
950 break;
951
952 default:
953 return -EINVAL;
954 }
955
956 switch (p->action) {
957 case XFRM_POLICY_ALLOW:
958 case XFRM_POLICY_BLOCK:
959 break;
960
961 default:
962 return -EINVAL;
963 }
964
965 switch (p->sel.family) {
966 case AF_INET:
967 break;
968
969 case AF_INET6:
970 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
971 break;
972 #else
973 return -EAFNOSUPPORT;
974 #endif
975
976 default:
977 return -EINVAL;
978 }
979
980 return verify_policy_dir(p->dir);
981 }
982
983 static int copy_from_user_sec_ctx(struct xfrm_policy *pol, struct rtattr **xfrma)
984 {
985 struct rtattr *rt = xfrma[XFRMA_SEC_CTX-1];
986 struct xfrm_user_sec_ctx *uctx;
987
988 if (!rt)
989 return 0;
990
991 uctx = RTA_DATA(rt);
992 return security_xfrm_policy_alloc(pol, uctx);
993 }
994
995 static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut,
996 int nr)
997 {
998 int i;
999
1000 xp->xfrm_nr = nr;
1001 for (i = 0; i < nr; i++, ut++) {
1002 struct xfrm_tmpl *t = &xp->xfrm_vec[i];
1003
1004 memcpy(&t->id, &ut->id, sizeof(struct xfrm_id));
1005 memcpy(&t->saddr, &ut->saddr,
1006 sizeof(xfrm_address_t));
1007 t->reqid = ut->reqid;
1008 t->mode = ut->mode;
1009 t->share = ut->share;
1010 t->optional = ut->optional;
1011 t->aalgos = ut->aalgos;
1012 t->ealgos = ut->ealgos;
1013 t->calgos = ut->calgos;
1014 t->encap_family = ut->family;
1015 }
1016 }
1017
1018 static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
1019 {
1020 int i;
1021
1022 if (nr > XFRM_MAX_DEPTH)
1023 return -EINVAL;
1024
1025 for (i = 0; i < nr; i++) {
1026 /* We never validated the ut->family value, so many
1027 * applications simply leave it at zero. The check was
1028 * never made and ut->family was ignored because all
1029 * templates could be assumed to have the same family as
1030 * the policy itself. Now that we will have ipv4-in-ipv6
1031 * and ipv6-in-ipv4 tunnels, this is no longer true.
1032 */
1033 if (!ut[i].family)
1034 ut[i].family = family;
1035
1036 switch (ut[i].family) {
1037 case AF_INET:
1038 break;
1039 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1040 case AF_INET6:
1041 break;
1042 #endif
1043 default:
1044 return -EINVAL;
1045 }
1046 }
1047
1048 return 0;
1049 }
1050
1051 static int copy_from_user_tmpl(struct xfrm_policy *pol, struct rtattr **xfrma)
1052 {
1053 struct rtattr *rt = xfrma[XFRMA_TMPL-1];
1054
1055 if (!rt) {
1056 pol->xfrm_nr = 0;
1057 } else {
1058 struct xfrm_user_tmpl *utmpl = RTA_DATA(rt);
1059 int nr = (rt->rta_len - sizeof(*rt)) / sizeof(*utmpl);
1060 int err;
1061
1062 err = validate_tmpl(nr, utmpl, pol->family);
1063 if (err)
1064 return err;
1065
1066 copy_templates(pol, RTA_DATA(rt), nr);
1067 }
1068 return 0;
1069 }
1070
1071 static int copy_from_user_policy_type(u8 *tp, struct rtattr **xfrma)
1072 {
1073 struct rtattr *rt = xfrma[XFRMA_POLICY_TYPE-1];
1074 struct xfrm_userpolicy_type *upt;
1075 u8 type = XFRM_POLICY_TYPE_MAIN;
1076 int err;
1077
1078 if (rt) {
1079 if (rt->rta_len < sizeof(*upt))
1080 return -EINVAL;
1081
1082 upt = RTA_DATA(rt);
1083 type = upt->type;
1084 }
1085
1086 err = verify_policy_type(type);
1087 if (err)
1088 return err;
1089
1090 *tp = type;
1091 return 0;
1092 }
1093
1094 static void copy_from_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p)
1095 {
1096 xp->priority = p->priority;
1097 xp->index = p->index;
1098 memcpy(&xp->selector, &p->sel, sizeof(xp->selector));
1099 memcpy(&xp->lft, &p->lft, sizeof(xp->lft));
1100 xp->action = p->action;
1101 xp->flags = p->flags;
1102 xp->family = p->sel.family;
1103 /* XXX xp->share = p->share; */
1104 }
1105
1106 static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p, int dir)
1107 {
1108 memcpy(&p->sel, &xp->selector, sizeof(p->sel));
1109 memcpy(&p->lft, &xp->lft, sizeof(p->lft));
1110 memcpy(&p->curlft, &xp->curlft, sizeof(p->curlft));
1111 p->priority = xp->priority;
1112 p->index = xp->index;
1113 p->sel.family = xp->family;
1114 p->dir = dir;
1115 p->action = xp->action;
1116 p->flags = xp->flags;
1117 p->share = XFRM_SHARE_ANY; /* XXX xp->share */
1118 }
1119
1120 static struct xfrm_policy *xfrm_policy_construct(struct xfrm_userpolicy_info *p, struct rtattr **xfrma, int *errp)
1121 {
1122 struct xfrm_policy *xp = xfrm_policy_alloc(GFP_KERNEL);
1123 int err;
1124
1125 if (!xp) {
1126 *errp = -ENOMEM;
1127 return NULL;
1128 }
1129
1130 copy_from_user_policy(xp, p);
1131
1132 err = copy_from_user_policy_type(&xp->type, xfrma);
1133 if (err)
1134 goto error;
1135
1136 if (!(err = copy_from_user_tmpl(xp, xfrma)))
1137 err = copy_from_user_sec_ctx(xp, xfrma);
1138 if (err)
1139 goto error;
1140
1141 return xp;
1142 error:
1143 *errp = err;
1144 kfree(xp);
1145 return NULL;
1146 }
1147
1148 static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1149 struct rtattr **xfrma)
1150 {
1151 struct xfrm_userpolicy_info *p = NLMSG_DATA(nlh);
1152 struct xfrm_policy *xp;
1153 struct km_event c;
1154 int err;
1155 int excl;
1156
1157 err = verify_newpolicy_info(p);
1158 if (err)
1159 return err;
1160 err = verify_sec_ctx_len(xfrma);
1161 if (err)
1162 return err;
1163
1164 xp = xfrm_policy_construct(p, xfrma, &err);
1165 if (!xp)
1166 return err;
1167
1168 /* shouldn't excl be based on nlh flags??
1169 * Aha! this is anti-netlink really, i.e. more pfkey derived:
1170 * in netlink excl is a flag and you wouldn't need
1171 * a type XFRM_MSG_UPDPOLICY - JHS */
1172 excl = nlh->nlmsg_type == XFRM_MSG_NEWPOLICY;
1173 err = xfrm_policy_insert(p->dir, xp, excl);
1174 xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid,
1175 AUDIT_MAC_IPSEC_DELSPD, err ? 0 : 1, xp, NULL);
1176
1177 if (err) {
1178 security_xfrm_policy_free(xp);
1179 kfree(xp);
1180 return err;
1181 }
1182
1183 c.event = nlh->nlmsg_type;
1184 c.seq = nlh->nlmsg_seq;
1185 c.pid = nlh->nlmsg_pid;
1186 km_policy_notify(xp, p->dir, &c);
1187
1188 xfrm_pol_put(xp);
1189
1190 return 0;
1191 }
1192
1193 static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
1194 {
1195 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
1196 int i;
1197
1198 if (xp->xfrm_nr == 0)
1199 return 0;
1200
1201 for (i = 0; i < xp->xfrm_nr; i++) {
1202 struct xfrm_user_tmpl *up = &vec[i];
1203 struct xfrm_tmpl *kp = &xp->xfrm_vec[i];
1204
1205 memcpy(&up->id, &kp->id, sizeof(up->id));
1206 up->family = kp->encap_family;
1207 memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr));
1208 up->reqid = kp->reqid;
1209 up->mode = kp->mode;
1210 up->share = kp->share;
1211 up->optional = kp->optional;
1212 up->aalgos = kp->aalgos;
1213 up->ealgos = kp->ealgos;
1214 up->calgos = kp->calgos;
1215 }
1216 RTA_PUT(skb, XFRMA_TMPL,
1217 (sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr),
1218 vec);
1219
1220 return 0;
1221
1222 rtattr_failure:
1223 return -1;
1224 }
1225
1226 static int copy_sec_ctx(struct xfrm_sec_ctx *s, struct sk_buff *skb)
1227 {
1228 int ctx_size = sizeof(struct xfrm_sec_ctx) + s->ctx_len;
1229 struct rtattr *rt = __RTA_PUT(skb, XFRMA_SEC_CTX, ctx_size);
1230 struct xfrm_user_sec_ctx *uctx = RTA_DATA(rt);
1231
1232 uctx->exttype = XFRMA_SEC_CTX;
1233 uctx->len = ctx_size;
1234 uctx->ctx_doi = s->ctx_doi;
1235 uctx->ctx_alg = s->ctx_alg;
1236 uctx->ctx_len = s->ctx_len;
1237 memcpy(uctx + 1, s->ctx_str, s->ctx_len);
1238 return 0;
1239
1240 rtattr_failure:
1241 return -1;
1242 }
1243
1244 static inline int copy_to_user_state_sec_ctx(struct xfrm_state *x, struct sk_buff *skb)
1245 {
1246 if (x->security) {
1247 return copy_sec_ctx(x->security, skb);
1248 }
1249 return 0;
1250 }
1251
1252 static inline int copy_to_user_sec_ctx(struct xfrm_policy *xp, struct sk_buff *skb)
1253 {
1254 if (xp->security) {
1255 return copy_sec_ctx(xp->security, skb);
1256 }
1257 return 0;
1258 }
1259
1260 #ifdef CONFIG_XFRM_SUB_POLICY
1261 static int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
1262 {
1263 struct xfrm_userpolicy_type upt;
1264
1265 memset(&upt, 0, sizeof(upt));
1266 upt.type = type;
1267
1268 RTA_PUT(skb, XFRMA_POLICY_TYPE, sizeof(upt), &upt);
1269
1270 return 0;
1271
1272 rtattr_failure:
1273 return -1;
1274 }
1275
1276 #else
1277 static inline int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
1278 {
1279 return 0;
1280 }
1281 #endif
1282
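/*
 * xfrm_policy_walk() callback: emit one XFRM_MSG_NEWPOLICY message per
 * policy past start_idx, with its templates, security context and, when
 * CONFIG_XFRM_SUB_POLICY is enabled, the policy type attached as
 * attributes.
 */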
1283 static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr)
1284 {
1285 struct xfrm_dump_info *sp = ptr;
1286 struct xfrm_userpolicy_info *p;
1287 struct sk_buff *in_skb = sp->in_skb;
1288 struct sk_buff *skb = sp->out_skb;
1289 struct nlmsghdr *nlh;
1290 unsigned char *b = skb_tail_pointer(skb);
1291
1292 if (sp->this_idx < sp->start_idx)
1293 goto out;
1294
1295 nlh = NLMSG_PUT(skb, NETLINK_CB(in_skb).pid,
1296 sp->nlmsg_seq,
1297 XFRM_MSG_NEWPOLICY, sizeof(*p));
1298 p = NLMSG_DATA(nlh);
1299 nlh->nlmsg_flags = sp->nlmsg_flags;
1300
1301 copy_to_user_policy(xp, p, dir);
1302 if (copy_to_user_tmpl(xp, skb) < 0)
1303 goto nlmsg_failure;
1304 if (copy_to_user_sec_ctx(xp, skb))
1305 goto nlmsg_failure;
1306 if (copy_to_user_policy_type(xp->type, skb) < 0)
1307 goto nlmsg_failure;
1308
1309 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1310 out:
1311 sp->this_idx++;
1312 return 0;
1313
1314 nlmsg_failure:
1315 nlmsg_trim(skb, b);
1316 return -1;
1317 }
1318
1319 static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb)
1320 {
1321 struct xfrm_dump_info info;
1322
1323 info.in_skb = cb->skb;
1324 info.out_skb = skb;
1325 info.nlmsg_seq = cb->nlh->nlmsg_seq;
1326 info.nlmsg_flags = NLM_F_MULTI;
1327 info.this_idx = 0;
1328 info.start_idx = cb->args[0];
1329 (void) xfrm_policy_walk(XFRM_POLICY_TYPE_MAIN, dump_one_policy, &info);
1330 #ifdef CONFIG_XFRM_SUB_POLICY
1331 (void) xfrm_policy_walk(XFRM_POLICY_TYPE_SUB, dump_one_policy, &info);
1332 #endif
1333 cb->args[0] = info.this_idx;
1334
1335 return skb->len;
1336 }
1337
1338 static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb,
1339 struct xfrm_policy *xp,
1340 int dir, u32 seq)
1341 {
1342 struct xfrm_dump_info info;
1343 struct sk_buff *skb;
1344
1345 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1346 if (!skb)
1347 return ERR_PTR(-ENOMEM);
1348
1349 info.in_skb = in_skb;
1350 info.out_skb = skb;
1351 info.nlmsg_seq = seq;
1352 info.nlmsg_flags = 0;
1353 info.this_idx = info.start_idx = 0;
1354
1355 if (dump_one_policy(xp, dir, 0, &info) < 0) {
1356 kfree_skb(skb);
1357 return NULL;
1358 }
1359
1360 return skb;
1361 }
1362
1363 static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1364 struct rtattr **xfrma)
1365 {
1366 struct xfrm_policy *xp;
1367 struct xfrm_userpolicy_id *p;
1368 u8 type = XFRM_POLICY_TYPE_MAIN;
1369 int err;
1370 struct km_event c;
1371 int delete;
1372
1373 p = NLMSG_DATA(nlh);
1374 delete = nlh->nlmsg_type == XFRM_MSG_DELPOLICY;
1375
1376 err = copy_from_user_policy_type(&type, xfrma);
1377 if (err)
1378 return err;
1379
1380 err = verify_policy_dir(p->dir);
1381 if (err)
1382 return err;
1383
1384 if (p->index)
1385 xp = xfrm_policy_byid(type, p->dir, p->index, delete, &err);
1386 else {
1387 struct rtattr *rt = xfrma[XFRMA_SEC_CTX-1];
1388 struct xfrm_policy tmp;
1389
1390 err = verify_sec_ctx_len(xfrma);
1391 if (err)
1392 return err;
1393
1394 memset(&tmp, 0, sizeof(struct xfrm_policy));
1395 if (rt) {
1396 struct xfrm_user_sec_ctx *uctx = RTA_DATA(rt);
1397
1398 if ((err = security_xfrm_policy_alloc(&tmp, uctx)))
1399 return err;
1400 }
1401 xp = xfrm_policy_bysel_ctx(type, p->dir, &p->sel, tmp.security,
1402 delete, &err);
1403 security_xfrm_policy_free(&tmp);
1404 }
1405 if (xp == NULL)
1406 return -ENOENT;
1407
1408 if (!delete) {
1409 struct sk_buff *resp_skb;
1410
1411 resp_skb = xfrm_policy_netlink(skb, xp, p->dir, nlh->nlmsg_seq);
1412 if (IS_ERR(resp_skb)) {
1413 err = PTR_ERR(resp_skb);
1414 } else {
1415 err = netlink_unicast(xfrm_nl, resp_skb,
1416 NETLINK_CB(skb).pid,
1417 MSG_DONTWAIT);
1418 }
1419 } else {
1420 xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid,
1421 AUDIT_MAC_IPSEC_DELSPD, err ? 0 : 1, xp, NULL);
1422
1423 if (err != 0)
1424 goto out;
1425
1426 c.data.byid = p->index;
1427 c.event = nlh->nlmsg_type;
1428 c.seq = nlh->nlmsg_seq;
1429 c.pid = nlh->nlmsg_pid;
1430 km_policy_notify(xp, p->dir, &c);
1431 }
1432
1433 out:
1434 xfrm_pol_put(xp);
1435 return err;
1436 }
1437
1438 static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
1439 struct rtattr **xfrma)
1440 {
1441 struct km_event c;
1442 struct xfrm_usersa_flush *p = NLMSG_DATA(nlh);
1443 struct xfrm_audit audit_info;
1444
1445 audit_info.loginuid = NETLINK_CB(skb).loginuid;
1446 audit_info.secid = NETLINK_CB(skb).sid;
1447 xfrm_state_flush(p->proto, &audit_info);
1448 c.data.proto = p->proto;
1449 c.event = nlh->nlmsg_type;
1450 c.seq = nlh->nlmsg_seq;
1451 c.pid = nlh->nlmsg_pid;
1452 km_state_notify(NULL, &c);
1453
1454 return 0;
1455 }
1456
1457
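/*
 * Build an XFRM_MSG_NEWAE message for one SA: always carries the replay
 * state and current lifetime, plus the replay and expiry thresholds when
 * the corresponding XFRM_AE_RTHR/XFRM_AE_ETHR flags are set.
 */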
1458 static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, struct km_event *c)
1459 {
1460 struct xfrm_aevent_id *id;
1461 struct nlmsghdr *nlh;
1462 struct xfrm_lifetime_cur ltime;
1463 unsigned char *b = skb_tail_pointer(skb);
1464
1465 nlh = NLMSG_PUT(skb, c->pid, c->seq, XFRM_MSG_NEWAE, sizeof(*id));
1466 id = NLMSG_DATA(nlh);
1467 nlh->nlmsg_flags = 0;
1468
1469 memcpy(&id->sa_id.daddr, &x->id.daddr,sizeof(x->id.daddr));
1470 id->sa_id.spi = x->id.spi;
1471 id->sa_id.family = x->props.family;
1472 id->sa_id.proto = x->id.proto;
1473 memcpy(&id->saddr, &x->props.saddr,sizeof(x->props.saddr));
1474 id->reqid = x->props.reqid;
1475 id->flags = c->data.aevent;
1476
1477 RTA_PUT(skb, XFRMA_REPLAY_VAL, sizeof(x->replay), &x->replay);
1478
1479 ltime.bytes = x->curlft.bytes;
1480 ltime.packets = x->curlft.packets;
1481 ltime.add_time = x->curlft.add_time;
1482 ltime.use_time = x->curlft.use_time;
1483
1484 RTA_PUT(skb, XFRMA_LTIME_VAL, sizeof(struct xfrm_lifetime_cur), &ltime);
1485
1486 if (id->flags&XFRM_AE_RTHR) {
1487 RTA_PUT(skb,XFRMA_REPLAY_THRESH,sizeof(u32),&x->replay_maxdiff);
1488 }
1489
1490 if (id->flags&XFRM_AE_ETHR) {
1491 u32 etimer = x->replay_maxage*10/HZ;
1492 RTA_PUT(skb,XFRMA_ETIMER_THRESH,sizeof(u32),&etimer);
1493 }
1494
1495 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1496 return skb->len;
1497
1498 rtattr_failure:
1499 nlmsg_failure:
1500 nlmsg_trim(skb, b);
1501 return -1;
1502 }
1503
1504 static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
1505 struct rtattr **xfrma)
1506 {
1507 struct xfrm_state *x;
1508 struct sk_buff *r_skb;
1509 int err;
1510 struct km_event c;
1511 struct xfrm_aevent_id *p = NLMSG_DATA(nlh);
1512 int len = NLMSG_LENGTH(sizeof(struct xfrm_aevent_id));
1513 struct xfrm_usersa_id *id = &p->sa_id;
1514
1515 len += RTA_SPACE(sizeof(struct xfrm_replay_state));
1516 len += RTA_SPACE(sizeof(struct xfrm_lifetime_cur));
1517
1518 if (p->flags&XFRM_AE_RTHR)
1519 len+=RTA_SPACE(sizeof(u32));
1520
1521 if (p->flags&XFRM_AE_ETHR)
1522 len+=RTA_SPACE(sizeof(u32));
1523
1524 r_skb = alloc_skb(len, GFP_ATOMIC);
1525 if (r_skb == NULL)
1526 return -ENOMEM;
1527
1528 x = xfrm_state_lookup(&id->daddr, id->spi, id->proto, id->family);
1529 if (x == NULL) {
1530 kfree_skb(r_skb);
1531 return -ESRCH;
1532 }
1533
1534 /*
1535 * XXX: is this lock really needed - none of the other
1536 * getters take the lock (the concern is things getting updated
1537 * while we are still reading) - jhs
1538 */
1539 spin_lock_bh(&x->lock);
1540 c.data.aevent = p->flags;
1541 c.seq = nlh->nlmsg_seq;
1542 c.pid = nlh->nlmsg_pid;
1543
1544 if (build_aevent(r_skb, x, &c) < 0)
1545 BUG();
1546 err = netlink_unicast(xfrm_nl, r_skb,
1547 NETLINK_CB(skb).pid, MSG_DONTWAIT);
1548 spin_unlock_bh(&x->lock);
1549 xfrm_state_put(x);
1550 return err;
1551 }
1552
1553 static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
1554 struct rtattr **xfrma)
1555 {
1556 struct xfrm_state *x;
1557 struct km_event c;
1558 int err = - EINVAL;
1559 struct xfrm_aevent_id *p = NLMSG_DATA(nlh);
1560 struct rtattr *rp = xfrma[XFRMA_REPLAY_VAL-1];
1561 struct rtattr *lt = xfrma[XFRMA_LTIME_VAL-1];
1562
1563 if (!lt && !rp)
1564 return err;
1565
1566 /* pedantic mode - thou shalt sayeth replaceth */
1567 if (!(nlh->nlmsg_flags&NLM_F_REPLACE))
1568 return err;
1569
1570 x = xfrm_state_lookup(&p->sa_id.daddr, p->sa_id.spi, p->sa_id.proto, p->sa_id.family);
1571 if (x == NULL)
1572 return -ESRCH;
1573
1574 if (x->km.state != XFRM_STATE_VALID)
1575 goto out;
1576
1577 spin_lock_bh(&x->lock);
1578 err = xfrm_update_ae_params(x, xfrma);
1579 spin_unlock_bh(&x->lock);
1580 if (err < 0)
1581 goto out;
1582
1583 c.event = nlh->nlmsg_type;
1584 c.seq = nlh->nlmsg_seq;
1585 c.pid = nlh->nlmsg_pid;
1586 c.data.aevent = XFRM_AE_CU;
1587 km_state_notify(x, &c);
1588 err = 0;
1589 out:
1590 xfrm_state_put(x);
1591 return err;
1592 }
1593
1594 static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1595 struct rtattr **xfrma)
1596 {
1597 struct km_event c;
1598 u8 type = XFRM_POLICY_TYPE_MAIN;
1599 int err;
1600 struct xfrm_audit audit_info;
1601
1602 err = copy_from_user_policy_type(&type, xfrma);
1603 if (err)
1604 return err;
1605
1606 audit_info.loginuid = NETLINK_CB(skb).loginuid;
1607 audit_info.secid = NETLINK_CB(skb).sid;
1608 xfrm_policy_flush(type, &audit_info);
1609 c.data.type = type;
1610 c.event = nlh->nlmsg_type;
1611 c.seq = nlh->nlmsg_seq;
1612 c.pid = nlh->nlmsg_pid;
1613 km_policy_notify(NULL, 0, &c);
1614 return 0;
1615 }
1616
1617 static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
1618 struct rtattr **xfrma)
1619 {
1620 struct xfrm_policy *xp;
1621 struct xfrm_user_polexpire *up = NLMSG_DATA(nlh);
1622 struct xfrm_userpolicy_info *p = &up->pol;
1623 u8 type = XFRM_POLICY_TYPE_MAIN;
1624 int err = -ENOENT;
1625
1626 err = copy_from_user_policy_type(&type, xfrma);
1627 if (err)
1628 return err;
1629
1630 if (p->index)
1631 xp = xfrm_policy_byid(type, p->dir, p->index, 0, &err);
1632 else {
1633 struct rtattr *rt = xfrma[XFRMA_SEC_CTX-1];
1634 struct xfrm_policy tmp;
1635
1636 err = verify_sec_ctx_len(xfrma);
1637 if (err)
1638 return err;
1639
1640 memset(&tmp, 0, sizeof(struct xfrm_policy));
1641 if (rt) {
1642 struct xfrm_user_sec_ctx *uctx = RTA_DATA(rt);
1643
1644 if ((err = security_xfrm_policy_alloc(&tmp, uctx)))
1645 return err;
1646 }
1647 xp = xfrm_policy_bysel_ctx(type, p->dir, &p->sel, tmp.security,
1648 0, &err);
1649 security_xfrm_policy_free(&tmp);
1650 }
1651
1652 if (xp == NULL)
1653 return -ENOENT;
1654 read_lock(&xp->lock);
1655 if (xp->dead) {
1656 read_unlock(&xp->lock);
1657 goto out;
1658 }
1659
1660 read_unlock(&xp->lock);
1661 err = 0;
1662 if (up->hard) {
1663 xfrm_policy_delete(xp, p->dir);
1664 xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid,
1665 AUDIT_MAC_IPSEC_DELSPD, 1, xp, NULL);
1666
1667 } else {
1668 // reset the timers here?
1669 printk("Dont know what to do with soft policy expire\n");
1670 }
1671 km_policy_expired(xp, p->dir, up->hard, current->pid);
1672
1673 out:
1674 xfrm_pol_put(xp);
1675 return err;
1676 }
1677
1678 static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
1679 struct rtattr **xfrma)
1680 {
1681 struct xfrm_state *x;
1682 int err;
1683 struct xfrm_user_expire *ue = NLMSG_DATA(nlh);
1684 struct xfrm_usersa_info *p = &ue->state;
1685
1686 x = xfrm_state_lookup(&p->id.daddr, p->id.spi, p->id.proto, p->family);
1687
1688 err = -ENOENT;
1689 if (x == NULL)
1690 return err;
1691
1692 spin_lock_bh(&x->lock);
1693 err = -EINVAL;
1694 if (x->km.state != XFRM_STATE_VALID)
1695 goto out;
1696 km_state_expired(x, ue->hard, current->pid);
1697
1698 if (ue->hard) {
1699 __xfrm_state_delete(x);
1700 xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid,
1701 AUDIT_MAC_IPSEC_DELSA, 1, NULL, x);
1702 }
1703 err = 0;
1704 out:
1705 spin_unlock_bh(&x->lock);
1706 xfrm_state_put(x);
1707 return err;
1708 }
1709
1710 static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
1711 struct rtattr **xfrma)
1712 {
1713 struct xfrm_policy *xp;
1714 struct xfrm_user_tmpl *ut;
1715 int i;
1716 struct rtattr *rt = xfrma[XFRMA_TMPL-1];
1717
1718 struct xfrm_user_acquire *ua = NLMSG_DATA(nlh);
1719 struct xfrm_state *x = xfrm_state_alloc();
1720 int err = -ENOMEM;
1721
1722 if (!x)
1723 return err;
1724
1725 err = verify_newpolicy_info(&ua->policy);
1726 if (err) {
1727 printk("BAD policy passed\n");
1728 kfree(x);
1729 return err;
1730 }
1731
1732 /* build an XP */
1733 xp = xfrm_policy_construct(&ua->policy, (struct rtattr **) xfrma, &err);
1734 if (!xp) {
1735 kfree(x);
1736 return err;
1737 }
1738
1739 memcpy(&x->id, &ua->id, sizeof(ua->id));
1740 memcpy(&x->props.saddr, &ua->saddr, sizeof(ua->saddr));
1741 memcpy(&x->sel, &ua->sel, sizeof(ua->sel));
1742
1743 ut = RTA_DATA(rt);
1744 /* extract the templates and call km_query() for each */
1745 for (i = 0; i < xp->xfrm_nr; i++, ut++) {
1746 struct xfrm_tmpl *t = &xp->xfrm_vec[i];
1747 memcpy(&x->id, &t->id, sizeof(x->id));
1748 x->props.mode = t->mode;
1749 x->props.reqid = t->reqid;
1750 x->props.family = ut->family;
1751 t->aalgos = ua->aalgos;
1752 t->ealgos = ua->ealgos;
1753 t->calgos = ua->calgos;
1754 err = km_query(x, t, xp);
1755
1756 }
1757
1758 kfree(x);
1759 kfree(xp);
1760
1761 return 0;
1762 }
1763
1764 #ifdef CONFIG_XFRM_MIGRATE
1765 static int verify_user_migrate(struct rtattr **xfrma)
1766 {
1767 struct rtattr *rt = xfrma[XFRMA_MIGRATE-1];
1768 struct xfrm_user_migrate *um;
1769
1770 if (!rt)
1771 return -EINVAL;
1772
1773 if ((rt->rta_len - sizeof(*rt)) < sizeof(*um))
1774 return -EINVAL;
1775
1776 return 0;
1777 }
1778
1779 static int copy_from_user_migrate(struct xfrm_migrate *ma,
1780 struct rtattr **xfrma, int *num)
1781 {
1782 struct rtattr *rt = xfrma[XFRMA_MIGRATE-1];
1783 struct xfrm_user_migrate *um;
1784 int i, num_migrate;
1785
1786 um = RTA_DATA(rt);
1787 num_migrate = (rt->rta_len - sizeof(*rt)) / sizeof(*um);
1788
1789 if (num_migrate <= 0 || num_migrate > XFRM_MAX_DEPTH)
1790 return -EINVAL;
1791
1792 for (i = 0; i < num_migrate; i++, um++, ma++) {
1793 memcpy(&ma->old_daddr, &um->old_daddr, sizeof(ma->old_daddr));
1794 memcpy(&ma->old_saddr, &um->old_saddr, sizeof(ma->old_saddr));
1795 memcpy(&ma->new_daddr, &um->new_daddr, sizeof(ma->new_daddr));
1796 memcpy(&ma->new_saddr, &um->new_saddr, sizeof(ma->new_saddr));
1797
1798 ma->proto = um->proto;
1799 ma->mode = um->mode;
1800 ma->reqid = um->reqid;
1801
1802 ma->old_family = um->old_family;
1803 ma->new_family = um->new_family;
1804 }
1805
1806 *num = i;
1807 return 0;
1808 }
1809
1810 static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
1811 struct rtattr **xfrma)
1812 {
1813 struct xfrm_userpolicy_id *pi = NLMSG_DATA(nlh);
1814 struct xfrm_migrate m[XFRM_MAX_DEPTH];
1815 u8 type;
1816 int err;
1817 int n = 0;
1818
1819 err = verify_user_migrate((struct rtattr **)xfrma);
1820 if (err)
1821 return err;
1822
1823 err = copy_from_user_policy_type(&type, (struct rtattr **)xfrma);
1824 if (err)
1825 return err;
1826
1827 err = copy_from_user_migrate((struct xfrm_migrate *)m,
1828 (struct rtattr **)xfrma, &n);
1829 if (err)
1830 return err;
1831
1832 if (!n)
1833 return 0;
1834
1835 xfrm_migrate(&pi->sel, pi->dir, type, m, n);
1836
1837 return 0;
1838 }
1839 #else
1840 static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
1841 struct rtattr **xfrma)
1842 {
1843 return -ENOPROTOOPT;
1844 }
1845 #endif
1846
1847 #ifdef CONFIG_XFRM_MIGRATE
1848 static int copy_to_user_migrate(struct xfrm_migrate *m, struct sk_buff *skb)
1849 {
1850 struct xfrm_user_migrate um;
1851
1852 memset(&um, 0, sizeof(um));
1853 um.proto = m->proto;
1854 um.mode = m->mode;
1855 um.reqid = m->reqid;
1856 um.old_family = m->old_family;
1857 memcpy(&um.old_daddr, &m->old_daddr, sizeof(um.old_daddr));
1858 memcpy(&um.old_saddr, &m->old_saddr, sizeof(um.old_saddr));
1859 um.new_family = m->new_family;
1860 memcpy(&um.new_daddr, &m->new_daddr, sizeof(um.new_daddr));
1861 memcpy(&um.new_saddr, &m->new_saddr, sizeof(um.new_saddr));
1862
1863 RTA_PUT(skb, XFRMA_MIGRATE, sizeof(um), &um);
1864 return 0;
1865
1866 rtattr_failure:
1867 return -1;
1868 }
1869
1870 static int build_migrate(struct sk_buff *skb, struct xfrm_migrate *m,
1871 int num_migrate, struct xfrm_selector *sel,
1872 u8 dir, u8 type)
1873 {
1874 struct xfrm_migrate *mp;
1875 struct xfrm_userpolicy_id *pol_id;
1876 struct nlmsghdr *nlh;
1877 unsigned char *b = skb_tail_pointer(skb);
1878 int i;
1879
1880 nlh = NLMSG_PUT(skb, 0, 0, XFRM_MSG_MIGRATE, sizeof(*pol_id));
1881 pol_id = NLMSG_DATA(nlh);
1882 nlh->nlmsg_flags = 0;
1883
1884 /* copy data from selector, dir, and type to the pol_id */
1885 memset(pol_id, 0, sizeof(*pol_id));
1886 memcpy(&pol_id->sel, sel, sizeof(pol_id->sel));
1887 pol_id->dir = dir;
1888
1889 if (copy_to_user_policy_type(type, skb) < 0)
1890 goto nlmsg_failure;
1891
1892 for (i = 0, mp = m ; i < num_migrate; i++, mp++) {
1893 if (copy_to_user_migrate(mp, skb) < 0)
1894 goto nlmsg_failure;
1895 }
1896
1897 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1898 return skb->len;
1899 nlmsg_failure:
1900 nlmsg_trim(skb, b);
1901 return -1;
1902 }
1903
1904 static int xfrm_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
1905 struct xfrm_migrate *m, int num_migrate)
1906 {
1907 struct sk_buff *skb;
1908 size_t len;
1909
1910 len = RTA_SPACE(sizeof(struct xfrm_user_migrate) * num_migrate);
1911 len += NLMSG_SPACE(sizeof(struct xfrm_userpolicy_id));
1912 #ifdef CONFIG_XFRM_SUB_POLICY
1913 len += RTA_SPACE(sizeof(struct xfrm_userpolicy_type));
1914 #endif
1915 skb = alloc_skb(len, GFP_ATOMIC);
1916 if (skb == NULL)
1917 return -ENOMEM;
1918
1919 /* build migrate */
1920 if (build_migrate(skb, m, num_migrate, sel, dir, type) < 0)
1921 BUG();
1922
1923 NETLINK_CB(skb).dst_group = XFRMNLGRP_MIGRATE;
1924 return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_MIGRATE,
1925 GFP_ATOMIC);
1926 }
1927 #else
1928 static int xfrm_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
1929 struct xfrm_migrate *m, int num_migrate)
1930 {
1931 return -ENOPROTOOPT;
1932 }
1933 #endif
1934
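/* Minimum netlink payload length for each XFRM message type; anything
 * beyond this is parsed as rtattr-encoded XFRMA_* attributes. */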
1935 #define XMSGSIZE(type) NLMSG_LENGTH(sizeof(struct type))
1936
1937 static const int xfrm_msg_min[XFRM_NR_MSGTYPES] = {
1938 [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
1939 [XFRM_MSG_DELSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
1940 [XFRM_MSG_GETSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
1941 [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
1942 [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
1943 [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
1944 [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userspi_info),
1945 [XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_acquire),
1946 [XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_expire),
1947 [XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
1948 [XFRM_MSG_UPDSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
1949 [XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_polexpire),
1950 [XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_flush),
1951 [XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = NLMSG_LENGTH(0),
1952 [XFRM_MSG_NEWAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
1953 [XFRM_MSG_GETAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
1954 [XFRM_MSG_REPORT - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_report),
1955 [XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
1956 [XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = NLMSG_LENGTH(sizeof(u32)),
1957 [XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = NLMSG_LENGTH(sizeof(u32)),
1958 };
1959
1960 #undef XMSGSIZE
1961
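/* Per-message-type handlers, indexed by nlmsg_type - XFRM_MSG_BASE. */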
1962 static struct xfrm_link {
1963 int (*doit)(struct sk_buff *, struct nlmsghdr *, struct rtattr **);
1964 int (*dump)(struct sk_buff *, struct netlink_callback *);
1965 } xfrm_dispatch[XFRM_NR_MSGTYPES] = {
1966 [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa },
1967 [XFRM_MSG_DELSA - XFRM_MSG_BASE] = { .doit = xfrm_del_sa },
1968 [XFRM_MSG_GETSA - XFRM_MSG_BASE] = { .doit = xfrm_get_sa,
1969 .dump = xfrm_dump_sa },
1970 [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
1971 [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy },
1972 [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy,
1973 .dump = xfrm_dump_policy },
1974 [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi },
1975 [XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_acquire },
1976 [XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_sa_expire },
1977 [XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
1978 [XFRM_MSG_UPDSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa },
1979 [XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_pol_expire},
1980 [XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = { .doit = xfrm_flush_sa },
1981 [XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_flush_policy },
1982 [XFRM_MSG_NEWAE - XFRM_MSG_BASE] = { .doit = xfrm_new_ae },
1983 [XFRM_MSG_GETAE - XFRM_MSG_BASE] = { .doit = xfrm_get_ae },
1984 [XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = { .doit = xfrm_do_migrate },
1985 [XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_sadinfo },
1986 [XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_spdinfo },
1987 };
1988
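/*
 * Top-level netlink request handler: check the message type, require
 * CAP_NET_ADMIN for every operation, start a dump for GETSA/GETPOLICY
 * when NLM_F_DUMP is set, otherwise collect the trailing attributes into
 * xfrma[] (indexed by attribute type minus one) and call the per-type
 * ->doit() handler from xfrm_dispatch.
 */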
1989 static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
1990 {
1991 struct rtattr *xfrma[XFRMA_MAX];
1992 struct xfrm_link *link;
1993 int type, min_len;
1994
1995 type = nlh->nlmsg_type;
1996 if (type > XFRM_MSG_MAX)
1997 return -EINVAL;
1998
1999 type -= XFRM_MSG_BASE;
2000 link = &xfrm_dispatch[type];
2001
2002 /* All operations require privileges, even GET */
2003 if (security_netlink_recv(skb, CAP_NET_ADMIN))
2004 return -EPERM;
2005
2006 if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) ||
2007 type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) &&
2008 (nlh->nlmsg_flags & NLM_F_DUMP)) {
2009 if (link->dump == NULL)
2010 return -EINVAL;
2011
2012 return netlink_dump_start(xfrm_nl, skb, nlh, link->dump, NULL);
2013 }
2014
2015 memset(xfrma, 0, sizeof(xfrma));
2016
2017 if (nlh->nlmsg_len < (min_len = xfrm_msg_min[type]))
2018 return -EINVAL;
2019
2020 if (nlh->nlmsg_len > min_len) {
2021 int attrlen = nlh->nlmsg_len - NLMSG_ALIGN(min_len);
2022 struct rtattr *attr = (void *) nlh + NLMSG_ALIGN(min_len);
2023
2024 while (RTA_OK(attr, attrlen)) {
2025 unsigned short flavor = attr->rta_type;
2026 if (flavor) {
2027 if (flavor > XFRMA_MAX)
2028 return -EINVAL;
2029 xfrma[flavor - 1] = attr;
2030 }
2031 attr = RTA_NEXT(attr, attrlen);
2032 }
2033 }
2034
2035 if (link->doit == NULL)
2036 return -EINVAL;
2037
2038 return link->doit(skb, nlh, xfrma);
2039 }
2040
2041 static void xfrm_netlink_rcv(struct sock *sk, int len)
2042 {
2043 unsigned int qlen = 0;
2044
2045 do {
2046 mutex_lock(&xfrm_cfg_mutex);
2047 netlink_run_queue(sk, &qlen, &xfrm_user_rcv_msg);
2048 mutex_unlock(&xfrm_cfg_mutex);
2049
2050 } while (qlen);
2051 }
2052
2053 static int build_expire(struct sk_buff *skb, struct xfrm_state *x, struct km_event *c)
2054 {
2055 struct xfrm_user_expire *ue;
2056 struct nlmsghdr *nlh;
2057 unsigned char *b = skb_tail_pointer(skb);
2058
2059 nlh = NLMSG_PUT(skb, c->pid, 0, XFRM_MSG_EXPIRE,
2060 sizeof(*ue));
2061 ue = NLMSG_DATA(nlh);
2062 nlh->nlmsg_flags = 0;
2063
2064 copy_to_user_state(x, &ue->state);
2065 ue->hard = (c->data.hard != 0) ? 1 : 0;
2066
2067 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2068 return skb->len;
2069
2070 nlmsg_failure:
2071 nlmsg_trim(skb, b);
2072 return -1;
2073 }
2074
2075 static int xfrm_exp_state_notify(struct xfrm_state *x, struct km_event *c)
2076 {
2077 struct sk_buff *skb;
2078 int len = NLMSG_LENGTH(sizeof(struct xfrm_user_expire));
2079
2080 skb = alloc_skb(len, GFP_ATOMIC);
2081 if (skb == NULL)
2082 return -ENOMEM;
2083
2084 if (build_expire(skb, x, c) < 0)
2085 BUG();
2086
2087 NETLINK_CB(skb).dst_group = XFRMNLGRP_EXPIRE;
2088 return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
2089 }
2090
2091 static int xfrm_aevent_state_notify(struct xfrm_state *x, struct km_event *c)
2092 {
2093 struct sk_buff *skb;
2094 int len = NLMSG_LENGTH(sizeof(struct xfrm_aevent_id));
2095
2096 len += RTA_SPACE(sizeof(struct xfrm_replay_state));
2097 len += RTA_SPACE(sizeof(struct xfrm_lifetime_cur));
2098 skb = alloc_skb(len, GFP_ATOMIC);
2099 if (skb == NULL)
2100 return -ENOMEM;
2101
2102 if (build_aevent(skb, x, c) < 0)
2103 BUG();
2104
2105 NETLINK_CB(skb).dst_group = XFRMNLGRP_AEVENTS;
2106 return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_AEVENTS, GFP_ATOMIC);
2107 }
2108
2109 static int xfrm_notify_sa_flush(struct km_event *c)
2110 {
2111 struct xfrm_usersa_flush *p;
2112 struct nlmsghdr *nlh;
2113 struct sk_buff *skb;
2114 sk_buff_data_t b;
2115 int len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_flush));
2116
2117 skb = alloc_skb(len, GFP_ATOMIC);
2118 if (skb == NULL)
2119 return -ENOMEM;
2120 b = skb->tail;
2121
2122 nlh = NLMSG_PUT(skb, c->pid, c->seq,
2123 XFRM_MSG_FLUSHSA, sizeof(*p));
2124 nlh->nlmsg_flags = 0;
2125
2126 p = NLMSG_DATA(nlh);
2127 p->proto = c->data.proto;
2128
2129 nlh->nlmsg_len = skb->tail - b;
2130
2131 NETLINK_CB(skb).dst_group = XFRMNLGRP_SA;
2132 return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC);
2133
2134 nlmsg_failure:
2135 kfree_skb(skb);
2136 return -1;
2137 }
2138
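/* Attribute space needed to notify one SA: algorithms (including their
 * keys) plus the NAT-T encapsulation template, if present. */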
2139 static inline int xfrm_sa_len(struct xfrm_state *x)
2140 {
2141 int l = 0;
2142 if (x->aalg)
2143 l += RTA_SPACE(sizeof(*x->aalg) + (x->aalg->alg_key_len+7)/8);
2144 if (x->ealg)
2145 l += RTA_SPACE(sizeof(*x->ealg) + (x->ealg->alg_key_len+7)/8);
2146 if (x->calg)
2147 l += RTA_SPACE(sizeof(*x->calg));
2148 if (x->encap)
2149 l += RTA_SPACE(sizeof(*x->encap));
2150
2151 return l;
2152 }
2153
2154 static int xfrm_notify_sa(struct xfrm_state *x, struct km_event *c)
2155 {
2156 struct xfrm_usersa_info *p;
2157 struct xfrm_usersa_id *id;
2158 struct nlmsghdr *nlh;
2159 struct sk_buff *skb;
2160 sk_buff_data_t b;
2161 int len = xfrm_sa_len(x);
2162 int headlen;
2163
2164 headlen = sizeof(*p);
2165 if (c->event == XFRM_MSG_DELSA) {
2166 len += RTA_SPACE(headlen);
2167 headlen = sizeof(*id);
2168 }
2169 len += NLMSG_SPACE(headlen);
2170
2171 skb = alloc_skb(len, GFP_ATOMIC);
2172 if (skb == NULL)
2173 return -ENOMEM;
2174 b = skb->tail;
2175
2176 nlh = NLMSG_PUT(skb, c->pid, c->seq, c->event, headlen);
2177 nlh->nlmsg_flags = 0;
2178
2179 p = NLMSG_DATA(nlh);
2180 if (c->event == XFRM_MSG_DELSA) {
2181 id = NLMSG_DATA(nlh);
2182 memcpy(&id->daddr, &x->id.daddr, sizeof(id->daddr));
2183 id->spi = x->id.spi;
2184 id->family = x->props.family;
2185 id->proto = x->id.proto;
2186
2187 p = RTA_DATA(__RTA_PUT(skb, XFRMA_SA, sizeof(*p)));
2188 }
2189
2190 copy_to_user_state(x, p);
2191
2192 if (x->aalg)
2193 RTA_PUT(skb, XFRMA_ALG_AUTH,
2194 sizeof(*(x->aalg))+(x->aalg->alg_key_len+7)/8, x->aalg);
2195 if (x->ealg)
2196 RTA_PUT(skb, XFRMA_ALG_CRYPT,
2197 sizeof(*(x->ealg))+(x->ealg->alg_key_len+7)/8, x->ealg);
2198 if (x->calg)
2199 RTA_PUT(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg);
2200
2201 if (x->encap)
2202 RTA_PUT(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap);
2203
2204 nlh->nlmsg_len = skb->tail - b;
2205
2206 NETLINK_CB(skb).dst_group = XFRMNLGRP_SA;
2207 return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC);
2208
2209 nlmsg_failure:
2210 rtattr_failure:
2211 kfree_skb(skb);
2212 return -1;
2213 }
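/*
 * Message layout produced above: for XFRM_MSG_NEWSA/XFRM_MSG_UPDSA the
 * netlink payload begins directly with struct xfrm_usersa_info; for
 * XFRM_MSG_DELSA it begins with struct xfrm_usersa_id and the full
 * xfrm_usersa_info is nested in an XFRMA_SA attribute instead.  In both
 * cases the optional XFRMA_ALG_AUTH/CRYPT/COMP and XFRMA_ENCAP attributes
 * follow.
 */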
2214
2215 static int xfrm_send_state_notify(struct xfrm_state *x, struct km_event *c)
2216 {
2217
2218 switch (c->event) {
2219 case XFRM_MSG_EXPIRE:
2220 return xfrm_exp_state_notify(x, c);
2221 case XFRM_MSG_NEWAE:
2222 return xfrm_aevent_state_notify(x, c);
2223 case XFRM_MSG_DELSA:
2224 case XFRM_MSG_UPDSA:
2225 case XFRM_MSG_NEWSA:
2226 return xfrm_notify_sa(x, c);
2227 case XFRM_MSG_FLUSHSA:
2228 return xfrm_notify_sa_flush(c);
2229 default:
2230 printk(KERN_NOTICE "xfrm_user: Unknown SA event %d\n", c->event);
2231 break;
2232 }
2233
2234 return 0;
2235
2236 }
2237
2238 static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
2239 struct xfrm_tmpl *xt, struct xfrm_policy *xp,
2240 int dir)
2241 {
2242 struct xfrm_user_acquire *ua;
2243 struct nlmsghdr *nlh;
2244 unsigned char *b = skb_tail_pointer(skb);
2245 __u32 seq = xfrm_get_acqseq();
2246
2247 nlh = NLMSG_PUT(skb, 0, 0, XFRM_MSG_ACQUIRE,
2248 sizeof(*ua));
2249 ua = NLMSG_DATA(nlh);
2250 nlh->nlmsg_flags = 0;
2251
2252 memcpy(&ua->id, &x->id, sizeof(ua->id));
2253 memcpy(&ua->saddr, &x->props.saddr, sizeof(ua->saddr));
2254 memcpy(&ua->sel, &x->sel, sizeof(ua->sel));
2255 copy_to_user_policy(xp, &ua->policy, dir);
2256 ua->aalgos = xt->aalgos;
2257 ua->ealgos = xt->ealgos;
2258 ua->calgos = xt->calgos;
2259 ua->seq = x->km.seq = seq;
2260
2261 if (copy_to_user_tmpl(xp, skb) < 0)
2262 goto nlmsg_failure;
2263 if (copy_to_user_state_sec_ctx(x, skb))
2264 goto nlmsg_failure;
2265 if (copy_to_user_policy_type(xp->type, skb) < 0)
2266 goto nlmsg_failure;
2267
2268 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2269 return skb->len;
2270
2271 nlmsg_failure:
2272 nlmsg_trim(skb, b);
2273 return -1;
2274 }
2275
2276 static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt,
2277 struct xfrm_policy *xp, int dir)
2278 {
2279 struct sk_buff *skb;
2280 size_t len;
2281
2282 len = RTA_SPACE(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr);
2283 len += NLMSG_SPACE(sizeof(struct xfrm_user_acquire));
2284 len += RTA_SPACE(xfrm_user_sec_ctx_size(x->security));
2285 #ifdef CONFIG_XFRM_SUB_POLICY
2286 len += RTA_SPACE(sizeof(struct xfrm_userpolicy_type));
2287 #endif
2288 skb = alloc_skb(len, GFP_ATOMIC);
2289 if (skb == NULL)
2290 return -ENOMEM;
2291
2292 if (build_acquire(skb, x, xt, xp, dir) < 0)
2293 BUG();
2294
2295 NETLINK_CB(skb).dst_group = XFRMNLGRP_ACQUIRE;
2296 return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_ACQUIRE, GFP_ATOMIC);
2297 }
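/*
 * XFRM_MSG_ACQUIRE is broadcast on XFRMNLGRP_ACQUIRE when the stack needs an
 * SA it does not yet have; a userspace key manager (an IKE daemon, for
 * example) listening on that group is expected to negotiate the SA and
 * install it with XFRM_MSG_NEWSA.  The ua->seq value set in build_acquire()
 * can be used to correlate that reply with the pending acquire.
 */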
2298
2299 /* User gives us xfrm_user_policy_info followed by an array of 0
2300 * or more templates.
2301 */
2302 static struct xfrm_policy *xfrm_compile_policy(struct sock *sk, int opt,
2303 u8 *data, int len, int *dir)
2304 {
2305 struct xfrm_userpolicy_info *p = (struct xfrm_userpolicy_info *)data;
2306 struct xfrm_user_tmpl *ut = (struct xfrm_user_tmpl *) (p + 1);
2307 struct xfrm_policy *xp;
2308 int nr;
2309
2310 switch (sk->sk_family) {
2311 case AF_INET:
2312 if (opt != IP_XFRM_POLICY) {
2313 *dir = -EOPNOTSUPP;
2314 return NULL;
2315 }
2316 break;
2317 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
2318 case AF_INET6:
2319 if (opt != IPV6_XFRM_POLICY) {
2320 *dir = -EOPNOTSUPP;
2321 return NULL;
2322 }
2323 break;
2324 #endif
2325 default:
2326 *dir = -EINVAL;
2327 return NULL;
2328 }
2329
2330 *dir = -EINVAL;
2331
2332 if (len < sizeof(*p) ||
2333 verify_newpolicy_info(p))
2334 return NULL;
2335
2336 nr = ((len - sizeof(*p)) / sizeof(*ut));
2337 if (validate_tmpl(nr, ut, p->sel.family))
2338 return NULL;
2339
2340 if (p->dir > XFRM_POLICY_OUT)
2341 return NULL;
2342
2343 xp = xfrm_policy_alloc(GFP_KERNEL);
2344 if (xp == NULL) {
2345 *dir = -ENOBUFS;
2346 return NULL;
2347 }
2348
2349 copy_from_user_policy(xp, p);
2350 xp->type = XFRM_POLICY_TYPE_MAIN;
2351 copy_templates(xp, ut, nr);
2352
2353 *dir = p->dir;
2354
2355 return xp;
2356 }
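/*
 * A minimal, hypothetical userspace sketch of the per-socket policy format
 * consumed by xfrm_compile_policy() above (error handling omitted):
 *
 *	struct {
 *		struct xfrm_userpolicy_info info;
 *		struct xfrm_user_tmpl tmpl[1];
 *	} pol;
 *
 *	memset(&pol, 0, sizeof(pol));
 *	pol.info.dir         = XFRM_POLICY_OUT;
 *	pol.info.sel.family  = AF_INET;
 *	pol.tmpl[0].family   = AF_INET;
 *	pol.tmpl[0].id.proto = IPPROTO_ESP;
 *	setsockopt(fd, IPPROTO_IP, IP_XFRM_POLICY, &pol, sizeof(pol));
 *
 * An AF_INET6 socket uses IPPROTO_IPV6 / IPV6_XFRM_POLICY instead, matching
 * the switch on sk->sk_family above.
 */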
2357
2358 static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp,
2359 int dir, struct km_event *c)
2360 {
2361 struct xfrm_user_polexpire *upe;
2362 struct nlmsghdr *nlh;
2363 int hard = c->data.hard;
2364 unsigned char *b = skb_tail_pointer(skb);
2365
2366 nlh = NLMSG_PUT(skb, c->pid, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe));
2367 upe = NLMSG_DATA(nlh);
2368 nlh->nlmsg_flags = 0;
2369
2370 copy_to_user_policy(xp, &upe->pol, dir);
2371 if (copy_to_user_tmpl(xp, skb) < 0)
2372 goto nlmsg_failure;
2373 if (copy_to_user_sec_ctx(xp, skb))
2374 goto nlmsg_failure;
2375 if (copy_to_user_policy_type(xp->type, skb) < 0)
2376 goto nlmsg_failure;
2377 upe->hard = !!hard;
2378
2379 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2380 return skb->len;
2381
2382 nlmsg_failure:
2383 nlmsg_trim(skb, b);
2384 return -1;
2385 }
2386
2387 static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
2388 {
2389 struct sk_buff *skb;
2390 size_t len;
2391
2392 len = RTA_SPACE(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr);
2393 len += NLMSG_SPACE(sizeof(struct xfrm_user_polexpire));
2394 len += RTA_SPACE(xfrm_user_sec_ctx_size(xp->security));
2395 #ifdef CONFIG_XFRM_SUB_POLICY
2396 len += RTA_SPACE(sizeof(struct xfrm_userpolicy_type));
2397 #endif
2398 skb = alloc_skb(len, GFP_ATOMIC);
2399 if (skb == NULL)
2400 return -ENOMEM;
2401
2402 if (build_polexpire(skb, xp, dir, c) < 0)
2403 BUG();
2404
2405 NETLINK_CB(skb).dst_group = XFRMNLGRP_EXPIRE;
2406 return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
2407 }
2408
2409 static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *c)
2410 {
2411 struct xfrm_userpolicy_info *p;
2412 struct xfrm_userpolicy_id *id;
2413 struct nlmsghdr *nlh;
2414 struct sk_buff *skb;
2415 sk_buff_data_t b;
2416 int len = RTA_SPACE(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr);
2417 int headlen;
2418
2419 headlen = sizeof(*p);
2420 if (c->event == XFRM_MSG_DELPOLICY) {
2421 len += RTA_SPACE(headlen);
2422 headlen = sizeof(*id);
2423 }
2424 #ifdef CONFIG_XFRM_SUB_POLICY
2425 len += RTA_SPACE(sizeof(struct xfrm_userpolicy_type));
2426 #endif
2427 len += NLMSG_SPACE(headlen);
2428
2429 skb = alloc_skb(len, GFP_ATOMIC);
2430 if (skb == NULL)
2431 return -ENOMEM;
2432 b = skb->tail;
2433
2434 nlh = NLMSG_PUT(skb, c->pid, c->seq, c->event, headlen);
2435
2436 p = NLMSG_DATA(nlh);
2437 if (c->event == XFRM_MSG_DELPOLICY) {
2438 id = NLMSG_DATA(nlh);
2439 memset(id, 0, sizeof(*id));
2440 id->dir = dir;
2441 if (c->data.byid)
2442 id->index = xp->index;
2443 else
2444 memcpy(&id->sel, &xp->selector, sizeof(id->sel));
2445
2446 p = RTA_DATA(__RTA_PUT(skb, XFRMA_POLICY, sizeof(*p)));
2447 }
2448
2449 nlh->nlmsg_flags = 0;
2450
2451 copy_to_user_policy(xp, p, dir);
2452 if (copy_to_user_tmpl(xp, skb) < 0)
2453 goto nlmsg_failure;
2454 if (copy_to_user_policy_type(xp->type, skb) < 0)
2455 goto nlmsg_failure;
2456
2457 nlh->nlmsg_len = skb->tail - b;
2458
2459 NETLINK_CB(skb).dst_group = XFRMNLGRP_POLICY;
2460 return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);
2461
2462 nlmsg_failure:
2463 rtattr_failure:
2464 kfree_skb(skb);
2465 return -1;
2466 }
2467
2468 static int xfrm_notify_policy_flush(struct km_event *c)
2469 {
2470 struct nlmsghdr *nlh;
2471 struct sk_buff *skb;
2472 sk_buff_data_t b;
2473 int len = 0;
2474 #ifdef CONFIG_XFRM_SUB_POLICY
2475 len += RTA_SPACE(sizeof(struct xfrm_userpolicy_type));
2476 #endif
2477 len += NLMSG_LENGTH(0);
2478
2479 skb = alloc_skb(len, GFP_ATOMIC);
2480 if (skb == NULL)
2481 return -ENOMEM;
2482 b = skb->tail;
2483
2484
2485 nlh = NLMSG_PUT(skb, c->pid, c->seq, XFRM_MSG_FLUSHPOLICY, 0);
2486 nlh->nlmsg_flags = 0;
2487 if (copy_to_user_policy_type(c->data.type, skb) < 0)
2488 goto nlmsg_failure;
2489
2490 nlh->nlmsg_len = skb->tail - b;
2491
2492 NETLINK_CB(skb).dst_group = XFRMNLGRP_POLICY;
2493 return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);
2494
2495 nlmsg_failure:
2496 kfree_skb(skb);
2497 return -1;
2498 }
2499
2500 static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
2501 {
2502
2503 switch (c->event) {
2504 case XFRM_MSG_NEWPOLICY:
2505 case XFRM_MSG_UPDPOLICY:
2506 case XFRM_MSG_DELPOLICY:
2507 return xfrm_notify_policy(xp, dir, c);
2508 case XFRM_MSG_FLUSHPOLICY:
2509 return xfrm_notify_policy_flush(c);
2510 case XFRM_MSG_POLEXPIRE:
2511 return xfrm_exp_policy_notify(xp, dir, c);
2512 default:
2513 printk(KERN_NOTICE "xfrm_user: Unknown Policy event %d\n", c->event);
2514 }
2515
2516 return 0;
2517
2518 }
2519
2520 static int build_report(struct sk_buff *skb, u8 proto,
2521 struct xfrm_selector *sel, xfrm_address_t *addr)
2522 {
2523 struct xfrm_user_report *ur;
2524 struct nlmsghdr *nlh;
2525 unsigned char *b = skb_tail_pointer(skb);
2526
2527 nlh = NLMSG_PUT(skb, 0, 0, XFRM_MSG_REPORT, sizeof(*ur));
2528 ur = NLMSG_DATA(nlh);
2529 nlh->nlmsg_flags = 0;
2530
2531 ur->proto = proto;
2532 memcpy(&ur->sel, sel, sizeof(ur->sel));
2533
2534 if (addr)
2535 RTA_PUT(skb, XFRMA_COADDR, sizeof(*addr), addr);
2536
2537 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2538 return skb->len;
2539
2540 nlmsg_failure:
2541 rtattr_failure:
2542 nlmsg_trim(skb, b);
2543 return -1;
2544 }
2545
2546 static int xfrm_send_report(u8 proto, struct xfrm_selector *sel,
2547 xfrm_address_t *addr)
2548 {
2549 struct sk_buff *skb;
2550 size_t len;
2551
2552 len = NLMSG_ALIGN(NLMSG_LENGTH(sizeof(struct xfrm_user_report)));
2553 skb = alloc_skb(len, GFP_ATOMIC);
2554 if (skb == NULL)
2555 return -ENOMEM;
2556
2557 if (build_report(skb, proto, sel, addr) < 0)
2558 BUG();
2559
2560 NETLINK_CB(skb).dst_group = XFRMNLGRP_REPORT;
2561 return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_REPORT, GFP_ATOMIC);
2562 }
2563
2564 static struct xfrm_mgr netlink_mgr = {
2565 .id = "netlink",
2566 .notify = xfrm_send_state_notify,
2567 .acquire = xfrm_send_acquire,
2568 .compile_policy = xfrm_compile_policy,
2569 .notify_policy = xfrm_send_policy_notify,
2570 .report = xfrm_send_report,
2571 .migrate = xfrm_send_migrate,
2572 };
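/*
 * Registering this xfrm_mgr in xfrm_user_init() below hooks the netlink
 * interface into the generic key-manager chain: when the xfrm core invokes
 * km_state_notify(), km_policy_notify(), km_query() and friends, the
 * callbacks above translate the event into the corresponding multicast
 * message built earlier in this file.
 */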
2573
2574 static int __init xfrm_user_init(void)
2575 {
2576 struct sock *nlsk;
2577
2578 printk(KERN_INFO "Initializing XFRM netlink socket\n");
2579
2580 nlsk = netlink_kernel_create(NETLINK_XFRM, XFRMNLGRP_MAX,
2581 xfrm_netlink_rcv, NULL, THIS_MODULE);
2582 if (nlsk == NULL)
2583 return -ENOMEM;
2584 rcu_assign_pointer(xfrm_nl, nlsk);
2585
2586 xfrm_register_km(&netlink_mgr);
2587
2588 return 0;
2589 }
2590
2591 static void __exit xfrm_user_exit(void)
2592 {
2593 struct sock *nlsk = xfrm_nl;
2594
2595 xfrm_unregister_km(&netlink_mgr);
2596 rcu_assign_pointer(xfrm_nl, NULL);
2597 synchronize_rcu();
2598 sock_release(nlsk->sk_socket);
2599 }
2600
2601 module_init(xfrm_user_init);
2602 module_exit(xfrm_user_exit);
2603 MODULE_LICENSE("GPL");
2604 MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_XFRM);
2605