1 /* xfrm_user.c: User interface to configure xfrm engine.
2 *
3 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
4 *
5 * Changes:
6 * Mitsuru KANDA @USAGI
7 * Kazunori MIYAZAWA @USAGI
8 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
9 * IPv6 support
10 *
11 */
12
13 #include <linux/crypto.h>
14 #include <linux/module.h>
15 #include <linux/kernel.h>
16 #include <linux/types.h>
17 #include <linux/slab.h>
18 #include <linux/socket.h>
19 #include <linux/string.h>
20 #include <linux/net.h>
21 #include <linux/skbuff.h>
22 #include <linux/rtnetlink.h>
23 #include <linux/pfkeyv2.h>
24 #include <linux/ipsec.h>
25 #include <linux/init.h>
26 #include <linux/security.h>
27 #include <net/sock.h>
28 #include <net/xfrm.h>
29 #include <net/netlink.h>
30 #include <asm/uaccess.h>
31 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
32 #include <linux/in6.h>
33 #endif
34 #include <linux/audit.h>
35
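/*
 * alg_len(): space occupied by an xfrm_algo blob as passed from userspace,
 * i.e. the fixed header plus the key material.  alg_key_len is given in
 * bits and rounded up to whole bytes, so e.g. a 160-bit HMAC-SHA1 key
 * contributes sizeof(struct xfrm_algo) + 20 bytes.
 */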
36 static inline int alg_len(struct xfrm_algo *alg)
37 {
38 return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
39 }
40
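/*
 * Sanity-check one optional algorithm attribute: the payload must be large
 * enough for the key length it claims, zero-length keys are only accepted
 * for the explicit null transforms (digest_null/cipher_null) and for
 * compression, and the algorithm name is forcibly NUL-terminated before it
 * is used for lookups.
 */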
41 static int verify_one_alg(struct rtattr **xfrma, enum xfrm_attr_type_t type)
42 {
43 struct rtattr *rt = xfrma[type - 1];
44 struct xfrm_algo *algp;
45
46 if (!rt)
47 return 0;
48
49 algp = RTA_DATA(rt);
50 if (RTA_PAYLOAD(rt) < alg_len(algp))
51 return -EINVAL;
52
53 switch (type) {
54 case XFRMA_ALG_AUTH:
55 if (!algp->alg_key_len &&
56 strcmp(algp->alg_name, "digest_null") != 0)
57 return -EINVAL;
58 break;
59
60 case XFRMA_ALG_CRYPT:
61 if (!algp->alg_key_len &&
62 strcmp(algp->alg_name, "cipher_null") != 0)
63 return -EINVAL;
64 break;
65
66 case XFRMA_ALG_COMP:
67 /* Zero length keys are legal. */
68 break;
69
70 default:
71 return -EINVAL;
72 }
73
74 algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0';
75 return 0;
76 }
77
78 static void verify_one_addr(struct rtattr **xfrma, enum xfrm_attr_type_t type,
79 xfrm_address_t **addrp)
80 {
81 struct rtattr *rt = xfrma[type - 1];
82
83 if (rt && addrp)
84 *addrp = RTA_DATA(rt);
85 }
86
87 static inline int verify_sec_ctx_len(struct rtattr **xfrma)
88 {
89 struct rtattr *rt = xfrma[XFRMA_SEC_CTX - 1];
90 struct xfrm_user_sec_ctx *uctx;
91
92 if (!rt)
93 return 0;
94
95 uctx = RTA_DATA(rt);
96 if (uctx->len != (sizeof(struct xfrm_user_sec_ctx) + uctx->ctx_len))
97 return -EINVAL;
98
99 return 0;
100 }
101
102
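/*
 * Validate a new/updated SA request before any state is allocated: the
 * address family must be supported, the attributes present must match the
 * protocol (AH wants an auth algorithm only, ESP wants auth and/or crypt,
 * IPCOMP wants a compression algorithm only, the MIPv6 protocols want a
 * care-of address), each algorithm and the security context must pass the
 * length checks above, and the mode must be one we know about.
 */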
103 static int verify_newsa_info(struct xfrm_usersa_info *p,
104 struct rtattr **xfrma)
105 {
106 int err;
107
108 err = -EINVAL;
109 switch (p->family) {
110 case AF_INET:
111 break;
112
113 case AF_INET6:
114 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
115 break;
116 #else
117 err = -EAFNOSUPPORT;
118 goto out;
119 #endif
120
121 default:
122 goto out;
123 }
124
125 err = -EINVAL;
126 switch (p->id.proto) {
127 case IPPROTO_AH:
128 if (!xfrma[XFRMA_ALG_AUTH-1] ||
129 xfrma[XFRMA_ALG_CRYPT-1] ||
130 xfrma[XFRMA_ALG_COMP-1])
131 goto out;
132 break;
133
134 case IPPROTO_ESP:
135 if ((!xfrma[XFRMA_ALG_AUTH-1] &&
136 !xfrma[XFRMA_ALG_CRYPT-1]) ||
137 xfrma[XFRMA_ALG_COMP-1])
138 goto out;
139 break;
140
141 case IPPROTO_COMP:
142 if (!xfrma[XFRMA_ALG_COMP-1] ||
143 xfrma[XFRMA_ALG_AUTH-1] ||
144 xfrma[XFRMA_ALG_CRYPT-1])
145 goto out;
146 break;
147
148 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
149 case IPPROTO_DSTOPTS:
150 case IPPROTO_ROUTING:
151 if (xfrma[XFRMA_ALG_COMP-1] ||
152 xfrma[XFRMA_ALG_AUTH-1] ||
153 xfrma[XFRMA_ALG_CRYPT-1] ||
154 xfrma[XFRMA_ENCAP-1] ||
155 xfrma[XFRMA_SEC_CTX-1] ||
156 !xfrma[XFRMA_COADDR-1])
157 goto out;
158 break;
159 #endif
160
161 default:
162 goto out;
163 }
164
165 if ((err = verify_one_alg(xfrma, XFRMA_ALG_AUTH)))
166 goto out;
167 if ((err = verify_one_alg(xfrma, XFRMA_ALG_CRYPT)))
168 goto out;
169 if ((err = verify_one_alg(xfrma, XFRMA_ALG_COMP)))
170 goto out;
171 if ((err = verify_sec_ctx_len(xfrma)))
172 goto out;
173
174 err = -EINVAL;
175 switch (p->mode) {
176 case XFRM_MODE_TRANSPORT:
177 case XFRM_MODE_TUNNEL:
178 case XFRM_MODE_ROUTEOPTIMIZATION:
179 case XFRM_MODE_BEET:
180 break;
181
182 default:
183 goto out;
184 }
185
186 err = 0;
187
188 out:
189 return err;
190 }
191
192 static int attach_one_algo(struct xfrm_algo **algpp, u8 *props,
193 struct xfrm_algo_desc *(*get_byname)(char *, int),
194 struct rtattr *u_arg)
195 {
196 struct rtattr *rta = u_arg;
197 struct xfrm_algo *p, *ualg;
198 struct xfrm_algo_desc *algo;
199
200 if (!rta)
201 return 0;
202
203 ualg = RTA_DATA(rta);
204
205 algo = get_byname(ualg->alg_name, 1);
206 if (!algo)
207 return -ENOSYS;
208 *props = algo->desc.sadb_alg_id;
209
210 p = kmemdup(ualg, alg_len(ualg), GFP_KERNEL);
211 if (!p)
212 return -ENOMEM;
213
214 strcpy(p->alg_name, algo->name);
215 *algpp = p;
216 return 0;
217 }
218
219 static int attach_encap_tmpl(struct xfrm_encap_tmpl **encapp, struct rtattr *u_arg)
220 {
221 struct rtattr *rta = u_arg;
222 struct xfrm_encap_tmpl *p, *uencap;
223
224 if (!rta)
225 return 0;
226
227 uencap = RTA_DATA(rta);
228 p = kmemdup(uencap, sizeof(*p), GFP_KERNEL);
229 if (!p)
230 return -ENOMEM;
231
232 *encapp = p;
233 return 0;
234 }
235
236
237 static inline int xfrm_user_sec_ctx_size(struct xfrm_sec_ctx *xfrm_ctx)
238 {
239 int len = 0;
240
241 if (xfrm_ctx) {
242 len += sizeof(struct xfrm_user_sec_ctx);
243 len += xfrm_ctx->ctx_len;
244 }
245 return len;
246 }
247
248 static int attach_sec_ctx(struct xfrm_state *x, struct rtattr *u_arg)
249 {
250 struct xfrm_user_sec_ctx *uctx;
251
252 if (!u_arg)
253 return 0;
254
255 uctx = RTA_DATA(u_arg);
256 return security_xfrm_state_alloc(x, uctx);
257 }
258
259 static int attach_one_addr(xfrm_address_t **addrpp, struct rtattr *u_arg)
260 {
261 struct rtattr *rta = u_arg;
262 xfrm_address_t *p, *uaddrp;
263
264 if (!rta)
265 return 0;
266
267 uaddrp = RTA_DATA(rta);
268 p = kmemdup(uaddrp, sizeof(*p), GFP_KERNEL);
269 if (!p)
270 return -ENOMEM;
271
272 *addrpp = p;
273 return 0;
274 }
275
276 static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
277 {
278 memcpy(&x->id, &p->id, sizeof(x->id));
279 memcpy(&x->sel, &p->sel, sizeof(x->sel));
280 memcpy(&x->lft, &p->lft, sizeof(x->lft));
281 x->props.mode = p->mode;
282 x->props.replay_window = p->replay_window;
283 x->props.reqid = p->reqid;
284 x->props.family = p->family;
285 memcpy(&x->props.saddr, &p->saddr, sizeof(x->props.saddr));
286 x->props.flags = p->flags;
287
288 /*
289 * Set inner address family if the KM left it as zero.
290 * See comment in validate_tmpl.
291 */
292 if (!x->sel.family)
293 x->sel.family = p->family;
294 }
295
296 /*
297 * Someday, when pfkey also has support, this code could be made
298 * shareable and moved to xfrm_state.c. - JHS
299 *
300 */
301 static void xfrm_update_ae_params(struct xfrm_state *x, struct rtattr **xfrma)
302 {
303 struct rtattr *rp = xfrma[XFRMA_REPLAY_VAL-1];
304 struct rtattr *lt = xfrma[XFRMA_LTIME_VAL-1];
305 struct rtattr *et = xfrma[XFRMA_ETIMER_THRESH-1];
306 struct rtattr *rt = xfrma[XFRMA_REPLAY_THRESH-1];
307
308 if (rp) {
309 struct xfrm_replay_state *replay;
310 replay = RTA_DATA(rp);
311 memcpy(&x->replay, replay, sizeof(*replay));
312 memcpy(&x->preplay, replay, sizeof(*replay));
313 }
314
315 if (lt) {
316 struct xfrm_lifetime_cur *ltime;
317 ltime = RTA_DATA(lt);
318 x->curlft.bytes = ltime->bytes;
319 x->curlft.packets = ltime->packets;
320 x->curlft.add_time = ltime->add_time;
321 x->curlft.use_time = ltime->use_time;
322 }
323
324 if (et)
325 x->replay_maxage = *(u32*)RTA_DATA(et);
326
327 if (rt)
328 x->replay_maxdiff = *(u32*)RTA_DATA(rt);
329 }
330
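/*
 * Build a fully initialised xfrm_state from the xfrm_usersa_info header and
 * the optional attributes (algorithms, NAT-T encapsulation, care-of address,
 * security context, replay/lifetime overrides).  On failure the half-built
 * state is marked dead and released so its destructor runs.
 */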
331 static struct xfrm_state *xfrm_state_construct(struct xfrm_usersa_info *p,
332 struct rtattr **xfrma,
333 int *errp)
334 {
335 struct xfrm_state *x = xfrm_state_alloc();
336 int err = -ENOMEM;
337
338 if (!x)
339 goto error_no_put;
340
341 copy_from_user_state(x, p);
342
343 if ((err = attach_one_algo(&x->aalg, &x->props.aalgo,
344 xfrm_aalg_get_byname,
345 xfrma[XFRMA_ALG_AUTH-1])))
346 goto error;
347 if ((err = attach_one_algo(&x->ealg, &x->props.ealgo,
348 xfrm_ealg_get_byname,
349 xfrma[XFRMA_ALG_CRYPT-1])))
350 goto error;
351 if ((err = attach_one_algo(&x->calg, &x->props.calgo,
352 xfrm_calg_get_byname,
353 xfrma[XFRMA_ALG_COMP-1])))
354 goto error;
355 if ((err = attach_encap_tmpl(&x->encap, xfrma[XFRMA_ENCAP-1])))
356 goto error;
357 if ((err = attach_one_addr(&x->coaddr, xfrma[XFRMA_COADDR-1])))
358 goto error;
359 err = xfrm_init_state(x);
360 if (err)
361 goto error;
362
363 if ((err = attach_sec_ctx(x, xfrma[XFRMA_SEC_CTX-1])))
364 goto error;
365
366 x->km.seq = p->seq;
367 x->replay_maxdiff = sysctl_xfrm_aevent_rseqth;
368 /* sysctl_xfrm_aevent_etime is in 100ms units */
369 x->replay_maxage = (sysctl_xfrm_aevent_etime*HZ)/XFRM_AE_ETH_M;
370 x->preplay.bitmap = 0;
371 x->preplay.seq = x->replay.seq + x->replay_maxdiff;
372 x->preplay.oseq = x->replay.oseq + x->replay_maxdiff;
373
374 /* override default values from above */
375
376 xfrm_update_ae_params(x, (struct rtattr **)xfrma);
377
378 return x;
379
380 error:
381 x->km.state = XFRM_STATE_DEAD;
382 xfrm_state_put(x);
383 error_no_put:
384 *errp = err;
385 return NULL;
386 }
387
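/* Handler for XFRM_MSG_NEWSA and XFRM_MSG_UPDSA: construct the state, add
 * or update it, write an audit record and notify key managers of the result.
 */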
388 static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
389 struct rtattr **xfrma)
390 {
391 struct xfrm_usersa_info *p = nlmsg_data(nlh);
392 struct xfrm_state *x;
393 int err;
394 struct km_event c;
395
396 err = verify_newsa_info(p, xfrma);
397 if (err)
398 return err;
399
400 x = xfrm_state_construct(p, xfrma, &err);
401 if (!x)
402 return err;
403
404 xfrm_state_hold(x);
405 if (nlh->nlmsg_type == XFRM_MSG_NEWSA)
406 err = xfrm_state_add(x);
407 else
408 err = xfrm_state_update(x);
409
410 xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid,
411 AUDIT_MAC_IPSEC_ADDSA, err ? 0 : 1, NULL, x);
412
413 if (err < 0) {
414 x->km.state = XFRM_STATE_DEAD;
415 __xfrm_state_put(x);
416 goto out;
417 }
418
419 c.seq = nlh->nlmsg_seq;
420 c.pid = nlh->nlmsg_pid;
421 c.event = nlh->nlmsg_type;
422
423 km_state_notify(x, &c);
424 out:
425 xfrm_state_put(x);
426 return err;
427 }
428
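/*
 * Find the SA a userspace request refers to: by (daddr, spi, proto) for the
 * IPsec protocols that carry an SPI, otherwise by (daddr, saddr, proto)
 * using the mandatory XFRMA_SRCADDR attribute.
 */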
429 static struct xfrm_state *xfrm_user_state_lookup(struct xfrm_usersa_id *p,
430 struct rtattr **xfrma,
431 int *errp)
432 {
433 struct xfrm_state *x = NULL;
434 int err;
435
436 if (xfrm_id_proto_match(p->proto, IPSEC_PROTO_ANY)) {
437 err = -ESRCH;
438 x = xfrm_state_lookup(&p->daddr, p->spi, p->proto, p->family);
439 } else {
440 xfrm_address_t *saddr = NULL;
441
442 verify_one_addr(xfrma, XFRMA_SRCADDR, &saddr);
443 if (!saddr) {
444 err = -EINVAL;
445 goto out;
446 }
447
448 err = -ESRCH;
449 x = xfrm_state_lookup_byaddr(&p->daddr, saddr, p->proto,
450 p->family);
451 }
452
453 out:
454 if (!x && errp)
455 *errp = err;
456 return x;
457 }
458
459 static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
460 struct rtattr **xfrma)
461 {
462 struct xfrm_state *x;
463 int err = -ESRCH;
464 struct km_event c;
465 struct xfrm_usersa_id *p = nlmsg_data(nlh);
466
467 x = xfrm_user_state_lookup(p, xfrma, &err);
468 if (x == NULL)
469 return err;
470
471 if ((err = security_xfrm_state_delete(x)) != 0)
472 goto out;
473
474 if (xfrm_state_kern(x)) {
475 err = -EPERM;
476 goto out;
477 }
478
479 err = xfrm_state_delete(x);
480
481 if (err < 0)
482 goto out;
483
484 c.seq = nlh->nlmsg_seq;
485 c.pid = nlh->nlmsg_pid;
486 c.event = nlh->nlmsg_type;
487 km_state_notify(x, &c);
488
489 out:
490 xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid,
491 AUDIT_MAC_IPSEC_DELSA, err ? 0 : 1, NULL, x);
492 xfrm_state_put(x);
493 return err;
494 }
495
496 static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
497 {
498 memcpy(&p->id, &x->id, sizeof(p->id));
499 memcpy(&p->sel, &x->sel, sizeof(p->sel));
500 memcpy(&p->lft, &x->lft, sizeof(p->lft));
501 memcpy(&p->curlft, &x->curlft, sizeof(p->curlft));
502 memcpy(&p->stats, &x->stats, sizeof(p->stats));
503 memcpy(&p->saddr, &x->props.saddr, sizeof(p->saddr));
504 p->mode = x->props.mode;
505 p->replay_window = x->props.replay_window;
506 p->reqid = x->props.reqid;
507 p->family = x->props.family;
508 p->flags = x->props.flags;
509 p->seq = x->km.seq;
510 }
511
512 struct xfrm_dump_info {
513 struct sk_buff *in_skb;
514 struct sk_buff *out_skb;
515 u32 nlmsg_seq;
516 u16 nlmsg_flags;
517 int start_idx;
518 int this_idx;
519 };
520
521 static int copy_sec_ctx(struct xfrm_sec_ctx *s, struct sk_buff *skb)
522 {
523 int ctx_size = sizeof(struct xfrm_sec_ctx) + s->ctx_len;
524 struct xfrm_user_sec_ctx *uctx;
525 struct nlattr *attr;
526
527 attr = nla_reserve(skb, XFRMA_SEC_CTX, ctx_size);
528 if (attr == NULL)
529 return -EMSGSIZE;
530
531 uctx = nla_data(attr);
532 uctx->exttype = XFRMA_SEC_CTX;
533 uctx->len = ctx_size;
534 uctx->ctx_doi = s->ctx_doi;
535 uctx->ctx_alg = s->ctx_alg;
536 uctx->ctx_len = s->ctx_len;
537 memcpy(uctx + 1, s->ctx_str, s->ctx_len);
538
539 return 0;
540 }
541
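/*
 * Per-state callback used both for SA dumps and for single-SA replies: emit
 * one XFRM_MSG_NEWSA message carrying the xfrm_usersa_info header plus
 * whatever optional attributes (algorithms, encapsulation, security context,
 * care-of address, last-use time) the state actually has.
 */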
542 static int dump_one_state(struct xfrm_state *x, int count, void *ptr)
543 {
544 struct xfrm_dump_info *sp = ptr;
545 struct sk_buff *in_skb = sp->in_skb;
546 struct sk_buff *skb = sp->out_skb;
547 struct xfrm_usersa_info *p;
548 struct nlmsghdr *nlh;
549
550 if (sp->this_idx < sp->start_idx)
551 goto out;
552
553 nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, sp->nlmsg_seq,
554 XFRM_MSG_NEWSA, sizeof(*p), sp->nlmsg_flags);
555 if (nlh == NULL)
556 return -EMSGSIZE;
557
558 p = nlmsg_data(nlh);
559 copy_to_user_state(x, p);
560
561 if (x->aalg)
562 NLA_PUT(skb, XFRMA_ALG_AUTH, alg_len(x->aalg), x->aalg);
563 if (x->ealg)
564 NLA_PUT(skb, XFRMA_ALG_CRYPT, alg_len(x->ealg), x->ealg);
565 if (x->calg)
566 NLA_PUT(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg);
567
568 if (x->encap)
569 NLA_PUT(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap);
570
571 if (x->security && copy_sec_ctx(x->security, skb) < 0)
572 goto nla_put_failure;
573
574 if (x->coaddr)
575 NLA_PUT(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr);
576
577 if (x->lastused)
578 NLA_PUT_U64(skb, XFRMA_LASTUSED, x->lastused);
579
580 nlmsg_end(skb, nlh);
581 out:
582 sp->this_idx++;
583 return 0;
584
585 nla_put_failure:
586 nlmsg_cancel(skb, nlh);
587 return -EMSGSIZE;
588 }
589
590 static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
591 {
592 struct xfrm_dump_info info;
593
594 info.in_skb = cb->skb;
595 info.out_skb = skb;
596 info.nlmsg_seq = cb->nlh->nlmsg_seq;
597 info.nlmsg_flags = NLM_F_MULTI;
598 info.this_idx = 0;
599 info.start_idx = cb->args[0];
600 (void) xfrm_state_walk(0, dump_one_state, &info);
601 cb->args[0] = info.this_idx;
602
603 return skb->len;
604 }
605
606 static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
607 struct xfrm_state *x, u32 seq)
608 {
609 struct xfrm_dump_info info;
610 struct sk_buff *skb;
611
612 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
613 if (!skb)
614 return ERR_PTR(-ENOMEM);
615
616 info.in_skb = in_skb;
617 info.out_skb = skb;
618 info.nlmsg_seq = seq;
619 info.nlmsg_flags = 0;
620 info.this_idx = info.start_idx = 0;
621
622 if (dump_one_state(x, 0, &info)) {
623 kfree_skb(skb);
624 return NULL;
625 }
626
627 return skb;
628 }
629
630 static inline size_t xfrm_spdinfo_msgsize(void)
631 {
632 return NLMSG_ALIGN(4)
633 + nla_total_size(sizeof(struct xfrmu_spdinfo))
634 + nla_total_size(sizeof(struct xfrmu_spdhinfo));
635 }
636
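/*
 * SPD info reply layout: a u32 echoing the request flags in the message
 * body, followed by an XFRMA_SPD_INFO attribute (policy counts per
 * direction, including socket policies) and an XFRMA_SPD_HINFO attribute
 * (policy hash table utilisation).
 */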
637 static int build_spdinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags)
638 {
639 struct xfrmk_spdinfo si;
640 struct xfrmu_spdinfo spc;
641 struct xfrmu_spdhinfo sph;
642 struct nlmsghdr *nlh;
643 u32 *f;
644
645 nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSPDINFO, sizeof(u32), 0);
646 if (nlh == NULL) /* shouldn't really happen ... */
647 return -EMSGSIZE;
648
649 f = nlmsg_data(nlh);
650 *f = flags;
651 xfrm_spd_getinfo(&si);
652 spc.incnt = si.incnt;
653 spc.outcnt = si.outcnt;
654 spc.fwdcnt = si.fwdcnt;
655 spc.inscnt = si.inscnt;
656 spc.outscnt = si.outscnt;
657 spc.fwdscnt = si.fwdscnt;
658 sph.spdhcnt = si.spdhcnt;
659 sph.spdhmcnt = si.spdhmcnt;
660
661 NLA_PUT(skb, XFRMA_SPD_INFO, sizeof(spc), &spc);
662 NLA_PUT(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph);
663
664 return nlmsg_end(skb, nlh);
665
666 nla_put_failure:
667 nlmsg_cancel(skb, nlh);
668 return -EMSGSIZE;
669 }
670
671 static int xfrm_get_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
672 struct rtattr **xfrma)
673 {
674 struct sk_buff *r_skb;
675 u32 *flags = nlmsg_data(nlh);
676 u32 spid = NETLINK_CB(skb).pid;
677 u32 seq = nlh->nlmsg_seq;
678
679 r_skb = nlmsg_new(xfrm_spdinfo_msgsize(), GFP_ATOMIC);
680 if (r_skb == NULL)
681 return -ENOMEM;
682
683 if (build_spdinfo(r_skb, spid, seq, *flags) < 0)
684 BUG();
685
686 return nlmsg_unicast(xfrm_nl, r_skb, spid);
687 }
688
689 static inline size_t xfrm_sadinfo_msgsize(void)
690 {
691 return NLMSG_ALIGN(4)
692 + nla_total_size(sizeof(struct xfrmu_sadhinfo))
693 + nla_total_size(4); /* XFRMA_SAD_CNT */
694 }
695
696 static int build_sadinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags)
697 {
698 struct xfrmk_sadinfo si;
699 struct xfrmu_sadhinfo sh;
700 struct nlmsghdr *nlh;
701 u32 *f;
702
703 nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSADINFO, sizeof(u32), 0);
704 if (nlh == NULL) /* shouldn't really happen ... */
705 return -EMSGSIZE;
706
707 f = nlmsg_data(nlh);
708 *f = flags;
709 xfrm_sad_getinfo(&si);
710
711 sh.sadhmcnt = si.sadhmcnt;
712 sh.sadhcnt = si.sadhcnt;
713
714 NLA_PUT_U32(skb, XFRMA_SAD_CNT, si.sadcnt);
715 NLA_PUT(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh);
716
717 return nlmsg_end(skb, nlh);
718
719 nla_put_failure:
720 nlmsg_cancel(skb, nlh);
721 return -EMSGSIZE;
722 }
723
724 static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
725 struct rtattr **xfrma)
726 {
727 struct sk_buff *r_skb;
728 u32 *flags = nlmsg_data(nlh);
729 u32 spid = NETLINK_CB(skb).pid;
730 u32 seq = nlh->nlmsg_seq;
731
732 r_skb = nlmsg_new(xfrm_sadinfo_msgsize(), GFP_ATOMIC);
733 if (r_skb == NULL)
734 return -ENOMEM;
735
736 if (build_sadinfo(r_skb, spid, seq, *flags) < 0)
737 BUG();
738
739 return nlmsg_unicast(xfrm_nl, r_skb, spid);
740 }
741
742 static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
743 struct rtattr **xfrma)
744 {
745 struct xfrm_usersa_id *p = nlmsg_data(nlh);
746 struct xfrm_state *x;
747 struct sk_buff *resp_skb;
748 int err = -ESRCH;
749
750 x = xfrm_user_state_lookup(p, xfrma, &err);
751 if (x == NULL)
752 goto out_noput;
753
754 resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
755 if (IS_ERR(resp_skb)) {
756 err = PTR_ERR(resp_skb);
757 } else {
758 err = nlmsg_unicast(xfrm_nl, resp_skb, NETLINK_CB(skb).pid);
759 }
760 xfrm_state_put(x);
761 out_noput:
762 return err;
763 }
764
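/*
 * Validate an ALLOCSPI request: only AH, ESP and IPCOMP can have an SPI
 * allocated, the IPCOMP CPI must fit in 16 bits, and the requested range
 * must not be inverted.
 */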
765 static int verify_userspi_info(struct xfrm_userspi_info *p)
766 {
767 switch (p->info.id.proto) {
768 case IPPROTO_AH:
769 case IPPROTO_ESP:
770 break;
771
772 case IPPROTO_COMP:
773 /* IPCOMP spi is 16-bits. */
774 if (p->max >= 0x10000)
775 return -EINVAL;
776 break;
777
778 default:
779 return -EINVAL;
780 }
781
782 if (p->min > p->max)
783 return -EINVAL;
784
785 return 0;
786 }
787
788 static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
789 struct rtattr **xfrma)
790 {
791 struct xfrm_state *x;
792 struct xfrm_userspi_info *p;
793 struct sk_buff *resp_skb;
794 xfrm_address_t *daddr;
795 int family;
796 int err;
797
798 p = nlmsg_data(nlh);
799 err = verify_userspi_info(p);
800 if (err)
801 goto out_noput;
802
803 family = p->info.family;
804 daddr = &p->info.id.daddr;
805
806 x = NULL;
807 if (p->info.seq) {
808 x = xfrm_find_acq_byseq(p->info.seq);
809 if (x && xfrm_addr_cmp(&x->id.daddr, daddr, family)) {
810 xfrm_state_put(x);
811 x = NULL;
812 }
813 }
814
815 if (!x)
816 x = xfrm_find_acq(p->info.mode, p->info.reqid,
817 p->info.id.proto, daddr,
818 &p->info.saddr, 1,
819 family);
820 err = -ENOENT;
821 if (x == NULL)
822 goto out_noput;
823
824 resp_skb = ERR_PTR(-ENOENT);
825
826 spin_lock_bh(&x->lock);
827 if (x->km.state != XFRM_STATE_DEAD) {
828 xfrm_alloc_spi(x, htonl(p->min), htonl(p->max));
829 if (x->id.spi)
830 resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
831 }
832 spin_unlock_bh(&x->lock);
833
834 if (IS_ERR(resp_skb)) {
835 err = PTR_ERR(resp_skb);
836 goto out;
837 }
838
839 err = nlmsg_unicast(xfrm_nl, resp_skb, NETLINK_CB(skb).pid);
840
841 out:
842 xfrm_state_put(x);
843 out_noput:
844 return err;
845 }
846
847 static int verify_policy_dir(u8 dir)
848 {
849 switch (dir) {
850 case XFRM_POLICY_IN:
851 case XFRM_POLICY_OUT:
852 case XFRM_POLICY_FWD:
853 break;
854
855 default:
856 return -EINVAL;
857 }
858
859 return 0;
860 }
861
862 static int verify_policy_type(u8 type)
863 {
864 switch (type) {
865 case XFRM_POLICY_TYPE_MAIN:
866 #ifdef CONFIG_XFRM_SUB_POLICY
867 case XFRM_POLICY_TYPE_SUB:
868 #endif
869 break;
870
871 default:
872 return -EINVAL;
873 }
874
875 return 0;
876 }
877
878 static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
879 {
880 switch (p->share) {
881 case XFRM_SHARE_ANY:
882 case XFRM_SHARE_SESSION:
883 case XFRM_SHARE_USER:
884 case XFRM_SHARE_UNIQUE:
885 break;
886
887 default:
888 return -EINVAL;
889 }
890
891 switch (p->action) {
892 case XFRM_POLICY_ALLOW:
893 case XFRM_POLICY_BLOCK:
894 break;
895
896 default:
897 return -EINVAL;
898 }
899
900 switch (p->sel.family) {
901 case AF_INET:
902 break;
903
904 case AF_INET6:
905 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
906 break;
907 #else
908 return -EAFNOSUPPORT;
909 #endif
910
911 default:
912 return -EINVAL;
913 }
914
915 return verify_policy_dir(p->dir);
916 }
917
918 static int copy_from_user_sec_ctx(struct xfrm_policy *pol, struct rtattr **xfrma)
919 {
920 struct rtattr *rt = xfrma[XFRMA_SEC_CTX-1];
921 struct xfrm_user_sec_ctx *uctx;
922
923 if (!rt)
924 return 0;
925
926 uctx = RTA_DATA(rt);
927 return security_xfrm_policy_alloc(pol, uctx);
928 }
929
930 static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut,
931 int nr)
932 {
933 int i;
934
935 xp->xfrm_nr = nr;
936 for (i = 0; i < nr; i++, ut++) {
937 struct xfrm_tmpl *t = &xp->xfrm_vec[i];
938
939 memcpy(&t->id, &ut->id, sizeof(struct xfrm_id));
940 memcpy(&t->saddr, &ut->saddr,
941 sizeof(xfrm_address_t));
942 t->reqid = ut->reqid;
943 t->mode = ut->mode;
944 t->share = ut->share;
945 t->optional = ut->optional;
946 t->aalgos = ut->aalgos;
947 t->ealgos = ut->ealgos;
948 t->calgos = ut->calgos;
949 t->encap_family = ut->family;
950 }
951 }
952
953 static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
954 {
955 int i;
956
957 if (nr > XFRM_MAX_DEPTH)
958 return -EINVAL;
959
960 for (i = 0; i < nr; i++) {
961 /* We never validated the ut->family value, so many
962 * applications simply leave it at zero. The check was
963 * never made and ut->family was ignored because all
964 * templates could be assumed to have the same family as
965 * the policy itself. Now that we will have ipv4-in-ipv6
966 * and ipv6-in-ipv4 tunnels, this is no longer true.
967 */
968 if (!ut[i].family)
969 ut[i].family = family;
970
971 switch (ut[i].family) {
972 case AF_INET:
973 break;
974 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
975 case AF_INET6:
976 break;
977 #endif
978 default:
979 return -EINVAL;
980 }
981 }
982
983 return 0;
984 }
985
986 static int copy_from_user_tmpl(struct xfrm_policy *pol, struct rtattr **xfrma)
987 {
988 struct rtattr *rt = xfrma[XFRMA_TMPL-1];
989
990 if (!rt) {
991 pol->xfrm_nr = 0;
992 } else {
993 struct xfrm_user_tmpl *utmpl = RTA_DATA(rt);
994 int nr = (rt->rta_len - sizeof(*rt)) / sizeof(*utmpl);
995 int err;
996
997 err = validate_tmpl(nr, utmpl, pol->family);
998 if (err)
999 return err;
1000
1001 copy_templates(pol, RTA_DATA(rt), nr);
1002 }
1003 return 0;
1004 }
1005
1006 static int copy_from_user_policy_type(u8 *tp, struct rtattr **xfrma)
1007 {
1008 struct rtattr *rt = xfrma[XFRMA_POLICY_TYPE-1];
1009 struct xfrm_userpolicy_type *upt;
1010 u8 type = XFRM_POLICY_TYPE_MAIN;
1011 int err;
1012
1013 if (rt) {
1014 upt = RTA_DATA(rt);
1015 type = upt->type;
1016 }
1017
1018 err = verify_policy_type(type);
1019 if (err)
1020 return err;
1021
1022 *tp = type;
1023 return 0;
1024 }
1025
1026 static void copy_from_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p)
1027 {
1028 xp->priority = p->priority;
1029 xp->index = p->index;
1030 memcpy(&xp->selector, &p->sel, sizeof(xp->selector));
1031 memcpy(&xp->lft, &p->lft, sizeof(xp->lft));
1032 xp->action = p->action;
1033 xp->flags = p->flags;
1034 xp->family = p->sel.family;
1035 /* XXX xp->share = p->share; */
1036 }
1037
1038 static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p, int dir)
1039 {
1040 memcpy(&p->sel, &xp->selector, sizeof(p->sel));
1041 memcpy(&p->lft, &xp->lft, sizeof(p->lft));
1042 memcpy(&p->curlft, &xp->curlft, sizeof(p->curlft));
1043 p->priority = xp->priority;
1044 p->index = xp->index;
1045 p->sel.family = xp->family;
1046 p->dir = dir;
1047 p->action = xp->action;
1048 p->flags = xp->flags;
1049 p->share = XFRM_SHARE_ANY; /* XXX xp->share */
1050 }
1051
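/*
 * Build an xfrm_policy from the userpolicy_info header plus the optional
 * XFRMA_POLICY_TYPE, XFRMA_TMPL and XFRMA_SEC_CTX attributes; returns NULL
 * and sets *errp on failure.
 */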
1052 static struct xfrm_policy *xfrm_policy_construct(struct xfrm_userpolicy_info *p, struct rtattr **xfrma, int *errp)
1053 {
1054 struct xfrm_policy *xp = xfrm_policy_alloc(GFP_KERNEL);
1055 int err;
1056
1057 if (!xp) {
1058 *errp = -ENOMEM;
1059 return NULL;
1060 }
1061
1062 copy_from_user_policy(xp, p);
1063
1064 err = copy_from_user_policy_type(&xp->type, xfrma);
1065 if (err)
1066 goto error;
1067
1068 if (!(err = copy_from_user_tmpl(xp, xfrma)))
1069 err = copy_from_user_sec_ctx(xp, xfrma);
1070 if (err)
1071 goto error;
1072
1073 return xp;
1074 error:
1075 *errp = err;
1076 kfree(xp);
1077 return NULL;
1078 }
1079
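/* Handler for XFRM_MSG_NEWPOLICY and XFRM_MSG_UPDPOLICY: validate and
 * construct the policy, insert it (exclusively for NEWPOLICY), then write
 * an audit record and notify key managers.
 */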
1080 static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1081 struct rtattr **xfrma)
1082 {
1083 struct xfrm_userpolicy_info *p = nlmsg_data(nlh);
1084 struct xfrm_policy *xp;
1085 struct km_event c;
1086 int err;
1087 int excl;
1088
1089 err = verify_newpolicy_info(p);
1090 if (err)
1091 return err;
1092 err = verify_sec_ctx_len(xfrma);
1093 if (err)
1094 return err;
1095
1096 xp = xfrm_policy_construct(p, xfrma, &err);
1097 if (!xp)
1098 return err;
1099
1100 /* Shouldn't excl be based on the nlh flags instead?
1101 * Aha! this is really anti-netlink, i.e. more pfkey-derived:
1102 * in netlink excl is just a flag, so you wouldn't need
1103 * a separate XFRM_MSG_UPDPOLICY type. - JHS */
1104 excl = nlh->nlmsg_type == XFRM_MSG_NEWPOLICY;
1105 err = xfrm_policy_insert(p->dir, xp, excl);
1106 xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid,
1107 AUDIT_MAC_IPSEC_DELSPD, err ? 0 : 1, xp, NULL);
1108
1109 if (err) {
1110 security_xfrm_policy_free(xp);
1111 kfree(xp);
1112 return err;
1113 }
1114
1115 c.event = nlh->nlmsg_type;
1116 c.seq = nlh->nlmsg_seq;
1117 c.pid = nlh->nlmsg_pid;
1118 km_policy_notify(xp, p->dir, &c);
1119
1120 xfrm_pol_put(xp);
1121
1122 return 0;
1123 }
1124
1125 static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
1126 {
1127 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
1128 int i;
1129
1130 if (xp->xfrm_nr == 0)
1131 return 0;
1132
1133 for (i = 0; i < xp->xfrm_nr; i++) {
1134 struct xfrm_user_tmpl *up = &vec[i];
1135 struct xfrm_tmpl *kp = &xp->xfrm_vec[i];
1136
1137 memcpy(&up->id, &kp->id, sizeof(up->id));
1138 up->family = kp->encap_family;
1139 memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr));
1140 up->reqid = kp->reqid;
1141 up->mode = kp->mode;
1142 up->share = kp->share;
1143 up->optional = kp->optional;
1144 up->aalgos = kp->aalgos;
1145 up->ealgos = kp->ealgos;
1146 up->calgos = kp->calgos;
1147 }
1148
1149 return nla_put(skb, XFRMA_TMPL,
1150 sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr, vec);
1151 }
1152
1153 static inline int copy_to_user_state_sec_ctx(struct xfrm_state *x, struct sk_buff *skb)
1154 {
1155 if (x->security) {
1156 return copy_sec_ctx(x->security, skb);
1157 }
1158 return 0;
1159 }
1160
1161 static inline int copy_to_user_sec_ctx(struct xfrm_policy *xp, struct sk_buff *skb)
1162 {
1163 if (xp->security) {
1164 return copy_sec_ctx(xp->security, skb);
1165 }
1166 return 0;
1167 }
1168 static inline size_t userpolicy_type_attrsize(void)
1169 {
1170 #ifdef CONFIG_XFRM_SUB_POLICY
1171 return nla_total_size(sizeof(struct xfrm_userpolicy_type));
1172 #else
1173 return 0;
1174 #endif
1175 }
1176
1177 #ifdef CONFIG_XFRM_SUB_POLICY
1178 static int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
1179 {
1180 struct xfrm_userpolicy_type upt = {
1181 .type = type,
1182 };
1183
1184 return nla_put(skb, XFRMA_POLICY_TYPE, sizeof(upt), &upt);
1185 }
1186
1187 #else
1188 static inline int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
1189 {
1190 return 0;
1191 }
1192 #endif
1193
1194 static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr)
1195 {
1196 struct xfrm_dump_info *sp = ptr;
1197 struct xfrm_userpolicy_info *p;
1198 struct sk_buff *in_skb = sp->in_skb;
1199 struct sk_buff *skb = sp->out_skb;
1200 struct nlmsghdr *nlh;
1201
1202 if (sp->this_idx < sp->start_idx)
1203 goto out;
1204
1205 nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, sp->nlmsg_seq,
1206 XFRM_MSG_NEWPOLICY, sizeof(*p), sp->nlmsg_flags);
1207 if (nlh == NULL)
1208 return -EMSGSIZE;
1209
1210 p = nlmsg_data(nlh);
1211 copy_to_user_policy(xp, p, dir);
1212 if (copy_to_user_tmpl(xp, skb) < 0)
1213 goto nlmsg_failure;
1214 if (copy_to_user_sec_ctx(xp, skb))
1215 goto nlmsg_failure;
1216 if (copy_to_user_policy_type(xp->type, skb) < 0)
1217 goto nlmsg_failure;
1218
1219 nlmsg_end(skb, nlh);
1220 out:
1221 sp->this_idx++;
1222 return 0;
1223
1224 nlmsg_failure:
1225 nlmsg_cancel(skb, nlh);
1226 return -EMSGSIZE;
1227 }
1228
1229 static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb)
1230 {
1231 struct xfrm_dump_info info;
1232
1233 info.in_skb = cb->skb;
1234 info.out_skb = skb;
1235 info.nlmsg_seq = cb->nlh->nlmsg_seq;
1236 info.nlmsg_flags = NLM_F_MULTI;
1237 info.this_idx = 0;
1238 info.start_idx = cb->args[0];
1239 (void) xfrm_policy_walk(XFRM_POLICY_TYPE_MAIN, dump_one_policy, &info);
1240 #ifdef CONFIG_XFRM_SUB_POLICY
1241 (void) xfrm_policy_walk(XFRM_POLICY_TYPE_SUB, dump_one_policy, &info);
1242 #endif
1243 cb->args[0] = info.this_idx;
1244
1245 return skb->len;
1246 }
1247
1248 static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb,
1249 struct xfrm_policy *xp,
1250 int dir, u32 seq)
1251 {
1252 struct xfrm_dump_info info;
1253 struct sk_buff *skb;
1254
1255 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1256 if (!skb)
1257 return ERR_PTR(-ENOMEM);
1258
1259 info.in_skb = in_skb;
1260 info.out_skb = skb;
1261 info.nlmsg_seq = seq;
1262 info.nlmsg_flags = 0;
1263 info.this_idx = info.start_idx = 0;
1264
1265 if (dump_one_policy(xp, dir, 0, &info) < 0) {
1266 kfree_skb(skb);
1267 return NULL;
1268 }
1269
1270 return skb;
1271 }
1272
1273 static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1274 struct rtattr **xfrma)
1275 {
1276 struct xfrm_policy *xp;
1277 struct xfrm_userpolicy_id *p;
1278 u8 type = XFRM_POLICY_TYPE_MAIN;
1279 int err;
1280 struct km_event c;
1281 int delete;
1282
1283 p = nlmsg_data(nlh);
1284 delete = nlh->nlmsg_type == XFRM_MSG_DELPOLICY;
1285
1286 err = copy_from_user_policy_type(&type, xfrma);
1287 if (err)
1288 return err;
1289
1290 err = verify_policy_dir(p->dir);
1291 if (err)
1292 return err;
1293
1294 if (p->index)
1295 xp = xfrm_policy_byid(type, p->dir, p->index, delete, &err);
1296 else {
1297 struct rtattr *rt = xfrma[XFRMA_SEC_CTX-1];
1298 struct xfrm_policy tmp;
1299
1300 err = verify_sec_ctx_len(xfrma);
1301 if (err)
1302 return err;
1303
1304 memset(&tmp, 0, sizeof(struct xfrm_policy));
1305 if (rt) {
1306 struct xfrm_user_sec_ctx *uctx = RTA_DATA(rt);
1307
1308 if ((err = security_xfrm_policy_alloc(&tmp, uctx)))
1309 return err;
1310 }
1311 xp = xfrm_policy_bysel_ctx(type, p->dir, &p->sel, tmp.security,
1312 delete, &err);
1313 security_xfrm_policy_free(&tmp);
1314 }
1315 if (xp == NULL)
1316 return -ENOENT;
1317
1318 if (!delete) {
1319 struct sk_buff *resp_skb;
1320
1321 resp_skb = xfrm_policy_netlink(skb, xp, p->dir, nlh->nlmsg_seq);
1322 if (IS_ERR(resp_skb)) {
1323 err = PTR_ERR(resp_skb);
1324 } else {
1325 err = nlmsg_unicast(xfrm_nl, resp_skb,
1326 NETLINK_CB(skb).pid);
1327 }
1328 } else {
1329 xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid,
1330 AUDIT_MAC_IPSEC_DELSPD, err ? 0 : 1, xp, NULL);
1331
1332 if (err != 0)
1333 goto out;
1334
1335 c.data.byid = p->index;
1336 c.event = nlh->nlmsg_type;
1337 c.seq = nlh->nlmsg_seq;
1338 c.pid = nlh->nlmsg_pid;
1339 km_policy_notify(xp, p->dir, &c);
1340 }
1341
1342 out:
1343 xfrm_pol_put(xp);
1344 return err;
1345 }
1346
1347 static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
1348 struct rtattr **xfrma)
1349 {
1350 struct km_event c;
1351 struct xfrm_usersa_flush *p = nlmsg_data(nlh);
1352 struct xfrm_audit audit_info;
1353 int err;
1354
1355 audit_info.loginuid = NETLINK_CB(skb).loginuid;
1356 audit_info.secid = NETLINK_CB(skb).sid;
1357 err = xfrm_state_flush(p->proto, &audit_info);
1358 if (err)
1359 return err;
1360 c.data.proto = p->proto;
1361 c.event = nlh->nlmsg_type;
1362 c.seq = nlh->nlmsg_seq;
1363 c.pid = nlh->nlmsg_pid;
1364 km_state_notify(NULL, &c);
1365
1366 return 0;
1367 }
1368
1369 static inline size_t xfrm_aevent_msgsize(void)
1370 {
1371 return NLMSG_ALIGN(sizeof(struct xfrm_aevent_id))
1372 + nla_total_size(sizeof(struct xfrm_replay_state))
1373 + nla_total_size(sizeof(struct xfrm_lifetime_cur))
1374 + nla_total_size(4) /* XFRM_AE_RTHR */
1375 + nla_total_size(4); /* XFRM_AE_ETHR */
1376 }
1377
1378 static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, struct km_event *c)
1379 {
1380 struct xfrm_aevent_id *id;
1381 struct nlmsghdr *nlh;
1382
1383 nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_NEWAE, sizeof(*id), 0);
1384 if (nlh == NULL)
1385 return -EMSGSIZE;
1386
1387 id = nlmsg_data(nlh);
1388 memcpy(&id->sa_id.daddr, &x->id.daddr, sizeof(x->id.daddr));
1389 id->sa_id.spi = x->id.spi;
1390 id->sa_id.family = x->props.family;
1391 id->sa_id.proto = x->id.proto;
1392 memcpy(&id->saddr, &x->props.saddr, sizeof(x->props.saddr));
1393 id->reqid = x->props.reqid;
1394 id->flags = c->data.aevent;
1395
1396 NLA_PUT(skb, XFRMA_REPLAY_VAL, sizeof(x->replay), &x->replay);
1397 NLA_PUT(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft);
1398
1399 if (id->flags & XFRM_AE_RTHR)
1400 NLA_PUT_U32(skb, XFRMA_REPLAY_THRESH, x->replay_maxdiff);
1401
1402 if (id->flags & XFRM_AE_ETHR)
1403 NLA_PUT_U32(skb, XFRMA_ETIMER_THRESH,
1404 x->replay_maxage * 10 / HZ);
1405
1406 return nlmsg_end(skb, nlh);
1407
1408 nla_put_failure:
1409 nlmsg_cancel(skb, nlh);
1410 return -EMSGSIZE;
1411 }
1412
1413 static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
1414 struct rtattr **xfrma)
1415 {
1416 struct xfrm_state *x;
1417 struct sk_buff *r_skb;
1418 int err;
1419 struct km_event c;
1420 struct xfrm_aevent_id *p = nlmsg_data(nlh);
1421 struct xfrm_usersa_id *id = &p->sa_id;
1422
1423 r_skb = nlmsg_new(xfrm_aevent_msgsize(), GFP_ATOMIC);
1424 if (r_skb == NULL)
1425 return -ENOMEM;
1426
1427 x = xfrm_state_lookup(&id->daddr, id->spi, id->proto, id->family);
1428 if (x == NULL) {
1429 kfree_skb(r_skb);
1430 return -ESRCH;
1431 }
1432
1433 /*
1434 * XXX: is this lock really needed? None of the other
1435 * get paths take it (the concern is the state being updated
1436 * while we are still reading it). - jhs
1437 */
1438 spin_lock_bh(&x->lock);
1439 c.data.aevent = p->flags;
1440 c.seq = nlh->nlmsg_seq;
1441 c.pid = nlh->nlmsg_pid;
1442
1443 if (build_aevent(r_skb, x, &c) < 0)
1444 BUG();
1445 err = nlmsg_unicast(xfrm_nl, r_skb, NETLINK_CB(skb).pid);
1446 spin_unlock_bh(&x->lock);
1447 xfrm_state_put(x);
1448 return err;
1449 }
1450
1451 static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
1452 struct rtattr **xfrma)
1453 {
1454 struct xfrm_state *x;
1455 struct km_event c;
1456 int err = -EINVAL;
1457 struct xfrm_aevent_id *p = nlmsg_data(nlh);
1458 struct rtattr *rp = xfrma[XFRMA_REPLAY_VAL-1];
1459 struct rtattr *lt = xfrma[XFRMA_LTIME_VAL-1];
1460
1461 if (!lt && !rp)
1462 return err;
1463
1464 /* pedantic mode - thou shalt sayeth replaceth */
1465 if (!(nlh->nlmsg_flags&NLM_F_REPLACE))
1466 return err;
1467
1468 x = xfrm_state_lookup(&p->sa_id.daddr, p->sa_id.spi, p->sa_id.proto, p->sa_id.family);
1469 if (x == NULL)
1470 return -ESRCH;
1471
1472 if (x->km.state != XFRM_STATE_VALID)
1473 goto out;
1474
1475 spin_lock_bh(&x->lock);
1476 xfrm_update_ae_params(x, xfrma);
1477 spin_unlock_bh(&x->lock);
1478
1479 c.event = nlh->nlmsg_type;
1480 c.seq = nlh->nlmsg_seq;
1481 c.pid = nlh->nlmsg_pid;
1482 c.data.aevent = XFRM_AE_CU;
1483 km_state_notify(x, &c);
1484 err = 0;
1485 out:
1486 xfrm_state_put(x);
1487 return err;
1488 }
1489
1490 static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1491 struct rtattr **xfrma)
1492 {
1493 struct km_event c;
1494 u8 type = XFRM_POLICY_TYPE_MAIN;
1495 int err;
1496 struct xfrm_audit audit_info;
1497
1498 err = copy_from_user_policy_type(&type, xfrma);
1499 if (err)
1500 return err;
1501
1502 audit_info.loginuid = NETLINK_CB(skb).loginuid;
1503 audit_info.secid = NETLINK_CB(skb).sid;
1504 err = xfrm_policy_flush(type, &audit_info);
1505 if (err)
1506 return err;
1507 c.data.type = type;
1508 c.event = nlh->nlmsg_type;
1509 c.seq = nlh->nlmsg_seq;
1510 c.pid = nlh->nlmsg_pid;
1511 km_policy_notify(NULL, 0, &c);
1512 return 0;
1513 }
1514
1515 static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
1516 struct rtattr **xfrma)
1517 {
1518 struct xfrm_policy *xp;
1519 struct xfrm_user_polexpire *up = nlmsg_data(nlh);
1520 struct xfrm_userpolicy_info *p = &up->pol;
1521 u8 type = XFRM_POLICY_TYPE_MAIN;
1522 int err = -ENOENT;
1523
1524 err = copy_from_user_policy_type(&type, xfrma);
1525 if (err)
1526 return err;
1527
1528 if (p->index)
1529 xp = xfrm_policy_byid(type, p->dir, p->index, 0, &err);
1530 else {
1531 struct rtattr *rt = xfrma[XFRMA_SEC_CTX-1];
1532 struct xfrm_policy tmp;
1533
1534 err = verify_sec_ctx_len(xfrma);
1535 if (err)
1536 return err;
1537
1538 memset(&tmp, 0, sizeof(struct xfrm_policy));
1539 if (rt) {
1540 struct xfrm_user_sec_ctx *uctx = RTA_DATA(rt);
1541
1542 if ((err = security_xfrm_policy_alloc(&tmp, uctx)))
1543 return err;
1544 }
1545 xp = xfrm_policy_bysel_ctx(type, p->dir, &p->sel, tmp.security,
1546 0, &err);
1547 security_xfrm_policy_free(&tmp);
1548 }
1549
1550 if (xp == NULL)
1551 return -ENOENT;
1552 read_lock(&xp->lock);
1553 if (xp->dead) {
1554 read_unlock(&xp->lock);
1555 goto out;
1556 }
1557
1558 read_unlock(&xp->lock);
1559 err = 0;
1560 if (up->hard) {
1561 xfrm_policy_delete(xp, p->dir);
1562 xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid,
1563 AUDIT_MAC_IPSEC_DELSPD, 1, xp, NULL);
1564
1565 } else {
1566 // reset the timers here?
1567 printk("Dont know what to do with soft policy expire\n");
1568 }
1569 km_policy_expired(xp, p->dir, up->hard, current->pid);
1570
1571 out:
1572 xfrm_pol_put(xp);
1573 return err;
1574 }
1575
1576 static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
1577 struct rtattr **xfrma)
1578 {
1579 struct xfrm_state *x;
1580 int err;
1581 struct xfrm_user_expire *ue = nlmsg_data(nlh);
1582 struct xfrm_usersa_info *p = &ue->state;
1583
1584 x = xfrm_state_lookup(&p->id.daddr, p->id.spi, p->id.proto, p->family);
1585
1586 err = -ENOENT;
1587 if (x == NULL)
1588 return err;
1589
1590 spin_lock_bh(&x->lock);
1591 err = -EINVAL;
1592 if (x->km.state != XFRM_STATE_VALID)
1593 goto out;
1594 km_state_expired(x, ue->hard, current->pid);
1595
1596 if (ue->hard) {
1597 __xfrm_state_delete(x);
1598 xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid,
1599 AUDIT_MAC_IPSEC_DELSA, 1, NULL, x);
1600 }
1601 err = 0;
1602 out:
1603 spin_unlock_bh(&x->lock);
1604 xfrm_state_put(x);
1605 return err;
1606 }
1607
1608 static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
1609 struct rtattr **xfrma)
1610 {
1611 struct xfrm_policy *xp;
1612 struct xfrm_user_tmpl *ut;
1613 int i;
1614 struct rtattr *rt = xfrma[XFRMA_TMPL-1];
1615
1616 struct xfrm_user_acquire *ua = nlmsg_data(nlh);
1617 struct xfrm_state *x = xfrm_state_alloc();
1618 int err = -ENOMEM;
1619
1620 if (!x)
1621 return err;
1622
1623 err = verify_newpolicy_info(&ua->policy);
1624 if (err) {
1625 printk("BAD policy passed\n");
1626 kfree(x);
1627 return err;
1628 }
1629
1630 /* build an XP */
1631 xp = xfrm_policy_construct(&ua->policy, (struct rtattr **) xfrma, &err);
1632 if (!xp) {
1633 kfree(x);
1634 return err;
1635 }
1636
1637 memcpy(&x->id, &ua->id, sizeof(ua->id));
1638 memcpy(&x->props.saddr, &ua->saddr, sizeof(ua->saddr));
1639 memcpy(&x->sel, &ua->sel, sizeof(ua->sel));
1640
1641 ut = RTA_DATA(rt);
1642 /* extract the templates and for each call km_key */
1643 for (i = 0; i < xp->xfrm_nr; i++, ut++) {
1644 struct xfrm_tmpl *t = &xp->xfrm_vec[i];
1645 memcpy(&x->id, &t->id, sizeof(x->id));
1646 x->props.mode = t->mode;
1647 x->props.reqid = t->reqid;
1648 x->props.family = ut->family;
1649 t->aalgos = ua->aalgos;
1650 t->ealgos = ua->ealgos;
1651 t->calgos = ua->calgos;
1652 err = km_query(x, t, xp);
1653
1654 }
1655
1656 kfree(x);
1657 kfree(xp);
1658
1659 return 0;
1660 }
1661
1662 #ifdef CONFIG_XFRM_MIGRATE
1663 static int copy_from_user_migrate(struct xfrm_migrate *ma,
1664 struct rtattr **xfrma, int *num)
1665 {
1666 struct rtattr *rt = xfrma[XFRMA_MIGRATE-1];
1667 struct xfrm_user_migrate *um;
1668 int i, num_migrate;
1669
1670 um = RTA_DATA(rt);
1671 num_migrate = (rt->rta_len - sizeof(*rt)) / sizeof(*um);
1672
1673 if (num_migrate <= 0 || num_migrate > XFRM_MAX_DEPTH)
1674 return -EINVAL;
1675
1676 for (i = 0; i < num_migrate; i++, um++, ma++) {
1677 memcpy(&ma->old_daddr, &um->old_daddr, sizeof(ma->old_daddr));
1678 memcpy(&ma->old_saddr, &um->old_saddr, sizeof(ma->old_saddr));
1679 memcpy(&ma->new_daddr, &um->new_daddr, sizeof(ma->new_daddr));
1680 memcpy(&ma->new_saddr, &um->new_saddr, sizeof(ma->new_saddr));
1681
1682 ma->proto = um->proto;
1683 ma->mode = um->mode;
1684 ma->reqid = um->reqid;
1685
1686 ma->old_family = um->old_family;
1687 ma->new_family = um->new_family;
1688 }
1689
1690 *num = i;
1691 return 0;
1692 }
1693
1694 static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
1695 struct rtattr **xfrma)
1696 {
1697 struct xfrm_userpolicy_id *pi = nlmsg_data(nlh);
1698 struct xfrm_migrate m[XFRM_MAX_DEPTH];
1699 u8 type;
1700 int err;
1701 int n = 0;
1702
1703 if (xfrma[XFRMA_MIGRATE-1] == NULL)
1704 return -EINVAL;
1705
1706 err = copy_from_user_policy_type(&type, (struct rtattr **)xfrma);
1707 if (err)
1708 return err;
1709
1710 err = copy_from_user_migrate((struct xfrm_migrate *)m,
1711 (struct rtattr **)xfrma, &n);
1712 if (err)
1713 return err;
1714
1715 if (!n)
1716 return 0;
1717
1718 xfrm_migrate(&pi->sel, pi->dir, type, m, n);
1719
1720 return 0;
1721 }
1722 #else
1723 static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
1724 struct rtattr **xfrma)
1725 {
1726 return -ENOPROTOOPT;
1727 }
1728 #endif
1729
1730 #ifdef CONFIG_XFRM_MIGRATE
1731 static int copy_to_user_migrate(struct xfrm_migrate *m, struct sk_buff *skb)
1732 {
1733 struct xfrm_user_migrate um;
1734
1735 memset(&um, 0, sizeof(um));
1736 um.proto = m->proto;
1737 um.mode = m->mode;
1738 um.reqid = m->reqid;
1739 um.old_family = m->old_family;
1740 memcpy(&um.old_daddr, &m->old_daddr, sizeof(um.old_daddr));
1741 memcpy(&um.old_saddr, &m->old_saddr, sizeof(um.old_saddr));
1742 um.new_family = m->new_family;
1743 memcpy(&um.new_daddr, &m->new_daddr, sizeof(um.new_daddr));
1744 memcpy(&um.new_saddr, &m->new_saddr, sizeof(um.new_saddr));
1745
1746 return nla_put(skb, XFRMA_MIGRATE, sizeof(um), &um);
1747 }
1748
1749 static inline size_t xfrm_migrate_msgsize(int num_migrate)
1750 {
1751 return NLMSG_ALIGN(sizeof(struct xfrm_userpolicy_id))
1752 + nla_total_size(sizeof(struct xfrm_user_migrate) * num_migrate)
1753 + userpolicy_type_attrsize();
1754 }
1755
1756 static int build_migrate(struct sk_buff *skb, struct xfrm_migrate *m,
1757 int num_migrate, struct xfrm_selector *sel,
1758 u8 dir, u8 type)
1759 {
1760 struct xfrm_migrate *mp;
1761 struct xfrm_userpolicy_id *pol_id;
1762 struct nlmsghdr *nlh;
1763 int i;
1764
1765 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MIGRATE, sizeof(*pol_id), 0);
1766 if (nlh == NULL)
1767 return -EMSGSIZE;
1768
1769 pol_id = nlmsg_data(nlh);
1770 /* copy data from selector, dir, and type to the pol_id */
1771 memset(pol_id, 0, sizeof(*pol_id));
1772 memcpy(&pol_id->sel, sel, sizeof(pol_id->sel));
1773 pol_id->dir = dir;
1774
1775 if (copy_to_user_policy_type(type, skb) < 0)
1776 goto nlmsg_failure;
1777
1778 for (i = 0, mp = m ; i < num_migrate; i++, mp++) {
1779 if (copy_to_user_migrate(mp, skb) < 0)
1780 goto nlmsg_failure;
1781 }
1782
1783 return nlmsg_end(skb, nlh);
1784 nlmsg_failure:
1785 nlmsg_cancel(skb, nlh);
1786 return -EMSGSIZE;
1787 }
1788
1789 static int xfrm_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
1790 struct xfrm_migrate *m, int num_migrate)
1791 {
1792 struct sk_buff *skb;
1793
1794 skb = nlmsg_new(xfrm_migrate_msgsize(num_migrate), GFP_ATOMIC);
1795 if (skb == NULL)
1796 return -ENOMEM;
1797
1798 /* build migrate */
1799 if (build_migrate(skb, m, num_migrate, sel, dir, type) < 0)
1800 BUG();
1801
1802 return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_MIGRATE, GFP_ATOMIC);
1803 }
1804 #else
1805 static int xfrm_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
1806 struct xfrm_migrate *m, int num_migrate)
1807 {
1808 return -ENOPROTOOPT;
1809 }
1810 #endif
1811
1812 #define XMSGSIZE(type) sizeof(struct type)
1813
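/*
 * Minimum fixed-header length for each user message, indexed by
 * type - XFRM_MSG_BASE; xfrm_user_rcv_msg() hands this to nlmsg_parse() so
 * that attribute parsing starts after the per-type header.
 */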
1814 static const int xfrm_msg_min[XFRM_NR_MSGTYPES] = {
1815 [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
1816 [XFRM_MSG_DELSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
1817 [XFRM_MSG_GETSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
1818 [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
1819 [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
1820 [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
1821 [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userspi_info),
1822 [XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_acquire),
1823 [XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_expire),
1824 [XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
1825 [XFRM_MSG_UPDSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
1826 [XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_polexpire),
1827 [XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_flush),
1828 [XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = 0,
1829 [XFRM_MSG_NEWAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
1830 [XFRM_MSG_GETAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
1831 [XFRM_MSG_REPORT - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_report),
1832 [XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
1833 [XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = sizeof(u32),
1834 [XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = sizeof(u32),
1835 };
1836
1837 #undef XMSGSIZE
1838
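/*
 * Netlink attribute policy for the XFRMA_* attributes: each entry gives
 * either a minimum payload length (.len) or a fixed integer type, and
 * nlmsg_parse() rejects any attribute that falls short before a handler
 * ever sees it.  The variable-length attributes (algorithms, templates,
 * security contexts) still need the additional checks in the verify_*
 * helpers above.
 */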
1839 static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
1840 [XFRMA_ALG_AUTH] = { .len = sizeof(struct xfrm_algo) },
1841 [XFRMA_ALG_CRYPT] = { .len = sizeof(struct xfrm_algo) },
1842 [XFRMA_ALG_COMP] = { .len = sizeof(struct xfrm_algo) },
1843 [XFRMA_ENCAP] = { .len = sizeof(struct xfrm_encap_tmpl) },
1844 [XFRMA_TMPL] = { .len = sizeof(struct xfrm_user_tmpl) },
1845 [XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_sec_ctx) },
1846 [XFRMA_LTIME_VAL] = { .len = sizeof(struct xfrm_lifetime_cur) },
1847 [XFRMA_REPLAY_VAL] = { .len = sizeof(struct xfrm_replay_state) },
1848 [XFRMA_REPLAY_THRESH] = { .type = NLA_U32 },
1849 [XFRMA_ETIMER_THRESH] = { .type = NLA_U32 },
1850 [XFRMA_SRCADDR] = { .len = sizeof(xfrm_address_t) },
1851 [XFRMA_COADDR] = { .len = sizeof(xfrm_address_t) },
1852 [XFRMA_POLICY_TYPE] = { .len = sizeof(struct xfrm_userpolicy_type)},
1853 [XFRMA_MIGRATE] = { .len = sizeof(struct xfrm_user_migrate) },
1854 };
1855
1856 static struct xfrm_link {
1857 int (*doit)(struct sk_buff *, struct nlmsghdr *, struct rtattr **);
1858 int (*dump)(struct sk_buff *, struct netlink_callback *);
1859 } xfrm_dispatch[XFRM_NR_MSGTYPES] = {
1860 [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa },
1861 [XFRM_MSG_DELSA - XFRM_MSG_BASE] = { .doit = xfrm_del_sa },
1862 [XFRM_MSG_GETSA - XFRM_MSG_BASE] = { .doit = xfrm_get_sa,
1863 .dump = xfrm_dump_sa },
1864 [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
1865 [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy },
1866 [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy,
1867 .dump = xfrm_dump_policy },
1868 [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi },
1869 [XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_acquire },
1870 [XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_sa_expire },
1871 [XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
1872 [XFRM_MSG_UPDSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa },
1873 [XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_pol_expire},
1874 [XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = { .doit = xfrm_flush_sa },
1875 [XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_flush_policy },
1876 [XFRM_MSG_NEWAE - XFRM_MSG_BASE] = { .doit = xfrm_new_ae },
1877 [XFRM_MSG_GETAE - XFRM_MSG_BASE] = { .doit = xfrm_get_ae },
1878 [XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = { .doit = xfrm_do_migrate },
1879 [XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_sadinfo },
1880 [XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_spdinfo },
1881 };
1882
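/*
 * Top-level receive handler for one netlink message: bounds-check the type,
 * require CAP_NET_ADMIN even for GET operations, start a dump for GETSA /
 * GETPOLICY when NLM_F_DUMP is set, otherwise parse the attributes against
 * xfrma_policy and dispatch to the per-type doit() handler.
 */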
1883 static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
1884 {
1885 struct nlattr *xfrma[XFRMA_MAX+1];
1886 struct xfrm_link *link;
1887 int type, err;
1888
1889 type = nlh->nlmsg_type;
1890 if (type > XFRM_MSG_MAX)
1891 return -EINVAL;
1892
1893 type -= XFRM_MSG_BASE;
1894 link = &xfrm_dispatch[type];
1895
1896 /* All operations require privileges, even GET */
1897 if (security_netlink_recv(skb, CAP_NET_ADMIN))
1898 return -EPERM;
1899
1900 if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) ||
1901 type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) &&
1902 (nlh->nlmsg_flags & NLM_F_DUMP)) {
1903 if (link->dump == NULL)
1904 return -EINVAL;
1905
1906 return netlink_dump_start(xfrm_nl, skb, nlh, link->dump, NULL);
1907 }
1908
1909 /* FIXME: Temporary hack, nlmsg_parse() starts at xfrma[1], old code
1910 * expects first attribute at xfrma[0] */
1911 err = nlmsg_parse(nlh, xfrm_msg_min[type], xfrma-1, XFRMA_MAX,
1912 xfrma_policy);
1913 if (err < 0)
1914 return err;
1915
1916 if (link->doit == NULL)
1917 return -EINVAL;
1918
1919 return link->doit(skb, nlh, (struct rtattr **) xfrma);
1920 }
1921
1922 static void xfrm_netlink_rcv(struct sock *sk, int len)
1923 {
1924 unsigned int qlen = 0;
1925
1926 do {
1927 mutex_lock(&xfrm_cfg_mutex);
1928 netlink_run_queue(sk, &qlen, &xfrm_user_rcv_msg);
1929 mutex_unlock(&xfrm_cfg_mutex);
1930
1931 } while (qlen);
1932 }
1933
1934 static inline size_t xfrm_expire_msgsize(void)
1935 {
1936 return NLMSG_ALIGN(sizeof(struct xfrm_user_expire));
1937 }
1938
1939 static int build_expire(struct sk_buff *skb, struct xfrm_state *x, struct km_event *c)
1940 {
1941 struct xfrm_user_expire *ue;
1942 struct nlmsghdr *nlh;
1943
1944 nlh = nlmsg_put(skb, c->pid, 0, XFRM_MSG_EXPIRE, sizeof(*ue), 0);
1945 if (nlh == NULL)
1946 return -EMSGSIZE;
1947
1948 ue = nlmsg_data(nlh);
1949 copy_to_user_state(x, &ue->state);
1950 ue->hard = (c->data.hard != 0) ? 1 : 0;
1951
1952 return nlmsg_end(skb, nlh);
1953 }
1954
1955 static int xfrm_exp_state_notify(struct xfrm_state *x, struct km_event *c)
1956 {
1957 struct sk_buff *skb;
1958
1959 skb = nlmsg_new(xfrm_expire_msgsize(), GFP_ATOMIC);
1960 if (skb == NULL)
1961 return -ENOMEM;
1962
1963 if (build_expire(skb, x, c) < 0)
1964 BUG();
1965
1966 return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
1967 }
1968
1969 static int xfrm_aevent_state_notify(struct xfrm_state *x, struct km_event *c)
1970 {
1971 struct sk_buff *skb;
1972
1973 skb = nlmsg_new(xfrm_aevent_msgsize(), GFP_ATOMIC);
1974 if (skb == NULL)
1975 return -ENOMEM;
1976
1977 if (build_aevent(skb, x, c) < 0)
1978 BUG();
1979
1980 return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_AEVENTS, GFP_ATOMIC);
1981 }
1982
1983 static int xfrm_notify_sa_flush(struct km_event *c)
1984 {
1985 struct xfrm_usersa_flush *p;
1986 struct nlmsghdr *nlh;
1987 struct sk_buff *skb;
1988 int len = NLMSG_ALIGN(sizeof(struct xfrm_usersa_flush));
1989
1990 skb = nlmsg_new(len, GFP_ATOMIC);
1991 if (skb == NULL)
1992 return -ENOMEM;
1993
1994 nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_FLUSHSA, sizeof(*p), 0);
1995 if (nlh == NULL) {
1996 kfree_skb(skb);
1997 return -EMSGSIZE;
1998 }
1999
2000 p = nlmsg_data(nlh);
2001 p->proto = c->data.proto;
2002
2003 nlmsg_end(skb, nlh);
2004
2005 return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC);
2006 }
2007
2008 static inline size_t xfrm_sa_len(struct xfrm_state *x)
2009 {
2010 size_t l = 0;
2011 if (x->aalg)
2012 l += nla_total_size(alg_len(x->aalg));
2013 if (x->ealg)
2014 l += nla_total_size(alg_len(x->ealg));
2015 if (x->calg)
2016 l += nla_total_size(sizeof(*x->calg));
2017 if (x->encap)
2018 l += nla_total_size(sizeof(*x->encap));
2019
2020 return l;
2021 }
2022
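/*
 * Multicast an SA change on XFRMNLGRP_SA.  For DELSA the message header is
 * the small xfrm_usersa_id and the full xfrm_usersa_info rides in an
 * XFRMA_SA attribute; for NEWSA/UPDSA the usersa_info is the header itself.
 */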
2023 static int xfrm_notify_sa(struct xfrm_state *x, struct km_event *c)
2024 {
2025 struct xfrm_usersa_info *p;
2026 struct xfrm_usersa_id *id;
2027 struct nlmsghdr *nlh;
2028 struct sk_buff *skb;
2029 int len = xfrm_sa_len(x);
2030 int headlen;
2031
2032 headlen = sizeof(*p);
2033 if (c->event == XFRM_MSG_DELSA) {
2034 len += nla_total_size(headlen);
2035 headlen = sizeof(*id);
2036 }
2037 len += NLMSG_ALIGN(headlen);
2038
2039 skb = nlmsg_new(len, GFP_ATOMIC);
2040 if (skb == NULL)
2041 return -ENOMEM;
2042
2043 nlh = nlmsg_put(skb, c->pid, c->seq, c->event, headlen, 0);
2044 if (nlh == NULL)
2045 goto nla_put_failure;
2046
2047 p = nlmsg_data(nlh);
2048 if (c->event == XFRM_MSG_DELSA) {
2049 struct nlattr *attr;
2050
2051 id = nlmsg_data(nlh);
2052 memcpy(&id->daddr, &x->id.daddr, sizeof(id->daddr));
2053 id->spi = x->id.spi;
2054 id->family = x->props.family;
2055 id->proto = x->id.proto;
2056
2057 attr = nla_reserve(skb, XFRMA_SA, sizeof(*p));
2058 if (attr == NULL)
2059 goto nla_put_failure;
2060
2061 p = nla_data(attr);
2062 }
2063
2064 copy_to_user_state(x, p);
2065
2066 if (x->aalg)
2067 NLA_PUT(skb, XFRMA_ALG_AUTH, alg_len(x->aalg), x->aalg);
2068 if (x->ealg)
2069 NLA_PUT(skb, XFRMA_ALG_CRYPT, alg_len(x->ealg), x->ealg);
2070 if (x->calg)
2071 NLA_PUT(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg);
2072
2073 if (x->encap)
2074 NLA_PUT(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap);
2075
2076 nlmsg_end(skb, nlh);
2077
2078 return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC);
2079
2080 nla_put_failure:
2081 kfree_skb(skb);
2082 return -1;
2083 }
2084
2085 static int xfrm_send_state_notify(struct xfrm_state *x, struct km_event *c)
2086 {
2087
2088 switch (c->event) {
2089 case XFRM_MSG_EXPIRE:
2090 return xfrm_exp_state_notify(x, c);
2091 case XFRM_MSG_NEWAE:
2092 return xfrm_aevent_state_notify(x, c);
2093 case XFRM_MSG_DELSA:
2094 case XFRM_MSG_UPDSA:
2095 case XFRM_MSG_NEWSA:
2096 return xfrm_notify_sa(x, c);
2097 case XFRM_MSG_FLUSHSA:
2098 return xfrm_notify_sa_flush(c);
2099 default:
2100 printk("xfrm_user: Unknown SA event %d\n", c->event);
2101 break;
2102 }
2103
2104 return 0;
2105
2106 }
2107
2108 static inline size_t xfrm_acquire_msgsize(struct xfrm_state *x,
2109 struct xfrm_policy *xp)
2110 {
2111 return NLMSG_ALIGN(sizeof(struct xfrm_user_acquire))
2112 + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
2113 + nla_total_size(xfrm_user_sec_ctx_size(x->security))
2114 + userpolicy_type_attrsize();
2115 }
2116
2117 static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
2118 struct xfrm_tmpl *xt, struct xfrm_policy *xp,
2119 int dir)
2120 {
2121 struct xfrm_user_acquire *ua;
2122 struct nlmsghdr *nlh;
2123 __u32 seq = xfrm_get_acqseq();
2124
2125 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_ACQUIRE, sizeof(*ua), 0);
2126 if (nlh == NULL)
2127 return -EMSGSIZE;
2128
2129 ua = nlmsg_data(nlh);
2130 memcpy(&ua->id, &x->id, sizeof(ua->id));
2131 memcpy(&ua->saddr, &x->props.saddr, sizeof(ua->saddr));
2132 memcpy(&ua->sel, &x->sel, sizeof(ua->sel));
2133 copy_to_user_policy(xp, &ua->policy, dir);
2134 ua->aalgos = xt->aalgos;
2135 ua->ealgos = xt->ealgos;
2136 ua->calgos = xt->calgos;
2137 ua->seq = x->km.seq = seq;
2138
2139 if (copy_to_user_tmpl(xp, skb) < 0)
2140 goto nlmsg_failure;
2141 if (copy_to_user_state_sec_ctx(x, skb))
2142 goto nlmsg_failure;
2143 if (copy_to_user_policy_type(xp->type, skb) < 0)
2144 goto nlmsg_failure;
2145
2146 return nlmsg_end(skb, nlh);
2147
2148 nlmsg_failure:
2149 nlmsg_cancel(skb, nlh);
2150 return -EMSGSIZE;
2151 }
2152
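/* km ->acquire() hook: tell key managers listening on
 * XFRMNLGRP_ACQUIRE that a matching SA needs to be negotiated.
 */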
2153 static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt,
2154 struct xfrm_policy *xp, int dir)
2155 {
2156 struct sk_buff *skb;
2157
2158 skb = nlmsg_new(xfrm_acquire_msgsize(x, xp), GFP_ATOMIC);
2159 if (skb == NULL)
2160 return -ENOMEM;
2161
2162 if (build_acquire(skb, x, xt, xp, dir) < 0)
2163 BUG();
2164
2165 return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_ACQUIRE, GFP_ATOMIC);
2166 }
2167
2168 /* User gives us an xfrm_userpolicy_info followed by an array of 0
2169 * or more templates.
2170 */
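/* Called for the IP_XFRM_POLICY/IPV6_XFRM_POLICY socket option.  On
 * failure a negative errno is returned through *dir; on success *dir
 * holds the policy direction taken from the user-supplied header.
 *
 * Illustrative userspace sketch (not part of this file): the buffer
 * passed to setsockopt() is laid out back to back, with no netlink
 * attribute framing:
 *
 *	struct xfrm_userpolicy_info info;
 *	struct xfrm_user_tmpl tmpl[N];	 N may be 0
 */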
2171 static struct xfrm_policy *xfrm_compile_policy(struct sock *sk, int opt,
2172 u8 *data, int len, int *dir)
2173 {
2174 struct xfrm_userpolicy_info *p = (struct xfrm_userpolicy_info *)data;
2175 struct xfrm_user_tmpl *ut = (struct xfrm_user_tmpl *) (p + 1);
2176 struct xfrm_policy *xp;
2177 int nr;
2178
2179 switch (sk->sk_family) {
2180 case AF_INET:
2181 if (opt != IP_XFRM_POLICY) {
2182 *dir = -EOPNOTSUPP;
2183 return NULL;
2184 }
2185 break;
2186 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
2187 case AF_INET6:
2188 if (opt != IPV6_XFRM_POLICY) {
2189 *dir = -EOPNOTSUPP;
2190 return NULL;
2191 }
2192 break;
2193 #endif
2194 default:
2195 *dir = -EINVAL;
2196 return NULL;
2197 }
2198
2199 *dir = -EINVAL;
2200
2201 if (len < sizeof(*p) ||
2202 verify_newpolicy_info(p))
2203 return NULL;
2204
2205 nr = ((len - sizeof(*p)) / sizeof(*ut));
2206 if (validate_tmpl(nr, ut, p->sel.family))
2207 return NULL;
2208
2209 if (p->dir > XFRM_POLICY_OUT)
2210 return NULL;
2211
2212 xp = xfrm_policy_alloc(GFP_KERNEL);
2213 if (xp == NULL) {
2214 *dir = -ENOBUFS;
2215 return NULL;
2216 }
2217
2218 copy_from_user_policy(xp, p);
2219 xp->type = XFRM_POLICY_TYPE_MAIN;
2220 copy_templates(xp, ut, nr);
2221
2222 *dir = p->dir;
2223
2224 return xp;
2225 }
2226
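/* Worst-case payload of an XFRM_MSG_POLEXPIRE message. */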
2227 static inline size_t xfrm_polexpire_msgsize(struct xfrm_policy *xp)
2228 {
2229 return NLMSG_ALIGN(sizeof(struct xfrm_user_polexpire))
2230 + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
2231 + nla_total_size(xfrm_user_sec_ctx_size(xp->security))
2232 + userpolicy_type_attrsize();
2233 }
2234
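/* Fill in an xfrm_user_polexpire message: the expiring policy, its
 * templates, security context and policy type, plus the hard/soft
 * expiry flag taken from the km_event.
 */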
2235 static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp,
2236 int dir, struct km_event *c)
2237 {
2238 struct xfrm_user_polexpire *upe;
2239 struct nlmsghdr *nlh;
2240 int hard = c->data.hard;
2241
2242 nlh = nlmsg_put(skb, c->pid, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe), 0);
2243 if (nlh == NULL)
2244 return -EMSGSIZE;
2245
2246 upe = nlmsg_data(nlh);
2247 copy_to_user_policy(xp, &upe->pol, dir);
2248 if (copy_to_user_tmpl(xp, skb) < 0)
2249 goto nlmsg_failure;
2250 if (copy_to_user_sec_ctx(xp, skb))
2251 goto nlmsg_failure;
2252 if (copy_to_user_policy_type(xp->type, skb) < 0)
2253 goto nlmsg_failure;
2254 upe->hard = !!hard;
2255
2256 return nlmsg_end(skb, nlh);
2257
2258 nlmsg_failure:
2259 nlmsg_cancel(skb, nlh);
2260 return -EMSGSIZE;
2261 }
2262
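/* Multicast a policy expiry notification to XFRMNLGRP_EXPIRE. */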
2263 static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
2264 {
2265 struct sk_buff *skb;
2266
2267 skb = nlmsg_new(xfrm_polexpire_msgsize(xp), GFP_ATOMIC);
2268 if (skb == NULL)
2269 return -ENOMEM;
2270
2271 if (build_polexpire(skb, xp, dir, c) < 0)
2272 BUG();
2273
2274 return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
2275 }
2276
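/* Build a policy change notification (XFRM_MSG_NEWPOLICY/UPDPOLICY/
 * DELPOLICY) and multicast it to XFRMNLGRP_POLICY listeners.  As with
 * SA deletion, DELPOLICY uses an xfrm_userpolicy_id header and puts
 * the full xfrm_userpolicy_info in a nested XFRMA_POLICY attribute.
 */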
2277 static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *c)
2278 {
2279 struct xfrm_userpolicy_info *p;
2280 struct xfrm_userpolicy_id *id;
2281 struct nlmsghdr *nlh;
2282 struct sk_buff *skb;
2283 int len = nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr);
2284 int headlen;
2285
2286 headlen = sizeof(*p);
2287 if (c->event == XFRM_MSG_DELPOLICY) {
2288 len += nla_total_size(headlen);
2289 headlen = sizeof(*id);
2290 }
2291 len += userpolicy_type_attrsize();
2292 len += NLMSG_ALIGN(headlen);
2293
2294 skb = nlmsg_new(len, GFP_ATOMIC);
2295 if (skb == NULL)
2296 return -ENOMEM;
2297
2298 nlh = nlmsg_put(skb, c->pid, c->seq, c->event, headlen, 0);
2299 if (nlh == NULL)
2300 goto nlmsg_failure;
2301
2302 p = nlmsg_data(nlh);
2303 if (c->event == XFRM_MSG_DELPOLICY) {
2304 struct nlattr *attr;
2305
2306 id = nlmsg_data(nlh);
2307 memset(id, 0, sizeof(*id));
2308 id->dir = dir;
2309 if (c->data.byid)
2310 id->index = xp->index;
2311 else
2312 memcpy(&id->sel, &xp->selector, sizeof(id->sel));
2313
2314 attr = nla_reserve(skb, XFRMA_POLICY, sizeof(*p));
2315 if (attr == NULL)
2316 goto nlmsg_failure;
2317
2318 p = nla_data(attr);
2319 }
2320
2321 copy_to_user_policy(xp, p, dir);
2322 if (copy_to_user_tmpl(xp, skb) < 0)
2323 goto nlmsg_failure;
2324 if (copy_to_user_policy_type(xp->type, skb) < 0)
2325 goto nlmsg_failure;
2326
2327 nlmsg_end(skb, nlh);
2328
2329 return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);
2330
2331 nlmsg_failure:
2332 kfree_skb(skb);
2333 return -1;
2334 }
2335
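/* Announce a policy flush: an XFRM_MSG_FLUSHPOLICY message carrying
 * only the policy type attribute.
 */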
2336 static int xfrm_notify_policy_flush(struct km_event *c)
2337 {
2338 struct nlmsghdr *nlh;
2339 struct sk_buff *skb;
2340
2341 skb = nlmsg_new(userpolicy_type_attrsize(), GFP_ATOMIC);
2342 if (skb == NULL)
2343 return -ENOMEM;
2344
2345 nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_FLUSHPOLICY, 0, 0);
2346 if (nlh == NULL)
2347 goto nlmsg_failure;
2348 if (copy_to_user_policy_type(c->data.type, skb) < 0)
2349 goto nlmsg_failure;
2350
2351 nlmsg_end(skb, nlh);
2352
2353 return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);
2354
2355 nlmsg_failure:
2356 kfree_skb(skb);
2357 return -1;
2358 }
2359
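/* km ->notify_policy() hook: dispatch a policy event to the helper
 * that builds the corresponding netlink message.
 */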
2360 static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
2361 {
2362
2363 switch (c->event) {
2364 case XFRM_MSG_NEWPOLICY:
2365 case XFRM_MSG_UPDPOLICY:
2366 case XFRM_MSG_DELPOLICY:
2367 return xfrm_notify_policy(xp, dir, c);
2368 case XFRM_MSG_FLUSHPOLICY:
2369 return xfrm_notify_policy_flush(c);
2370 case XFRM_MSG_POLEXPIRE:
2371 return xfrm_exp_policy_notify(xp, dir, c);
2372 default:
2373 printk(KERN_NOTICE "xfrm_user: Unknown Policy event %d\n", c->event);
2374 }
2375
2376 return 0;
2377
2378 }
2379
2380 static inline size_t xfrm_report_msgsize(void)
2381 {
2382 return NLMSG_ALIGN(sizeof(struct xfrm_user_report));
2383 }
2384
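/* Fill in an XFRM_MSG_REPORT message: the protocol and selector that
 * triggered the report, plus an optional XFRMA_COADDR attribute.
 */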
2385 static int build_report(struct sk_buff *skb, u8 proto,
2386 struct xfrm_selector *sel, xfrm_address_t *addr)
2387 {
2388 struct xfrm_user_report *ur;
2389 struct nlmsghdr *nlh;
2390
2391 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_REPORT, sizeof(*ur), 0);
2392 if (nlh == NULL)
2393 return -EMSGSIZE;
2394
2395 ur = nlmsg_data(nlh);
2396 ur->proto = proto;
2397 memcpy(&ur->sel, sel, sizeof(ur->sel));
2398
2399 if (addr)
2400 NLA_PUT(skb, XFRMA_COADDR, sizeof(*addr), addr);
2401
2402 return nlmsg_end(skb, nlh);
2403
2404 nla_put_failure:
2405 nlmsg_cancel(skb, nlh);
2406 return -EMSGSIZE;
2407 }
2408
2409 static int xfrm_send_report(u8 proto, struct xfrm_selector *sel,
2410 xfrm_address_t *addr)
2411 {
2412 struct sk_buff *skb;
2413
2414 skb = nlmsg_new(xfrm_report_msgsize(), GFP_ATOMIC);
2415 if (skb == NULL)
2416 return -ENOMEM;
2417
2418 if (build_report(skb, proto, sel, addr) < 0)
2419 BUG();
2420
2421 return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_REPORT, GFP_ATOMIC);
2422 }
2423
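/* Register the netlink interface as a key manager with the xfrm
 * core; the callbacks above are invoked from the xfrm state and
 * policy code.
 */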
2424 static struct xfrm_mgr netlink_mgr = {
2425 .id = "netlink",
2426 .notify = xfrm_send_state_notify,
2427 .acquire = xfrm_send_acquire,
2428 .compile_policy = xfrm_compile_policy,
2429 .notify_policy = xfrm_send_policy_notify,
2430 .report = xfrm_send_report,
2431 .migrate = xfrm_send_migrate,
2432 };
2433
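/* Create the NETLINK_XFRM kernel socket and register the netlink
 * key manager.
 */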
2434 static int __init xfrm_user_init(void)
2435 {
2436 struct sock *nlsk;
2437
2438 printk(KERN_INFO "Initializing XFRM netlink socket\n");
2439
2440 nlsk = netlink_kernel_create(NETLINK_XFRM, XFRMNLGRP_MAX,
2441 xfrm_netlink_rcv, NULL, THIS_MODULE);
2442 if (nlsk == NULL)
2443 return -ENOMEM;
2444 rcu_assign_pointer(xfrm_nl, nlsk);
2445
2446 xfrm_register_km(&netlink_mgr);
2447
2448 return 0;
2449 }
2450
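/* Unregister the key manager, clear xfrm_nl under RCU and release
 * the netlink socket once all readers are done with it.
 */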
2451 static void __exit xfrm_user_exit(void)
2452 {
2453 struct sock *nlsk = xfrm_nl;
2454
2455 xfrm_unregister_km(&netlink_mgr);
2456 rcu_assign_pointer(xfrm_nl, NULL);
2457 synchronize_rcu();
2458 sock_release(nlsk->sk_socket);
2459 }
2460
2461 module_init(xfrm_user_init);
2462 module_exit(xfrm_user_exit);
2463 MODULE_LICENSE("GPL");
2464 MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_XFRM);
2465