/*
 * xfrm_state.c
 *
 * Changes:
 *	Mitsuru KANDA @USAGI
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		IPv6 support
 *	YOSHIFUJI Hideaki @USAGI
 *		Split up af-specific functions
 *	Derek Atkins <derek@ihtfp.com>
 *		Add UDP Encapsulation
 *
 */

#include <linux/workqueue.h>
#include <net/xfrm.h>
#include <linux/pfkeyv2.h>
#include <linux/ipsec.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/audit.h>
#include <asm/uaccess.h>
#include <linux/ktime.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>

#include "xfrm_hash.h"

/* Each xfrm_state may be linked to two tables:

   1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
   2. Hash table by (daddr,family,reqid) to find what SAs exist for given
      destination/tunnel endpoint. (output)
 */

static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;

static inline unsigned int xfrm_dst_hash(struct net *net,
					 const xfrm_address_t *daddr,
					 const xfrm_address_t *saddr,
					 u32 reqid,
					 unsigned short family)
{
	return __xfrm_dst_hash(daddr, saddr, reqid, family, net->xfrm.state_hmask);
}

static inline unsigned int xfrm_src_hash(struct net *net,
					 const xfrm_address_t *daddr,
					 const xfrm_address_t *saddr,
					 unsigned short family)
{
	return __xfrm_src_hash(daddr, saddr, family, net->xfrm.state_hmask);
}

static inline unsigned int
xfrm_spi_hash(struct net *net, const xfrm_address_t *daddr,
	      __be32 spi, u8 proto, unsigned short family)
{
	return __xfrm_spi_hash(daddr, spi, proto, family, net->xfrm.state_hmask);
}

static void xfrm_hash_transfer(struct hlist_head *list,
			       struct hlist_head *ndsttable,
			       struct hlist_head *nsrctable,
			       struct hlist_head *nspitable,
			       unsigned int nhashmask)
{
	struct hlist_node *tmp;
	struct xfrm_state *x;

	hlist_for_each_entry_safe(x, tmp, list, bydst) {
		unsigned int h;

		h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
				    x->props.reqid, x->props.family,
				    nhashmask);
		hlist_add_head(&x->bydst, ndsttable+h);

		h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr,
				    x->props.family,
				    nhashmask);
		hlist_add_head(&x->bysrc, nsrctable+h);

		if (x->id.spi) {
			h = __xfrm_spi_hash(&x->id.daddr, x->id.spi,
					    x->id.proto, x->props.family,
					    nhashmask);
			hlist_add_head(&x->byspi, nspitable+h);
		}
	}
}

static unsigned long xfrm_hash_new_size(unsigned int state_hmask)
{
	return ((state_hmask + 1) << 1) * sizeof(struct hlist_head);
}

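/* Double all three hash tables and rehash every state into them.
 * Runs from the per-net work queue; the tables are swapped under
 * net->xfrm.xfrm_state_lock and the old ones are freed only after
 * the lock has been dropped.
 */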
static void xfrm_hash_resize(struct work_struct *work)
{
	struct net *net = container_of(work, struct net, xfrm.state_hash_work);
	struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
	unsigned long nsize, osize;
	unsigned int nhashmask, ohashmask;
	int i;

	nsize = xfrm_hash_new_size(net->xfrm.state_hmask);
	ndst = xfrm_hash_alloc(nsize);
	if (!ndst)
		return;
	nsrc = xfrm_hash_alloc(nsize);
	if (!nsrc) {
		xfrm_hash_free(ndst, nsize);
		return;
	}
	nspi = xfrm_hash_alloc(nsize);
	if (!nspi) {
		xfrm_hash_free(ndst, nsize);
		xfrm_hash_free(nsrc, nsize);
		return;
	}

	spin_lock_bh(&net->xfrm.xfrm_state_lock);

	nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
	for (i = net->xfrm.state_hmask; i >= 0; i--)
		xfrm_hash_transfer(net->xfrm.state_bydst+i, ndst, nsrc, nspi,
				   nhashmask);

	odst = net->xfrm.state_bydst;
	osrc = net->xfrm.state_bysrc;
	ospi = net->xfrm.state_byspi;
	ohashmask = net->xfrm.state_hmask;

	net->xfrm.state_bydst = ndst;
	net->xfrm.state_bysrc = nsrc;
	net->xfrm.state_byspi = nspi;
	net->xfrm.state_hmask = nhashmask;

	spin_unlock_bh(&net->xfrm.xfrm_state_lock);

	osize = (ohashmask + 1) * sizeof(struct hlist_head);
	xfrm_hash_free(odst, osize);
	xfrm_hash_free(osrc, osize);
	xfrm_hash_free(ospi, osize);
}

static DEFINE_SPINLOCK(xfrm_state_afinfo_lock);
static struct xfrm_state_afinfo __rcu *xfrm_state_afinfo[NPROTO];

static DEFINE_SPINLOCK(xfrm_state_gc_lock);

int __xfrm_state_delete(struct xfrm_state *x);

int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
bool km_is_alive(const struct km_event *c);
void km_state_expired(struct xfrm_state *x, int hard, u32 portid);

static DEFINE_SPINLOCK(xfrm_type_lock);
int xfrm_register_type(const struct xfrm_type *type, unsigned short family)
{
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	const struct xfrm_type **typemap;
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;
	typemap = afinfo->type_map;
	spin_lock_bh(&xfrm_type_lock);

	if (likely(typemap[type->proto] == NULL))
		typemap[type->proto] = type;
	else
		err = -EEXIST;
	spin_unlock_bh(&xfrm_type_lock);
	xfrm_state_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_register_type);

int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family)
{
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	const struct xfrm_type **typemap;
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;
	typemap = afinfo->type_map;
	spin_lock_bh(&xfrm_type_lock);

	if (unlikely(typemap[type->proto] != type))
		err = -ENOENT;
	else
		typemap[type->proto] = NULL;
	spin_unlock_bh(&xfrm_type_lock);
	xfrm_state_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_unregister_type);

static const struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
{
	struct xfrm_state_afinfo *afinfo;
	const struct xfrm_type **typemap;
	const struct xfrm_type *type;
	int modload_attempted = 0;

retry:
	afinfo = xfrm_state_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return NULL;
	typemap = afinfo->type_map;

	type = typemap[proto];
	if (unlikely(type && !try_module_get(type->owner)))
		type = NULL;
	if (!type && !modload_attempted) {
		xfrm_state_put_afinfo(afinfo);
		request_module("xfrm-type-%d-%d", family, proto);
		modload_attempted = 1;
		goto retry;
	}

	xfrm_state_put_afinfo(afinfo);
	return type;
}

static void xfrm_put_type(const struct xfrm_type *type)
{
	module_put(type->owner);
}

static DEFINE_SPINLOCK(xfrm_mode_lock);
int xfrm_register_mode(struct xfrm_mode *mode, int family)
{
	struct xfrm_state_afinfo *afinfo;
	struct xfrm_mode **modemap;
	int err;

	if (unlikely(mode->encap >= XFRM_MODE_MAX))
		return -EINVAL;

	afinfo = xfrm_state_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	err = -EEXIST;
	modemap = afinfo->mode_map;
	spin_lock_bh(&xfrm_mode_lock);
	if (modemap[mode->encap])
		goto out;

	err = -ENOENT;
	if (!try_module_get(afinfo->owner))
		goto out;

	mode->afinfo = afinfo;
	modemap[mode->encap] = mode;
	err = 0;

out:
	spin_unlock_bh(&xfrm_mode_lock);
	xfrm_state_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_register_mode);

int xfrm_unregister_mode(struct xfrm_mode *mode, int family)
{
	struct xfrm_state_afinfo *afinfo;
	struct xfrm_mode **modemap;
	int err;

	if (unlikely(mode->encap >= XFRM_MODE_MAX))
		return -EINVAL;

	afinfo = xfrm_state_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	err = -ENOENT;
	modemap = afinfo->mode_map;
	spin_lock_bh(&xfrm_mode_lock);
	if (likely(modemap[mode->encap] == mode)) {
		modemap[mode->encap] = NULL;
		module_put(mode->afinfo->owner);
		err = 0;
	}

	spin_unlock_bh(&xfrm_mode_lock);
	xfrm_state_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_unregister_mode);

static struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family)
{
	struct xfrm_state_afinfo *afinfo;
	struct xfrm_mode *mode;
	int modload_attempted = 0;

	if (unlikely(encap >= XFRM_MODE_MAX))
		return NULL;

retry:
	afinfo = xfrm_state_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return NULL;

	mode = afinfo->mode_map[encap];
	if (unlikely(mode && !try_module_get(mode->owner)))
		mode = NULL;
	if (!mode && !modload_attempted) {
		xfrm_state_put_afinfo(afinfo);
		request_module("xfrm-mode-%d-%d", family, encap);
		modload_attempted = 1;
		goto retry;
	}

	xfrm_state_put_afinfo(afinfo);
	return mode;
}

static void xfrm_put_mode(struct xfrm_mode *mode)
{
	module_put(mode->owner);
}

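/* Final teardown of a state, run from the GC work queue once the
 * last reference is gone: stop both timers, then release algorithm
 * data, modes, the type and the security context.
 */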
static void xfrm_state_gc_destroy(struct xfrm_state *x)
{
	tasklet_hrtimer_cancel(&x->mtimer);
	del_timer_sync(&x->rtimer);
	kfree(x->aead);
	kfree(x->aalg);
	kfree(x->ealg);
	kfree(x->calg);
	kfree(x->encap);
	kfree(x->coaddr);
	kfree(x->replay_esn);
	kfree(x->preplay_esn);
	if (x->inner_mode)
		xfrm_put_mode(x->inner_mode);
	if (x->inner_mode_iaf)
		xfrm_put_mode(x->inner_mode_iaf);
	if (x->outer_mode)
		xfrm_put_mode(x->outer_mode);
	if (x->type) {
		x->type->destructor(x);
		xfrm_put_type(x->type);
	}
	security_xfrm_state_free(x);
	kfree(x);
}

static void xfrm_state_gc_task(struct work_struct *work)
{
	struct net *net = container_of(work, struct net, xfrm.state_gc_work);
	struct xfrm_state *x;
	struct hlist_node *tmp;
	struct hlist_head gc_list;

	spin_lock_bh(&xfrm_state_gc_lock);
	hlist_move_list(&net->xfrm.state_gc_list, &gc_list);
	spin_unlock_bh(&xfrm_state_gc_lock);

	hlist_for_each_entry_safe(x, tmp, &gc_list, gclist)
		xfrm_state_gc_destroy(x);
}

static inline unsigned long make_jiffies(long secs)
{
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
	else
		return secs*HZ;
}

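/* Per-state lifetime timer: checks the hard and soft add/use
 * expirations, notifies the key managers of a soft expire
 * (km_state_expired with hard == 0), deletes the state on a hard
 * expire, and otherwise rearms itself for the nearest deadline.
 */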
static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
{
	struct tasklet_hrtimer *thr = container_of(me, struct tasklet_hrtimer, timer);
	struct xfrm_state *x = container_of(thr, struct xfrm_state, mtimer);
	unsigned long now = get_seconds();
	long next = LONG_MAX;
	int warn = 0;
	int err = 0;

	spin_lock(&x->lock);
	if (x->km.state == XFRM_STATE_DEAD)
		goto out;
	if (x->km.state == XFRM_STATE_EXPIRED)
		goto expired;
	if (x->lft.hard_add_expires_seconds) {
		long tmo = x->lft.hard_add_expires_seconds +
			x->curlft.add_time - now;
		if (tmo <= 0) {
			if (x->xflags & XFRM_SOFT_EXPIRE) {
				/* enter hard expire without soft expire first?!
				 * setting a new date could trigger this.
				 * workaround: fix x->curlft.add_time as below:
				 */
				x->curlft.add_time = now - x->saved_tmo - 1;
				tmo = x->lft.hard_add_expires_seconds - x->saved_tmo;
			} else
				goto expired;
		}
		if (tmo < next)
			next = tmo;
	}
	if (x->lft.hard_use_expires_seconds) {
		long tmo = x->lft.hard_use_expires_seconds +
			(x->curlft.use_time ? : now) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (x->km.dying)
		goto resched;
	if (x->lft.soft_add_expires_seconds) {
		long tmo = x->lft.soft_add_expires_seconds +
			x->curlft.add_time - now;
		if (tmo <= 0) {
			warn = 1;
			x->xflags &= ~XFRM_SOFT_EXPIRE;
		} else if (tmo < next) {
			next = tmo;
			x->xflags |= XFRM_SOFT_EXPIRE;
			x->saved_tmo = tmo;
		}
	}
	if (x->lft.soft_use_expires_seconds) {
		long tmo = x->lft.soft_use_expires_seconds +
			(x->curlft.use_time ? : now) - now;
		if (tmo <= 0)
			warn = 1;
		else if (tmo < next)
			next = tmo;
	}

	x->km.dying = warn;
	if (warn)
		km_state_expired(x, 0, 0);
resched:
	if (next != LONG_MAX) {
		tasklet_hrtimer_start(&x->mtimer, ktime_set(next, 0), HRTIMER_MODE_REL);
	}

	goto out;

expired:
	if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0)
		x->km.state = XFRM_STATE_EXPIRED;

	err = __xfrm_state_delete(x);
	if (!err)
		km_state_expired(x, 1, 0);

	xfrm_audit_state_delete(x, err ? 0 : 1, true);

out:
	spin_unlock(&x->lock);
	return HRTIMER_NORESTART;
}

static void xfrm_replay_timer_handler(unsigned long data);

struct xfrm_state *xfrm_state_alloc(struct net *net)
{
	struct xfrm_state *x;

	x = kzalloc(sizeof(struct xfrm_state), GFP_ATOMIC);

	if (x) {
		write_pnet(&x->xs_net, net);
		atomic_set(&x->refcnt, 1);
		atomic_set(&x->tunnel_users, 0);
		INIT_LIST_HEAD(&x->km.all);
		INIT_HLIST_NODE(&x->bydst);
		INIT_HLIST_NODE(&x->bysrc);
		INIT_HLIST_NODE(&x->byspi);
		tasklet_hrtimer_init(&x->mtimer, xfrm_timer_handler,
				     CLOCK_BOOTTIME, HRTIMER_MODE_ABS);
		setup_timer(&x->rtimer, xfrm_replay_timer_handler,
			    (unsigned long)x);
		x->curlft.add_time = get_seconds();
		x->lft.soft_byte_limit = XFRM_INF;
		x->lft.soft_packet_limit = XFRM_INF;
		x->lft.hard_byte_limit = XFRM_INF;
		x->lft.hard_packet_limit = XFRM_INF;
		x->replay_maxage = 0;
		x->replay_maxdiff = 0;
		x->inner_mode = NULL;
		x->inner_mode_iaf = NULL;
		spin_lock_init(&x->lock);
	}
	return x;
}
EXPORT_SYMBOL(xfrm_state_alloc);

void __xfrm_state_destroy(struct xfrm_state *x)
{
	struct net *net = xs_net(x);

	WARN_ON(x->km.state != XFRM_STATE_DEAD);

	spin_lock_bh(&xfrm_state_gc_lock);
	hlist_add_head(&x->gclist, &net->xfrm.state_gc_list);
	spin_unlock_bh(&xfrm_state_gc_lock);
	schedule_work(&net->xfrm.state_gc_work);
}
EXPORT_SYMBOL(__xfrm_state_destroy);

int __xfrm_state_delete(struct xfrm_state *x)
{
	struct net *net = xs_net(x);
	int err = -ESRCH;

	if (x->km.state != XFRM_STATE_DEAD) {
		x->km.state = XFRM_STATE_DEAD;
		spin_lock(&net->xfrm.xfrm_state_lock);
		list_del(&x->km.all);
		hlist_del(&x->bydst);
		hlist_del(&x->bysrc);
		if (x->id.spi)
			hlist_del(&x->byspi);
		net->xfrm.state_num--;
		spin_unlock(&net->xfrm.xfrm_state_lock);

		/* All xfrm_state objects are created by xfrm_state_alloc.
		 * The xfrm_state_alloc call gives a reference, and that
		 * is what we are dropping here.
		 */
		xfrm_state_put(x);
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL(__xfrm_state_delete);

int xfrm_state_delete(struct xfrm_state *x)
{
	int err;

	spin_lock_bh(&x->lock);
	err = __xfrm_state_delete(x);
	spin_unlock_bh(&x->lock);

	return err;
}
EXPORT_SYMBOL(xfrm_state_delete);

#ifdef CONFIG_SECURITY_NETWORK_XFRM
static inline int
xfrm_state_flush_secctx_check(struct net *net, u8 proto, bool task_valid)
{
	int i, err = 0;

	for (i = 0; i <= net->xfrm.state_hmask; i++) {
		struct xfrm_state *x;

		hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
			if (xfrm_id_proto_match(x->id.proto, proto) &&
			   (err = security_xfrm_state_delete(x)) != 0) {
				xfrm_audit_state_delete(x, 0, task_valid);
				return err;
			}
		}
	}

	return err;
}
#else
static inline int
xfrm_state_flush_secctx_check(struct net *net, u8 proto, bool task_valid)
{
	return 0;
}
#endif

int xfrm_state_flush(struct net *net, u8 proto, bool task_valid)
{
	int i, err = 0, cnt = 0;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	err = xfrm_state_flush_secctx_check(net, proto, task_valid);
	if (err)
		goto out;

	err = -ESRCH;
	for (i = 0; i <= net->xfrm.state_hmask; i++) {
		struct xfrm_state *x;
restart:
		hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
			if (!xfrm_state_kern(x) &&
			    xfrm_id_proto_match(x->id.proto, proto)) {
				xfrm_state_hold(x);
				spin_unlock_bh(&net->xfrm.xfrm_state_lock);

				err = xfrm_state_delete(x);
				xfrm_audit_state_delete(x, err ? 0 : 1,
							task_valid);
				xfrm_state_put(x);
				if (!err)
					cnt++;

				spin_lock_bh(&net->xfrm.xfrm_state_lock);
				goto restart;
			}
		}
	}
	if (cnt)
		err = 0;

out:
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_state_flush);

void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si)
{
	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	si->sadcnt = net->xfrm.state_num;
	si->sadhcnt = net->xfrm.state_hmask;
	si->sadhmcnt = xfrm_state_hashmax;
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
}
EXPORT_SYMBOL(xfrm_sad_getinfo);

static int
xfrm_init_tempstate(struct xfrm_state *x, const struct flowi *fl,
		    const struct xfrm_tmpl *tmpl,
		    const xfrm_address_t *daddr, const xfrm_address_t *saddr,
		    unsigned short family)
{
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		return -1;
	afinfo->init_tempsel(&x->sel, fl);

	if (family != tmpl->encap_family) {
		xfrm_state_put_afinfo(afinfo);
		afinfo = xfrm_state_get_afinfo(tmpl->encap_family);
		if (!afinfo)
			return -1;
	}
	afinfo->init_temprop(x, tmpl, daddr, saddr);
	xfrm_state_put_afinfo(afinfo);
	return 0;
}

static struct xfrm_state *__xfrm_state_lookup(struct net *net, u32 mark,
					      const xfrm_address_t *daddr,
					      __be32 spi, u8 proto,
					      unsigned short family)
{
	unsigned int h = xfrm_spi_hash(net, daddr, spi, proto, family);
	struct xfrm_state *x;

	hlist_for_each_entry(x, net->xfrm.state_byspi+h, byspi) {
		if (x->props.family != family ||
		    x->id.spi != spi ||
		    x->id.proto != proto ||
		    !xfrm_addr_equal(&x->id.daddr, daddr, family))
			continue;

		if ((mark & x->mark.m) != x->mark.v)
			continue;
		xfrm_state_hold(x);
		return x;
	}

	return NULL;
}

static struct xfrm_state *__xfrm_state_lookup_byaddr(struct net *net, u32 mark,
						     const xfrm_address_t *daddr,
						     const xfrm_address_t *saddr,
						     u8 proto, unsigned short family)
{
	unsigned int h = xfrm_src_hash(net, daddr, saddr, family);
	struct xfrm_state *x;

	hlist_for_each_entry(x, net->xfrm.state_bysrc+h, bysrc) {
		if (x->props.family != family ||
		    x->id.proto != proto ||
		    !xfrm_addr_equal(&x->id.daddr, daddr, family) ||
		    !xfrm_addr_equal(&x->props.saddr, saddr, family))
			continue;

		if ((mark & x->mark.m) != x->mark.v)
			continue;
		xfrm_state_hold(x);
		return x;
	}

	return NULL;
}

static inline struct xfrm_state *
__xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
{
	struct net *net = xs_net(x);
	u32 mark = x->mark.v & x->mark.m;

	if (use_spi)
		return __xfrm_state_lookup(net, mark, &x->id.daddr,
					   x->id.spi, x->id.proto, family);
	else
		return __xfrm_state_lookup_byaddr(net, mark,
						  &x->id.daddr,
						  &x->props.saddr,
						  x->id.proto, family);
}

static void xfrm_hash_grow_check(struct net *net, int have_hash_collision)
{
	if (have_hash_collision &&
	    (net->xfrm.state_hmask + 1) < xfrm_state_hashmax &&
	    net->xfrm.state_num > net->xfrm.state_hmask)
		schedule_work(&net->xfrm.state_hash_work);
}

static void xfrm_state_look_at(struct xfrm_policy *pol, struct xfrm_state *x,
			       const struct flowi *fl, unsigned short family,
			       struct xfrm_state **best, int *acq_in_progress,
			       int *error)
{
	/* Resolution logic:
	 * 1. There is a valid state with matching selector. Done.
	 * 2. Valid state with inappropriate selector. Skip.
	 *
	 * Entering area of "sysdeps".
	 *
	 * 3. If state is not valid, selector is temporary, it selects
	 *    only session which triggered previous resolution. Key
	 *    manager will do something to install a state with proper
	 *    selector.
	 */
	if (x->km.state == XFRM_STATE_VALID) {
		if ((x->sel.family &&
		     !xfrm_selector_match(&x->sel, fl, x->sel.family)) ||
		    !security_xfrm_state_pol_flow_match(x, pol, fl))
			return;

		if (!*best ||
		    (*best)->km.dying > x->km.dying ||
		    ((*best)->km.dying == x->km.dying &&
		     (*best)->curlft.add_time < x->curlft.add_time))
			*best = x;
	} else if (x->km.state == XFRM_STATE_ACQ) {
		*acq_in_progress = 1;
	} else if (x->km.state == XFRM_STATE_ERROR ||
		   x->km.state == XFRM_STATE_EXPIRED) {
		if (xfrm_selector_match(&x->sel, fl, x->sel.family) &&
		    security_xfrm_state_pol_flow_match(x, pol, fl))
			*error = -ESRCH;
	}
}

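/* Resolve a state for an outgoing flow.  Lookup order: the bydst
 * hash keyed by the real source address, then the same hash with a
 * wildcard source.  If nothing usable is found and no acquire is
 * pending, allocate a temporary XFRM_STATE_ACQ entry and ask the
 * key managers (km_query) to negotiate a real SA.
 */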
struct xfrm_state *
xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
		const struct flowi *fl, struct xfrm_tmpl *tmpl,
		struct xfrm_policy *pol, int *err,
		unsigned short family)
{
	static xfrm_address_t saddr_wildcard = { };
	struct net *net = xp_net(pol);
	unsigned int h, h_wildcard;
	struct xfrm_state *x, *x0, *to_put;
	int acquire_in_progress = 0;
	int error = 0;
	struct xfrm_state *best = NULL;
	u32 mark = pol->mark.v & pol->mark.m;
	unsigned short encap_family = tmpl->encap_family;
	struct km_event c;

	to_put = NULL;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family);
	hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
		if (x->props.family == encap_family &&
		    x->props.reqid == tmpl->reqid &&
		    (mark & x->mark.m) == x->mark.v &&
		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
		    xfrm_state_addr_check(x, daddr, saddr, encap_family) &&
		    tmpl->mode == x->props.mode &&
		    tmpl->id.proto == x->id.proto &&
		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
			xfrm_state_look_at(pol, x, fl, encap_family,
					   &best, &acquire_in_progress, &error);
	}
	if (best || acquire_in_progress)
		goto found;

	h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, encap_family);
	hlist_for_each_entry(x, net->xfrm.state_bydst+h_wildcard, bydst) {
		if (x->props.family == encap_family &&
		    x->props.reqid == tmpl->reqid &&
		    (mark & x->mark.m) == x->mark.v &&
		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
		    xfrm_addr_equal(&x->id.daddr, daddr, encap_family) &&
		    tmpl->mode == x->props.mode &&
		    tmpl->id.proto == x->id.proto &&
		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
			xfrm_state_look_at(pol, x, fl, encap_family,
					   &best, &acquire_in_progress, &error);
	}

found:
	x = best;
	if (!x && !error && !acquire_in_progress) {
		if (tmpl->id.spi &&
		    (x0 = __xfrm_state_lookup(net, mark, daddr, tmpl->id.spi,
					      tmpl->id.proto, encap_family)) != NULL) {
			to_put = x0;
			error = -EEXIST;
			goto out;
		}

		c.net = net;
		/* If the KMs have no listeners (yet...), avoid allocating an SA
		 * for each and every packet - garbage collection might not
		 * handle the flood.
		 */
		if (!km_is_alive(&c)) {
			error = -ESRCH;
			goto out;
		}

		x = xfrm_state_alloc(net);
		if (x == NULL) {
			error = -ENOMEM;
			goto out;
		}
		/* Initialize a temporary state matching only
		 * the current session. */
		xfrm_init_tempstate(x, fl, tmpl, daddr, saddr, family);
		memcpy(&x->mark, &pol->mark, sizeof(x->mark));

		error = security_xfrm_state_alloc_acquire(x, pol->security, fl->flowi_secid);
		if (error) {
			x->km.state = XFRM_STATE_DEAD;
			to_put = x;
			x = NULL;
			goto out;
		}

		if (km_query(x, tmpl, pol) == 0) {
			x->km.state = XFRM_STATE_ACQ;
			list_add(&x->km.all, &net->xfrm.state_all);
			hlist_add_head(&x->bydst, net->xfrm.state_bydst+h);
			h = xfrm_src_hash(net, daddr, saddr, encap_family);
			hlist_add_head(&x->bysrc, net->xfrm.state_bysrc+h);
			if (x->id.spi) {
				h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, encap_family);
				hlist_add_head(&x->byspi, net->xfrm.state_byspi+h);
			}
			x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
			tasklet_hrtimer_start(&x->mtimer, ktime_set(net->xfrm.sysctl_acq_expires, 0), HRTIMER_MODE_REL);
			net->xfrm.state_num++;
			xfrm_hash_grow_check(net, x->bydst.next != NULL);
		} else {
			x->km.state = XFRM_STATE_DEAD;
			to_put = x;
			x = NULL;
			error = -ESRCH;
		}
	}
out:
	if (x)
		xfrm_state_hold(x);
	else
		*err = acquire_in_progress ? -EAGAIN : error;
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
	if (to_put)
		xfrm_state_put(to_put);
	return x;
}

struct xfrm_state *
xfrm_stateonly_find(struct net *net, u32 mark,
		    xfrm_address_t *daddr, xfrm_address_t *saddr,
		    unsigned short family, u8 mode, u8 proto, u32 reqid)
{
	unsigned int h;
	struct xfrm_state *rx = NULL, *x = NULL;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
	hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
		if (x->props.family == family &&
		    x->props.reqid == reqid &&
		    (mark & x->mark.m) == x->mark.v &&
		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
		    xfrm_state_addr_check(x, daddr, saddr, family) &&
		    mode == x->props.mode &&
		    proto == x->id.proto &&
		    x->km.state == XFRM_STATE_VALID) {
			rx = x;
			break;
		}
	}

	if (rx)
		xfrm_state_hold(rx);
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);


	return rx;
}
EXPORT_SYMBOL(xfrm_stateonly_find);

struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi,
					   unsigned short family)
{
	struct xfrm_state *x;
	struct xfrm_state_walk *w;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	list_for_each_entry(w, &net->xfrm.state_all, all) {
		x = container_of(w, struct xfrm_state, km);
		if (x->props.family != family ||
		    x->id.spi != spi)
			continue;

		xfrm_state_hold(x);
		spin_unlock_bh(&net->xfrm.xfrm_state_lock);
		return x;
	}
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
	return NULL;
}
EXPORT_SYMBOL(xfrm_state_lookup_byspi);

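/* net->xfrm.xfrm_state_lock is held */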
static void __xfrm_state_insert(struct xfrm_state *x)
{
	struct net *net = xs_net(x);
	unsigned int h;

	list_add(&x->km.all, &net->xfrm.state_all);

	h = xfrm_dst_hash(net, &x->id.daddr, &x->props.saddr,
			  x->props.reqid, x->props.family);
	hlist_add_head(&x->bydst, net->xfrm.state_bydst+h);

	h = xfrm_src_hash(net, &x->id.daddr, &x->props.saddr, x->props.family);
	hlist_add_head(&x->bysrc, net->xfrm.state_bysrc+h);

	if (x->id.spi) {
		h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto,
				  x->props.family);

		hlist_add_head(&x->byspi, net->xfrm.state_byspi+h);
	}

	tasklet_hrtimer_start(&x->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL);
	if (x->replay_maxage)
		mod_timer(&x->rtimer, jiffies + x->replay_maxage);

	net->xfrm.state_num++;

	xfrm_hash_grow_check(net, x->bydst.next != NULL);
}

/* net->xfrm.xfrm_state_lock is held */
static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
{
	struct net *net = xs_net(xnew);
	unsigned short family = xnew->props.family;
	u32 reqid = xnew->props.reqid;
	struct xfrm_state *x;
	unsigned int h;
	u32 mark = xnew->mark.v & xnew->mark.m;

	h = xfrm_dst_hash(net, &xnew->id.daddr, &xnew->props.saddr, reqid, family);
	hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
		if (x->props.family == family &&
		    x->props.reqid == reqid &&
		    (mark & x->mark.m) == x->mark.v &&
		    xfrm_addr_equal(&x->id.daddr, &xnew->id.daddr, family) &&
		    xfrm_addr_equal(&x->props.saddr, &xnew->props.saddr, family))
			x->genid++;
	}
}

void xfrm_state_insert(struct xfrm_state *x)
{
	struct net *net = xs_net(x);

	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	__xfrm_state_bump_genids(x);
	__xfrm_state_insert(x);
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
}
EXPORT_SYMBOL(xfrm_state_insert);

/* net->xfrm.xfrm_state_lock is held */
static struct xfrm_state *__find_acq_core(struct net *net,
					  const struct xfrm_mark *m,
					  unsigned short family, u8 mode,
					  u32 reqid, u8 proto,
					  const xfrm_address_t *daddr,
					  const xfrm_address_t *saddr,
					  int create)
{
	unsigned int h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
	struct xfrm_state *x;
	u32 mark = m->v & m->m;

	hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
		if (x->props.reqid != reqid ||
		    x->props.mode != mode ||
		    x->props.family != family ||
		    x->km.state != XFRM_STATE_ACQ ||
		    x->id.spi != 0 ||
		    x->id.proto != proto ||
		    (mark & x->mark.m) != x->mark.v ||
		    !xfrm_addr_equal(&x->id.daddr, daddr, family) ||
		    !xfrm_addr_equal(&x->props.saddr, saddr, family))
			continue;

		xfrm_state_hold(x);
		return x;
	}

	if (!create)
		return NULL;

	x = xfrm_state_alloc(net);
	if (likely(x)) {
		switch (family) {
		case AF_INET:
			x->sel.daddr.a4 = daddr->a4;
			x->sel.saddr.a4 = saddr->a4;
			x->sel.prefixlen_d = 32;
			x->sel.prefixlen_s = 32;
			x->props.saddr.a4 = saddr->a4;
			x->id.daddr.a4 = daddr->a4;
			break;

		case AF_INET6:
			x->sel.daddr.in6 = daddr->in6;
			x->sel.saddr.in6 = saddr->in6;
			x->sel.prefixlen_d = 128;
			x->sel.prefixlen_s = 128;
			x->props.saddr.in6 = saddr->in6;
			x->id.daddr.in6 = daddr->in6;
			break;
		}

		x->km.state = XFRM_STATE_ACQ;
		x->id.proto = proto;
		x->props.family = family;
		x->props.mode = mode;
		x->props.reqid = reqid;
		x->mark.v = m->v;
		x->mark.m = m->m;
		x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
		xfrm_state_hold(x);
		tasklet_hrtimer_start(&x->mtimer, ktime_set(net->xfrm.sysctl_acq_expires, 0), HRTIMER_MODE_REL);
		list_add(&x->km.all, &net->xfrm.state_all);
		hlist_add_head(&x->bydst, net->xfrm.state_bydst+h);
		h = xfrm_src_hash(net, daddr, saddr, family);
		hlist_add_head(&x->bysrc, net->xfrm.state_bysrc+h);

		net->xfrm.state_num++;

		xfrm_hash_grow_check(net, x->bydst.next != NULL);
	}

	return x;
}

static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq);

int xfrm_state_add(struct xfrm_state *x)
{
	struct net *net = xs_net(x);
	struct xfrm_state *x1, *to_put;
	int family;
	int err;
	u32 mark = x->mark.v & x->mark.m;
	int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);

	family = x->props.family;

	to_put = NULL;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);

	x1 = __xfrm_state_locate(x, use_spi, family);
	if (x1) {
		to_put = x1;
		x1 = NULL;
		err = -EEXIST;
		goto out;
	}

	if (use_spi && x->km.seq) {
		x1 = __xfrm_find_acq_byseq(net, mark, x->km.seq);
		if (x1 && ((x1->id.proto != x->id.proto) ||
		    !xfrm_addr_equal(&x1->id.daddr, &x->id.daddr, family))) {
			to_put = x1;
			x1 = NULL;
		}
	}

	if (use_spi && !x1)
		x1 = __find_acq_core(net, &x->mark, family, x->props.mode,
				     x->props.reqid, x->id.proto,
				     &x->id.daddr, &x->props.saddr, 0);

	__xfrm_state_bump_genids(x);
	__xfrm_state_insert(x);
	err = 0;

out:
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);

	if (x1) {
		xfrm_state_delete(x1);
		xfrm_state_put(x1);
	}

	if (to_put)
		xfrm_state_put(to_put);

	return err;
}
EXPORT_SYMBOL(xfrm_state_add);

#ifdef CONFIG_XFRM_MIGRATE
static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig)
{
	struct net *net = xs_net(orig);
	struct xfrm_state *x = xfrm_state_alloc(net);
	if (!x)
		goto out;

	memcpy(&x->id, &orig->id, sizeof(x->id));
	memcpy(&x->sel, &orig->sel, sizeof(x->sel));
	memcpy(&x->lft, &orig->lft, sizeof(x->lft));
	x->props.mode = orig->props.mode;
	x->props.replay_window = orig->props.replay_window;
	x->props.reqid = orig->props.reqid;
	x->props.family = orig->props.family;
	x->props.saddr = orig->props.saddr;

	if (orig->aalg) {
		x->aalg = xfrm_algo_auth_clone(orig->aalg);
		if (!x->aalg)
			goto error;
	}
	x->props.aalgo = orig->props.aalgo;

	if (orig->aead) {
		x->aead = xfrm_algo_aead_clone(orig->aead);
		if (!x->aead)
			goto error;
	}
	if (orig->ealg) {
		x->ealg = xfrm_algo_clone(orig->ealg);
		if (!x->ealg)
			goto error;
	}
	x->props.ealgo = orig->props.ealgo;

	if (orig->calg) {
		x->calg = xfrm_algo_clone(orig->calg);
		if (!x->calg)
			goto error;
	}
	x->props.calgo = orig->props.calgo;

	if (orig->encap) {
		x->encap = kmemdup(orig->encap, sizeof(*x->encap), GFP_KERNEL);
		if (!x->encap)
			goto error;
	}

	if (orig->coaddr) {
		x->coaddr = kmemdup(orig->coaddr, sizeof(*x->coaddr),
				    GFP_KERNEL);
		if (!x->coaddr)
			goto error;
	}

	if (orig->replay_esn) {
		if (xfrm_replay_clone(x, orig))
			goto error;
	}

	memcpy(&x->mark, &orig->mark, sizeof(x->mark));

	if (xfrm_init_state(x) < 0)
		goto error;

	x->props.flags = orig->props.flags;
	x->props.extra_flags = orig->props.extra_flags;

	x->tfcpad = orig->tfcpad;
	x->replay_maxdiff = orig->replay_maxdiff;
	x->replay_maxage = orig->replay_maxage;
	x->curlft.add_time = orig->curlft.add_time;
	x->km.state = orig->km.state;
	x->km.seq = orig->km.seq;

	return x;

error:
	xfrm_state_put(x);
out:
	return NULL;
}

struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net)
{
	unsigned int h;
	struct xfrm_state *x = NULL;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);

	if (m->reqid) {
		h = xfrm_dst_hash(net, &m->old_daddr, &m->old_saddr,
				  m->reqid, m->old_family);
		hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
			if (x->props.mode != m->mode ||
			    x->id.proto != m->proto)
				continue;
			if (m->reqid && x->props.reqid != m->reqid)
				continue;
			if (!xfrm_addr_equal(&x->id.daddr, &m->old_daddr,
					     m->old_family) ||
			    !xfrm_addr_equal(&x->props.saddr, &m->old_saddr,
					     m->old_family))
				continue;
			xfrm_state_hold(x);
			break;
		}
	} else {
		h = xfrm_src_hash(net, &m->old_daddr, &m->old_saddr,
				  m->old_family);
		hlist_for_each_entry(x, net->xfrm.state_bysrc+h, bysrc) {
			if (x->props.mode != m->mode ||
			    x->id.proto != m->proto)
				continue;
			if (!xfrm_addr_equal(&x->id.daddr, &m->old_daddr,
					     m->old_family) ||
			    !xfrm_addr_equal(&x->props.saddr, &m->old_saddr,
					     m->old_family))
				continue;
			xfrm_state_hold(x);
			break;
		}
	}

	spin_unlock_bh(&net->xfrm.xfrm_state_lock);

	return x;
}
EXPORT_SYMBOL(xfrm_migrate_state_find);

struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x,
				      struct xfrm_migrate *m)
{
	struct xfrm_state *xc;

	xc = xfrm_state_clone(x);
	if (!xc)
		return NULL;

	memcpy(&xc->id.daddr, &m->new_daddr, sizeof(xc->id.daddr));
	memcpy(&xc->props.saddr, &m->new_saddr, sizeof(xc->props.saddr));

	/* add state */
	if (xfrm_addr_equal(&x->id.daddr, &m->new_daddr, m->new_family)) {
		/* care is needed when the destination address of the
		 * state is updated, as it is part of the lookup triplet */
		xfrm_state_insert(xc);
	} else {
		if (xfrm_state_add(xc) < 0)
			goto error;
	}

	return xc;
error:
	xfrm_state_put(xc);
	return NULL;
}
EXPORT_SYMBOL(xfrm_state_migrate);
#endif

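/* Replace the matching state with @x.  If the old entry is still an
 * XFRM_STATE_ACQ stub, @x is inserted in its place; otherwise the
 * encapsulation, care-of address, selector and lifetimes are copied
 * into the existing state and @x itself is dropped.
 */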
int xfrm_state_update(struct xfrm_state *x)
{
	struct xfrm_state *x1, *to_put;
	int err;
	int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
	struct net *net = xs_net(x);

	to_put = NULL;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	x1 = __xfrm_state_locate(x, use_spi, x->props.family);

	err = -ESRCH;
	if (!x1)
		goto out;

	if (xfrm_state_kern(x1)) {
		to_put = x1;
		err = -EEXIST;
		goto out;
	}

	if (x1->km.state == XFRM_STATE_ACQ) {
		__xfrm_state_insert(x);
		x = NULL;
	}
	err = 0;

out:
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);

	if (to_put)
		xfrm_state_put(to_put);

	if (err)
		return err;

	if (!x) {
		xfrm_state_delete(x1);
		xfrm_state_put(x1);
		return 0;
	}

	err = -EINVAL;
	spin_lock_bh(&x1->lock);
	if (likely(x1->km.state == XFRM_STATE_VALID)) {
		if (x->encap && x1->encap)
			memcpy(x1->encap, x->encap, sizeof(*x1->encap));
		if (x->coaddr && x1->coaddr) {
			memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr));
		}
		if (!use_spi && memcmp(&x1->sel, &x->sel, sizeof(x1->sel)))
			memcpy(&x1->sel, &x->sel, sizeof(x1->sel));
		memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
		x1->km.dying = 0;

		tasklet_hrtimer_start(&x1->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL);
		if (x1->curlft.use_time)
			xfrm_state_check_expire(x1);

		err = 0;
		x->km.state = XFRM_STATE_DEAD;
		__xfrm_state_put(x);
	}
	spin_unlock_bh(&x1->lock);

	xfrm_state_put(x1);

	return err;
}
EXPORT_SYMBOL(xfrm_state_update);

int xfrm_state_check_expire(struct xfrm_state *x)
{
	if (!x->curlft.use_time)
		x->curlft.use_time = get_seconds();

	if (x->curlft.bytes >= x->lft.hard_byte_limit ||
	    x->curlft.packets >= x->lft.hard_packet_limit) {
		x->km.state = XFRM_STATE_EXPIRED;
		tasklet_hrtimer_start(&x->mtimer, ktime_set(0, 0), HRTIMER_MODE_REL);
		return -EINVAL;
	}

	if (!x->km.dying &&
	    (x->curlft.bytes >= x->lft.soft_byte_limit ||
	     x->curlft.packets >= x->lft.soft_packet_limit)) {
		x->km.dying = 1;
		km_state_expired(x, 0, 0);
	}
	return 0;
}
EXPORT_SYMBOL(xfrm_state_check_expire);

struct xfrm_state *
xfrm_state_lookup(struct net *net, u32 mark, const xfrm_address_t *daddr, __be32 spi,
		  u8 proto, unsigned short family)
{
	struct xfrm_state *x;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	x = __xfrm_state_lookup(net, mark, daddr, spi, proto, family);
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
	return x;
}
EXPORT_SYMBOL(xfrm_state_lookup);

struct xfrm_state *
xfrm_state_lookup_byaddr(struct net *net, u32 mark,
			 const xfrm_address_t *daddr, const xfrm_address_t *saddr,
			 u8 proto, unsigned short family)
{
	struct xfrm_state *x;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	x = __xfrm_state_lookup_byaddr(net, mark, daddr, saddr, proto, family);
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
	return x;
}
EXPORT_SYMBOL(xfrm_state_lookup_byaddr);

struct xfrm_state *
xfrm_find_acq(struct net *net, const struct xfrm_mark *mark, u8 mode, u32 reqid,
	      u8 proto, const xfrm_address_t *daddr,
	      const xfrm_address_t *saddr, int create, unsigned short family)
{
	struct xfrm_state *x;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	x = __find_acq_core(net, mark, family, mode, reqid, proto, daddr, saddr, create);
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);

	return x;
}
EXPORT_SYMBOL(xfrm_find_acq);

#ifdef CONFIG_XFRM_SUB_POLICY
int
xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
	       unsigned short family, struct net *net)
{
	int err = 0;
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		return -EAFNOSUPPORT;

	spin_lock_bh(&net->xfrm.xfrm_state_lock); /*FIXME*/
	if (afinfo->tmpl_sort)
		err = afinfo->tmpl_sort(dst, src, n);
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
	xfrm_state_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_tmpl_sort);

int
xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
		unsigned short family)
{
	int err = 0;
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	struct net *net = xs_net(*src);

	if (!afinfo)
		return -EAFNOSUPPORT;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	if (afinfo->state_sort)
		err = afinfo->state_sort(dst, src, n);
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
	xfrm_state_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_state_sort);
#endif

/* Silly enough, but I'm too lazy to build a resolution list */

static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq)
{
	int i;

	for (i = 0; i <= net->xfrm.state_hmask; i++) {
		struct xfrm_state *x;

		hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
			if (x->km.seq == seq &&
			    (mark & x->mark.m) == x->mark.v &&
			    x->km.state == XFRM_STATE_ACQ) {
				xfrm_state_hold(x);
				return x;
			}
		}
	}
	return NULL;
}

struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq)
{
	struct xfrm_state *x;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	x = __xfrm_find_acq_byseq(net, mark, seq);
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
	return x;
}
EXPORT_SYMBOL(xfrm_find_acq_byseq);

u32 xfrm_get_acqseq(void)
{
	u32 res;
	static atomic_t acqseq;

	do {
		res = atomic_inc_return(&acqseq);
	} while (!res);

	return res;
}
EXPORT_SYMBOL(xfrm_get_acqseq);

int verify_spi_info(u8 proto, u32 min, u32 max)
{
	switch (proto) {
	case IPPROTO_AH:
	case IPPROTO_ESP:
		break;

	case IPPROTO_COMP:
		/* IPCOMP spi is 16-bits. */
		if (max >= 0x10000)
			return -EINVAL;
		break;

	default:
		return -EINVAL;
	}

	if (min > max)
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL(verify_spi_info);

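/* Assign an SPI from [low, high] to @x.  A single-value range is
 * taken as-is when free; otherwise random SPIs are probed, with at
 * most high - low + 1 attempts.  On success the state is linked
 * into the byspi hash.
 */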
int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
{
	struct net *net = xs_net(x);
	unsigned int h;
	struct xfrm_state *x0;
	int err = -ENOENT;
	__be32 minspi = htonl(low);
	__be32 maxspi = htonl(high);
	u32 mark = x->mark.v & x->mark.m;

	spin_lock_bh(&x->lock);
	if (x->km.state == XFRM_STATE_DEAD)
		goto unlock;

	err = 0;
	if (x->id.spi)
		goto unlock;

	err = -ENOENT;

	if (minspi == maxspi) {
		x0 = xfrm_state_lookup(net, mark, &x->id.daddr, minspi, x->id.proto, x->props.family);
		if (x0) {
			xfrm_state_put(x0);
			goto unlock;
		}
		x->id.spi = minspi;
	} else {
		u32 spi = 0;
		for (h = 0; h < high-low+1; h++) {
			spi = low + prandom_u32()%(high-low+1);
			x0 = xfrm_state_lookup(net, mark, &x->id.daddr, htonl(spi), x->id.proto, x->props.family);
			if (x0 == NULL) {
				x->id.spi = htonl(spi);
				break;
			}
			xfrm_state_put(x0);
		}
	}
	if (x->id.spi) {
		spin_lock_bh(&net->xfrm.xfrm_state_lock);
		h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, x->props.family);
		hlist_add_head(&x->byspi, net->xfrm.state_byspi+h);
		spin_unlock_bh(&net->xfrm.xfrm_state_lock);

		err = 0;
	}

unlock:
	spin_unlock_bh(&x->lock);

	return err;
}
EXPORT_SYMBOL(xfrm_alloc_spi);

static bool __xfrm_state_filter_match(struct xfrm_state *x,
				      struct xfrm_address_filter *filter)
{
	if (filter) {
		if ((filter->family == AF_INET ||
		     filter->family == AF_INET6) &&
		    x->props.family != filter->family)
			return false;

		return addr_match(&x->props.saddr, &filter->saddr,
				  filter->splen) &&
		       addr_match(&x->id.daddr, &filter->daddr,
				  filter->dplen);
	}
	return true;
}

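/* Resumable dump over net->xfrm.state_all.  If @func returns
 * non-zero, the walker stays linked at the current position so a
 * later call continues from there; xfrm_state_walk_done() unlinks
 * an unfinished walk.
 */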
int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
		    int (*func)(struct xfrm_state *, int, void*),
		    void *data)
{
	struct xfrm_state *state;
	struct xfrm_state_walk *x;
	int err = 0;

	if (walk->seq != 0 && list_empty(&walk->all))
		return 0;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	if (list_empty(&walk->all))
		x = list_first_entry(&net->xfrm.state_all, struct xfrm_state_walk, all);
	else
		x = list_first_entry(&walk->all, struct xfrm_state_walk, all);
	list_for_each_entry_from(x, &net->xfrm.state_all, all) {
		if (x->state == XFRM_STATE_DEAD)
			continue;
		state = container_of(x, struct xfrm_state, km);
		if (!xfrm_id_proto_match(state->id.proto, walk->proto))
			continue;
		if (!__xfrm_state_filter_match(state, walk->filter))
			continue;
		err = func(state, walk->seq, data);
		if (err) {
			list_move_tail(&walk->all, &x->all);
			goto out;
		}
		walk->seq++;
	}
	if (walk->seq == 0) {
		err = -ENOENT;
		goto out;
	}
	list_del_init(&walk->all);
out:
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_state_walk);

void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto,
			  struct xfrm_address_filter *filter)
{
	INIT_LIST_HEAD(&walk->all);
	walk->proto = proto;
	walk->state = XFRM_STATE_DEAD;
	walk->seq = 0;
	walk->filter = filter;
}
EXPORT_SYMBOL(xfrm_state_walk_init);

void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net)
{
	kfree(walk->filter);

	if (list_empty(&walk->all))
		return;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	list_del(&walk->all);
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
}
EXPORT_SYMBOL(xfrm_state_walk_done);

static void xfrm_replay_timer_handler(unsigned long data)
{
	struct xfrm_state *x = (struct xfrm_state *)data;

	spin_lock(&x->lock);

	if (x->km.state == XFRM_STATE_VALID) {
		if (xfrm_aevent_is_on(xs_net(x)))
			x->repl->notify(x, XFRM_REPLAY_TIMEOUT);
		else
			x->xflags |= XFRM_TIME_DEFER;
	}

	spin_unlock(&x->lock);
}

static LIST_HEAD(xfrm_km_list);

void km_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
{
	struct xfrm_mgr *km;

	rcu_read_lock();
	list_for_each_entry_rcu(km, &xfrm_km_list, list)
		if (km->notify_policy)
			km->notify_policy(xp, dir, c);
	rcu_read_unlock();
}

void km_state_notify(struct xfrm_state *x, const struct km_event *c)
{
	struct xfrm_mgr *km;
	rcu_read_lock();
	list_for_each_entry_rcu(km, &xfrm_km_list, list)
		if (km->notify)
			km->notify(x, c);
	rcu_read_unlock();
}

EXPORT_SYMBOL(km_policy_notify);
EXPORT_SYMBOL(km_state_notify);

void km_state_expired(struct xfrm_state *x, int hard, u32 portid)
{
	struct km_event c;

	c.data.hard = hard;
	c.portid = portid;
	c.event = XFRM_MSG_EXPIRE;
	km_state_notify(x, &c);
}

EXPORT_SYMBOL(km_state_expired);
/*
 * We send to all registered managers regardless of failure;
 * we are happy with one success.
 */
int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
{
	int err = -EINVAL, acqret;
	struct xfrm_mgr *km;

	rcu_read_lock();
	list_for_each_entry_rcu(km, &xfrm_km_list, list) {
		acqret = km->acquire(x, t, pol);
		if (!acqret)
			err = acqret;
	}
	rcu_read_unlock();
	return err;
}
EXPORT_SYMBOL(km_query);

int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
{
	int err = -EINVAL;
	struct xfrm_mgr *km;

	rcu_read_lock();
	list_for_each_entry_rcu(km, &xfrm_km_list, list) {
		if (km->new_mapping)
			err = km->new_mapping(x, ipaddr, sport);
		if (!err)
			break;
	}
	rcu_read_unlock();
	return err;
}
EXPORT_SYMBOL(km_new_mapping);

void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 portid)
{
	struct km_event c;

	c.data.hard = hard;
	c.portid = portid;
	c.event = XFRM_MSG_POLEXPIRE;
	km_policy_notify(pol, dir, &c);
}
EXPORT_SYMBOL(km_policy_expired);

#ifdef CONFIG_XFRM_MIGRATE
int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
	       const struct xfrm_migrate *m, int num_migrate,
	       const struct xfrm_kmaddress *k)
{
	int err = -EINVAL;
	int ret;
	struct xfrm_mgr *km;

	rcu_read_lock();
	list_for_each_entry_rcu(km, &xfrm_km_list, list) {
		if (km->migrate) {
			ret = km->migrate(sel, dir, type, m, num_migrate, k);
			if (!ret)
				err = ret;
		}
	}
	rcu_read_unlock();
	return err;
}
EXPORT_SYMBOL(km_migrate);
#endif

int km_report(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr)
{
	int err = -EINVAL;
	int ret;
	struct xfrm_mgr *km;

	rcu_read_lock();
	list_for_each_entry_rcu(km, &xfrm_km_list, list) {
		if (km->report) {
			ret = km->report(net, proto, sel, addr);
			if (!ret)
				err = ret;
		}
	}
	rcu_read_unlock();
	return err;
}
EXPORT_SYMBOL(km_report);

bool km_is_alive(const struct km_event *c)
{
	struct xfrm_mgr *km;
	bool is_alive = false;

	rcu_read_lock();
	list_for_each_entry_rcu(km, &xfrm_km_list, list) {
		if (km->is_alive && km->is_alive(c)) {
			is_alive = true;
			break;
		}
	}
	rcu_read_unlock();

	return is_alive;
}
EXPORT_SYMBOL(km_is_alive);

int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
{
	int err;
	u8 *data;
	struct xfrm_mgr *km;
	struct xfrm_policy *pol = NULL;

	if (optlen <= 0 || optlen > PAGE_SIZE)
		return -EMSGSIZE;

	data = kmalloc(optlen, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	err = -EFAULT;
	if (copy_from_user(data, optval, optlen))
		goto out;

	err = -EINVAL;
	rcu_read_lock();
	list_for_each_entry_rcu(km, &xfrm_km_list, list) {
		pol = km->compile_policy(sk, optname, data,
					 optlen, &err);
		if (err >= 0)
			break;
	}
	rcu_read_unlock();

	if (err >= 0) {
		xfrm_sk_policy_insert(sk, err, pol);
		xfrm_pol_put(pol);
		err = 0;
	}

out:
	kfree(data);
	return err;
}
EXPORT_SYMBOL(xfrm_user_policy);

static DEFINE_SPINLOCK(xfrm_km_lock);

int xfrm_register_km(struct xfrm_mgr *km)
{
	spin_lock_bh(&xfrm_km_lock);
	list_add_tail_rcu(&km->list, &xfrm_km_list);
	spin_unlock_bh(&xfrm_km_lock);
	return 0;
}
EXPORT_SYMBOL(xfrm_register_km);

int xfrm_unregister_km(struct xfrm_mgr *km)
{
	spin_lock_bh(&xfrm_km_lock);
	list_del_rcu(&km->list);
	spin_unlock_bh(&xfrm_km_lock);
	synchronize_rcu();
	return 0;
}
EXPORT_SYMBOL(xfrm_unregister_km);

int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
{
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	spin_lock_bh(&xfrm_state_afinfo_lock);
	if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
		err = -EEXIST;
	else
		rcu_assign_pointer(xfrm_state_afinfo[afinfo->family], afinfo);
	spin_unlock_bh(&xfrm_state_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_state_register_afinfo);

int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
{
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	spin_lock_bh(&xfrm_state_afinfo_lock);
	if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
		if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo))
			err = -EINVAL;
		else
			RCU_INIT_POINTER(xfrm_state_afinfo[afinfo->family], NULL);
	}
	spin_unlock_bh(&xfrm_state_afinfo_lock);
	synchronize_rcu();
	return err;
}
EXPORT_SYMBOL(xfrm_state_unregister_afinfo);

struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
{
	struct xfrm_state_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	rcu_read_lock();
	afinfo = rcu_dereference(xfrm_state_afinfo[family]);
	if (unlikely(!afinfo))
		rcu_read_unlock();
	return afinfo;
}

void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
{
	rcu_read_unlock();
}

/* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
void xfrm_state_delete_tunnel(struct xfrm_state *x)
{
	if (x->tunnel) {
		struct xfrm_state *t = x->tunnel;

		if (atomic_read(&t->tunnel_users) == 2)
			xfrm_state_delete(t);
		atomic_dec(&t->tunnel_users);
		xfrm_state_put(t);
		x->tunnel = NULL;
	}
}
EXPORT_SYMBOL(xfrm_state_delete_tunnel);

int xfrm_state_mtu(struct xfrm_state *x, int mtu)
{
	int res;

	spin_lock_bh(&x->lock);
	if (x->km.state == XFRM_STATE_VALID &&
	    x->type && x->type->get_mtu)
		res = x->type->get_mtu(x, mtu);
	else
		res = mtu - x->props.header_len;
	spin_unlock_bh(&x->lock);
	return res;
}

int __xfrm_init_state(struct xfrm_state *x, bool init_replay)
{
	struct xfrm_state_afinfo *afinfo;
	struct xfrm_mode *inner_mode;
	int family = x->props.family;
	int err;

	err = -EAFNOSUPPORT;
	afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		goto error;

	err = 0;
	if (afinfo->init_flags)
		err = afinfo->init_flags(x);

	xfrm_state_put_afinfo(afinfo);

	if (err)
		goto error;

	err = -EPROTONOSUPPORT;

	if (x->sel.family != AF_UNSPEC) {
		inner_mode = xfrm_get_mode(x->props.mode, x->sel.family);
		if (inner_mode == NULL)
			goto error;

		if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL) &&
		    family != x->sel.family) {
			xfrm_put_mode(inner_mode);
			goto error;
		}

		x->inner_mode = inner_mode;
	} else {
		struct xfrm_mode *inner_mode_iaf;
		int iafamily = AF_INET;

		inner_mode = xfrm_get_mode(x->props.mode, x->props.family);
		if (inner_mode == NULL)
			goto error;

		if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL)) {
			xfrm_put_mode(inner_mode);
			goto error;
		}
		x->inner_mode = inner_mode;

		if (x->props.family == AF_INET)
			iafamily = AF_INET6;

		inner_mode_iaf = xfrm_get_mode(x->props.mode, iafamily);
		if (inner_mode_iaf) {
			if (inner_mode_iaf->flags & XFRM_MODE_FLAG_TUNNEL)
				x->inner_mode_iaf = inner_mode_iaf;
			else
				xfrm_put_mode(inner_mode_iaf);
		}
	}

	x->type = xfrm_get_type(x->id.proto, family);
	if (x->type == NULL)
		goto error;

	err = x->type->init_state(x);
	if (err)
		goto error;

	x->outer_mode = xfrm_get_mode(x->props.mode, family);
	if (x->outer_mode == NULL) {
		err = -EPROTONOSUPPORT;
		goto error;
	}

	if (init_replay) {
		err = xfrm_init_replay(x);
		if (err)
			goto error;
	}

	x->km.state = XFRM_STATE_VALID;

error:
	return err;
}

EXPORT_SYMBOL(__xfrm_init_state);

int xfrm_init_state(struct xfrm_state *x)
{
	return __xfrm_init_state(x, true);
}

EXPORT_SYMBOL(xfrm_init_state);

int __net_init xfrm_state_init(struct net *net)
{
	unsigned int sz;

	INIT_LIST_HEAD(&net->xfrm.state_all);

	sz = sizeof(struct hlist_head) * 8;

	net->xfrm.state_bydst = xfrm_hash_alloc(sz);
	if (!net->xfrm.state_bydst)
		goto out_bydst;
	net->xfrm.state_bysrc = xfrm_hash_alloc(sz);
	if (!net->xfrm.state_bysrc)
		goto out_bysrc;
	net->xfrm.state_byspi = xfrm_hash_alloc(sz);
	if (!net->xfrm.state_byspi)
		goto out_byspi;
	net->xfrm.state_hmask = ((sz / sizeof(struct hlist_head)) - 1);

	net->xfrm.state_num = 0;
	INIT_WORK(&net->xfrm.state_hash_work, xfrm_hash_resize);
	INIT_HLIST_HEAD(&net->xfrm.state_gc_list);
	INIT_WORK(&net->xfrm.state_gc_work, xfrm_state_gc_task);
	spin_lock_init(&net->xfrm.xfrm_state_lock);
	return 0;

out_byspi:
	xfrm_hash_free(net->xfrm.state_bysrc, sz);
out_bysrc:
	xfrm_hash_free(net->xfrm.state_bydst, sz);
out_bydst:
	return -ENOMEM;
}

void xfrm_state_fini(struct net *net)
{
	unsigned int sz;

	flush_work(&net->xfrm.state_hash_work);
	xfrm_state_flush(net, IPSEC_PROTO_ANY, false);
	flush_work(&net->xfrm.state_gc_work);

	WARN_ON(!list_empty(&net->xfrm.state_all));

	sz = (net->xfrm.state_hmask + 1) * sizeof(struct hlist_head);
	WARN_ON(!hlist_empty(net->xfrm.state_byspi));
	xfrm_hash_free(net->xfrm.state_byspi, sz);
	WARN_ON(!hlist_empty(net->xfrm.state_bysrc));
	xfrm_hash_free(net->xfrm.state_bysrc, sz);
	WARN_ON(!hlist_empty(net->xfrm.state_bydst));
	xfrm_hash_free(net->xfrm.state_bydst, sz);
}

#ifdef CONFIG_AUDITSYSCALL
static void xfrm_audit_helper_sainfo(struct xfrm_state *x,
				     struct audit_buffer *audit_buf)
{
	struct xfrm_sec_ctx *ctx = x->security;
	u32 spi = ntohl(x->id.spi);

	if (ctx)
		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);

	switch (x->props.family) {
	case AF_INET:
		audit_log_format(audit_buf, " src=%pI4 dst=%pI4",
				 &x->props.saddr.a4, &x->id.daddr.a4);
		break;
	case AF_INET6:
		audit_log_format(audit_buf, " src=%pI6 dst=%pI6",
				 x->props.saddr.a6, x->id.daddr.a6);
		break;
	}

	audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
}

static void xfrm_audit_helper_pktinfo(struct sk_buff *skb, u16 family,
				      struct audit_buffer *audit_buf)
{
	const struct iphdr *iph4;
	const struct ipv6hdr *iph6;

	switch (family) {
	case AF_INET:
		iph4 = ip_hdr(skb);
		audit_log_format(audit_buf, " src=%pI4 dst=%pI4",
				 &iph4->saddr, &iph4->daddr);
		break;
	case AF_INET6:
		iph6 = ipv6_hdr(skb);
		audit_log_format(audit_buf,
				 " src=%pI6 dst=%pI6 flowlbl=0x%x%02x%02x",
				 &iph6->saddr, &iph6->daddr,
				 iph6->flow_lbl[0] & 0x0f,
				 iph6->flow_lbl[1],
				 iph6->flow_lbl[2]);
		break;
	}
}

void xfrm_audit_state_add(struct xfrm_state *x, int result, bool task_valid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SAD-add");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
	xfrm_audit_helper_sainfo(x, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_add);

void xfrm_audit_state_delete(struct xfrm_state *x, int result, bool task_valid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SAD-delete");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
	xfrm_audit_helper_sainfo(x, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_delete);

void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
				      struct sk_buff *skb)
{
	struct audit_buffer *audit_buf;
	u32 spi;

	audit_buf = xfrm_audit_start("SA-replay-overflow");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
	/* don't record the sequence number because it's inherent in this kind
	 * of audit message */
	spi = ntohl(x->id.spi);
	audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_replay_overflow);

void xfrm_audit_state_replay(struct xfrm_state *x,
			     struct sk_buff *skb, __be32 net_seq)
{
	struct audit_buffer *audit_buf;
	u32 spi;

	audit_buf = xfrm_audit_start("SA-replayed-pkt");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
	spi = ntohl(x->id.spi);
	audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
			 spi, spi, ntohl(net_seq));
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_replay);

void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SA-notfound");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_pktinfo(skb, family, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound_simple);

void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
			       __be32 net_spi, __be32 net_seq)
{
	struct audit_buffer *audit_buf;
	u32 spi;

	audit_buf = xfrm_audit_start("SA-notfound");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_pktinfo(skb, family, audit_buf);
	spi = ntohl(net_spi);
	audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
			 spi, spi, ntohl(net_seq));
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound);

void xfrm_audit_state_icvfail(struct xfrm_state *x,
			      struct sk_buff *skb, u8 proto)
{
	struct audit_buffer *audit_buf;
	__be32 net_spi;
	__be32 net_seq;

	audit_buf = xfrm_audit_start("SA-icv-failure");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
	if (xfrm_parse_spi(skb, proto, &net_spi, &net_seq) == 0) {
		u32 spi = ntohl(net_spi);
		audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
				 spi, spi, ntohl(net_seq));
	}
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_icvfail);
#endif /* CONFIG_AUDITSYSCALL */