]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blob - net/xfrm/xfrm_state.c
Merge branch 'linux-next' of git://git.infradead.org/~dedekind/ubifs-2.6
[mirror_ubuntu-zesty-kernel.git] / net / xfrm / xfrm_state.c
1 /*
2 * xfrm_state.c
3 *
4 * Changes:
5 * Mitsuru KANDA @USAGI
6 * Kazunori MIYAZAWA @USAGI
7 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
8 * IPv6 support
9 * YOSHIFUJI Hideaki @USAGI
10 * Split up af-specific functions
11 * Derek Atkins <derek@ihtfp.com>
12 * Add UDP Encapsulation
13 *
14 */
15
16 #include <linux/workqueue.h>
17 #include <net/xfrm.h>
18 #include <linux/pfkeyv2.h>
19 #include <linux/ipsec.h>
20 #include <linux/module.h>
21 #include <linux/cache.h>
22 #include <linux/audit.h>
23 #include <asm/uaccess.h>
24
25 #include "xfrm_hash.h"
26
27 struct sock *xfrm_nl;
28 EXPORT_SYMBOL(xfrm_nl);
29
30 u32 sysctl_xfrm_aevent_etime __read_mostly = XFRM_AE_ETIME;
31 EXPORT_SYMBOL(sysctl_xfrm_aevent_etime);
32
33 u32 sysctl_xfrm_aevent_rseqth __read_mostly = XFRM_AE_SEQT_SIZE;
34 EXPORT_SYMBOL(sysctl_xfrm_aevent_rseqth);
35
36 u32 sysctl_xfrm_acq_expires __read_mostly = 30;
37
38 /* Each xfrm_state may be linked to two tables:
39
40 1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
41 2. Hash table by (daddr,family,reqid) to find what SAs exist for given
42 destination/tunnel endpoint. (output)
43 */
44
45 static DEFINE_SPINLOCK(xfrm_state_lock);
46
47 /* Hash table to find appropriate SA towards given target (endpoint
48 * of tunnel or destination of transport mode) allowed by selector.
49 *
50 * Main use is finding SA after policy selected tunnel or transport mode.
51 * Also, it can be used by ah/esp icmp error handler to find offending SA.
52 */
53 static LIST_HEAD(xfrm_state_all);
54 static struct hlist_head *xfrm_state_bydst __read_mostly;
55 static struct hlist_head *xfrm_state_bysrc __read_mostly;
56 static struct hlist_head *xfrm_state_byspi __read_mostly;
57 static unsigned int xfrm_state_hmask __read_mostly;
58 static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
59 static unsigned int xfrm_state_num;
60 static unsigned int xfrm_state_genid;
61
62 static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
63 static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
64
65 #ifdef CONFIG_AUDITSYSCALL
66 static void xfrm_audit_state_replay(struct xfrm_state *x,
67 struct sk_buff *skb, __be32 net_seq);
68 #else
69 #define xfrm_audit_state_replay(x, s, sq) do { ; } while (0)
70 #endif /* CONFIG_AUDITSYSCALL */
71
72 static inline unsigned int xfrm_dst_hash(xfrm_address_t *daddr,
73 xfrm_address_t *saddr,
74 u32 reqid,
75 unsigned short family)
76 {
77 return __xfrm_dst_hash(daddr, saddr, reqid, family, xfrm_state_hmask);
78 }
79
80 static inline unsigned int xfrm_src_hash(xfrm_address_t *daddr,
81 xfrm_address_t *saddr,
82 unsigned short family)
83 {
84 return __xfrm_src_hash(daddr, saddr, family, xfrm_state_hmask);
85 }
86
87 static inline unsigned int
88 xfrm_spi_hash(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
89 {
90 return __xfrm_spi_hash(daddr, spi, proto, family, xfrm_state_hmask);
91 }
92
/* Move every state chained on @list into the three new hash tables,
 * recomputing each bucket with the new mask @nhashmask.  The _safe
 * iterator is required because hlist_add_head() rewrites the node's
 * links while we are still walking the old chain.
 */
static void xfrm_hash_transfer(struct hlist_head *list,
			       struct hlist_head *ndsttable,
			       struct hlist_head *nsrctable,
			       struct hlist_head *nspitable,
			       unsigned int nhashmask)
{
	struct hlist_node *entry, *tmp;
	struct xfrm_state *x;

	hlist_for_each_entry_safe(x, entry, tmp, list, bydst) {
		unsigned int h;

		/* New by-destination bucket. */
		h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
				    x->props.reqid, x->props.family,
				    nhashmask);
		hlist_add_head(&x->bydst, ndsttable+h);

		/* New by-source bucket. */
		h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr,
				    x->props.family,
				    nhashmask);
		hlist_add_head(&x->bysrc, nsrctable+h);

		/* Only states with an assigned SPI live in the SPI table. */
		if (x->id.spi) {
			h = __xfrm_spi_hash(&x->id.daddr, x->id.spi,
					    x->id.proto, x->props.family,
					    nhashmask);
			hlist_add_head(&x->byspi, nspitable+h);
		}
	}
}
123
124 static unsigned long xfrm_hash_new_size(void)
125 {
126 return ((xfrm_state_hmask + 1) << 1) *
127 sizeof(struct hlist_head);
128 }
129
130 static DEFINE_MUTEX(hash_resize_mutex);
131
/* Workqueue handler that doubles all three state hash tables.
 *
 * The new tables are allocated outside any spinlock; only the bucket
 * transfer and the table-pointer swap run under xfrm_state_lock with
 * bottom halves disabled.  hash_resize_mutex serializes overlapping
 * resize requests.
 */
static void xfrm_hash_resize(struct work_struct *__unused)
{
	struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
	unsigned long nsize, osize;
	unsigned int nhashmask, ohashmask;
	int i;

	mutex_lock(&hash_resize_mutex);

	/* Allocate all three tables up front; back out on any failure. */
	nsize = xfrm_hash_new_size();
	ndst = xfrm_hash_alloc(nsize);
	if (!ndst)
		goto out_unlock;
	nsrc = xfrm_hash_alloc(nsize);
	if (!nsrc) {
		xfrm_hash_free(ndst, nsize);
		goto out_unlock;
	}
	nspi = xfrm_hash_alloc(nsize);
	if (!nspi) {
		xfrm_hash_free(ndst, nsize);
		xfrm_hash_free(nsrc, nsize);
		goto out_unlock;
	}

	spin_lock_bh(&xfrm_state_lock);

	/* Table size is a power of two, so size - 1 is the new mask. */
	nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
	for (i = xfrm_state_hmask; i >= 0; i--)
		xfrm_hash_transfer(xfrm_state_bydst+i, ndst, nsrc, nspi,
				   nhashmask);

	odst = xfrm_state_bydst;
	osrc = xfrm_state_bysrc;
	ospi = xfrm_state_byspi;
	ohashmask = xfrm_state_hmask;

	/* Publish the new tables atomically w.r.t. xfrm_state_lock users. */
	xfrm_state_bydst = ndst;
	xfrm_state_bysrc = nsrc;
	xfrm_state_byspi = nspi;
	xfrm_state_hmask = nhashmask;

	spin_unlock_bh(&xfrm_state_lock);

	/* Old tables are now unreachable; free them outside the lock. */
	osize = (ohashmask + 1) * sizeof(struct hlist_head);
	xfrm_hash_free(odst, osize);
	xfrm_hash_free(osrc, osize);
	xfrm_hash_free(ospi, osize);

out_unlock:
	mutex_unlock(&hash_resize_mutex);
}
184
185 static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);
186
187 DECLARE_WAIT_QUEUE_HEAD(km_waitq);
188 EXPORT_SYMBOL(km_waitq);
189
190 static DEFINE_RWLOCK(xfrm_state_afinfo_lock);
191 static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO];
192
193 static struct work_struct xfrm_state_gc_work;
194 static HLIST_HEAD(xfrm_state_gc_list);
195 static DEFINE_SPINLOCK(xfrm_state_gc_lock);
196
197 int __xfrm_state_delete(struct xfrm_state *x);
198
199 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
200 void km_state_expired(struct xfrm_state *x, int hard, u32 pid);
201
/* Look up the afinfo for @family under the write lock.
 *
 * On success the afinfo is returned with xfrm_state_afinfo_lock still
 * held; the caller must release it via xfrm_state_unlock_afinfo().  If
 * the family is out of range or unregistered, the lock is dropped here
 * and NULL is returned.
 */
static struct xfrm_state_afinfo *xfrm_state_lock_afinfo(unsigned int family)
{
	struct xfrm_state_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	write_lock_bh(&xfrm_state_afinfo_lock);
	afinfo = xfrm_state_afinfo[family];
	if (unlikely(!afinfo))
		write_unlock_bh(&xfrm_state_afinfo_lock);
	return afinfo;
}
213
/* Release the lock taken by a successful xfrm_state_lock_afinfo().
 * @afinfo is unused; it documents the pairing with the lock function.
 */
static void xfrm_state_unlock_afinfo(struct xfrm_state_afinfo *afinfo)
	__releases(xfrm_state_afinfo_lock)
{
	write_unlock_bh(&xfrm_state_afinfo_lock);
}
219
220 int xfrm_register_type(const struct xfrm_type *type, unsigned short family)
221 {
222 struct xfrm_state_afinfo *afinfo = xfrm_state_lock_afinfo(family);
223 const struct xfrm_type **typemap;
224 int err = 0;
225
226 if (unlikely(afinfo == NULL))
227 return -EAFNOSUPPORT;
228 typemap = afinfo->type_map;
229
230 if (likely(typemap[type->proto] == NULL))
231 typemap[type->proto] = type;
232 else
233 err = -EEXIST;
234 xfrm_state_unlock_afinfo(afinfo);
235 return err;
236 }
237 EXPORT_SYMBOL(xfrm_register_type);
238
239 int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family)
240 {
241 struct xfrm_state_afinfo *afinfo = xfrm_state_lock_afinfo(family);
242 const struct xfrm_type **typemap;
243 int err = 0;
244
245 if (unlikely(afinfo == NULL))
246 return -EAFNOSUPPORT;
247 typemap = afinfo->type_map;
248
249 if (unlikely(typemap[type->proto] != type))
250 err = -ENOENT;
251 else
252 typemap[type->proto] = NULL;
253 xfrm_state_unlock_afinfo(afinfo);
254 return err;
255 }
256 EXPORT_SYMBOL(xfrm_unregister_type);
257
/* Look up the transform type for (@proto, @family) and take a module
 * reference on it.  If the type is not present (or its module is going
 * away), try once to load it via the "xfrm-type-F-P" module alias and
 * retry the lookup.  Returns NULL on failure; on success the caller
 * must drop the reference with xfrm_put_type().
 */
static const struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
{
	struct xfrm_state_afinfo *afinfo;
	const struct xfrm_type **typemap;
	const struct xfrm_type *type;
	int modload_attempted = 0;

retry:
	afinfo = xfrm_state_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return NULL;
	typemap = afinfo->type_map;

	type = typemap[proto];
	/* try_module_get() failing means the owning module is unloading. */
	if (unlikely(type && !try_module_get(type->owner)))
		type = NULL;
	if (!type && !modload_attempted) {
		xfrm_state_put_afinfo(afinfo);
		request_module("xfrm-type-%d-%d", family, proto);
		modload_attempted = 1;
		goto retry;
	}

	xfrm_state_put_afinfo(afinfo);
	return type;
}
284
/* Drop the module reference taken by xfrm_get_type(). */
static void xfrm_put_type(const struct xfrm_type *type)
{
	module_put(type->owner);
}
289
/* Register an encapsulation mode (transport, tunnel, ...) for @family.
 *
 * Takes a reference on the afinfo's owning module so the family code
 * cannot unload while the mode points at it.  Returns 0 on success,
 * -EINVAL for an out-of-range encap id, -EAFNOSUPPORT if the family
 * has no afinfo, -EEXIST if the slot is taken, or -ENOENT if the
 * afinfo module reference could not be obtained.
 */
int xfrm_register_mode(struct xfrm_mode *mode, int family)
{
	struct xfrm_state_afinfo *afinfo;
	struct xfrm_mode **modemap;
	int err;

	if (unlikely(mode->encap >= XFRM_MODE_MAX))
		return -EINVAL;

	afinfo = xfrm_state_lock_afinfo(family);
	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	err = -EEXIST;
	modemap = afinfo->mode_map;
	if (modemap[mode->encap])
		goto out;

	err = -ENOENT;
	if (!try_module_get(afinfo->owner))
		goto out;

	mode->afinfo = afinfo;
	modemap[mode->encap] = mode;
	err = 0;

out:
	xfrm_state_unlock_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_register_mode);
321
/* Unregister an encapsulation mode and drop the afinfo module
 * reference taken at registration.  Returns 0 on success, -EINVAL for
 * an out-of-range encap id, -EAFNOSUPPORT if the family has no afinfo,
 * or -ENOENT if @mode is not the registered one for its slot.
 */
int xfrm_unregister_mode(struct xfrm_mode *mode, int family)
{
	struct xfrm_state_afinfo *afinfo;
	struct xfrm_mode **modemap;
	int err;

	if (unlikely(mode->encap >= XFRM_MODE_MAX))
		return -EINVAL;

	afinfo = xfrm_state_lock_afinfo(family);
	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	err = -ENOENT;
	modemap = afinfo->mode_map;
	if (likely(modemap[mode->encap] == mode)) {
		modemap[mode->encap] = NULL;
		module_put(mode->afinfo->owner);
		err = 0;
	}

	xfrm_state_unlock_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_unregister_mode);
347
/* Look up the mode object for (@encap, @family) and take a module
 * reference on it, attempting a one-shot "xfrm-mode-F-E" module load
 * if it is absent.  Mirrors xfrm_get_type().  On success the caller
 * must drop the reference with xfrm_put_mode().
 */
static struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family)
{
	struct xfrm_state_afinfo *afinfo;
	struct xfrm_mode *mode;
	int modload_attempted = 0;

	if (unlikely(encap >= XFRM_MODE_MAX))
		return NULL;

retry:
	afinfo = xfrm_state_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return NULL;

	mode = afinfo->mode_map[encap];
	/* try_module_get() failing means the owning module is unloading. */
	if (unlikely(mode && !try_module_get(mode->owner)))
		mode = NULL;
	if (!mode && !modload_attempted) {
		xfrm_state_put_afinfo(afinfo);
		request_module("xfrm-mode-%d-%d", family, encap);
		modload_attempted = 1;
		goto retry;
	}

	xfrm_state_put_afinfo(afinfo);
	return mode;
}
375
/* Drop the module reference taken by xfrm_get_mode(). */
static void xfrm_put_mode(struct xfrm_mode *mode)
{
	module_put(mode->owner);
}
380
/* Final teardown of a state.  Runs from the GC work item, i.e. in
 * process context, which del_timer_sync() requires.  The state has
 * already been unhashed, so nothing else can reach it here.
 */
static void xfrm_state_gc_destroy(struct xfrm_state *x)
{
	del_timer_sync(&x->timer);
	del_timer_sync(&x->rtimer);
	/* kfree(NULL) is a no-op, so unset algorithm fields are fine. */
	kfree(x->aalg);
	kfree(x->ealg);
	kfree(x->calg);
	kfree(x->encap);
	kfree(x->coaddr);
	if (x->inner_mode)
		xfrm_put_mode(x->inner_mode);
	if (x->inner_mode_iaf)
		xfrm_put_mode(x->inner_mode_iaf);
	if (x->outer_mode)
		xfrm_put_mode(x->outer_mode);
	if (x->type) {
		x->type->destructor(x);
		xfrm_put_type(x->type);
	}
	security_xfrm_state_free(x);
	kfree(x);
}
403
/* GC work handler: grab the whole pending list under the GC spinlock,
 * then destroy each state without any lock held.  Dead states are
 * chained through their (by now unhashed) bydst node - see
 * __xfrm_state_destroy().
 */
static void xfrm_state_gc_task(struct work_struct *data)
{
	struct xfrm_state *x;
	struct hlist_node *entry, *tmp;
	struct hlist_head gc_list;

	spin_lock_bh(&xfrm_state_gc_lock);
	gc_list.first = xfrm_state_gc_list.first;
	INIT_HLIST_HEAD(&xfrm_state_gc_list);
	spin_unlock_bh(&xfrm_state_gc_lock);

	hlist_for_each_entry_safe(x, entry, tmp, &gc_list, bydst)
		xfrm_state_gc_destroy(x);

	/* Let waiters (e.g. flush callers) see the freed capacity. */
	wake_up(&km_waitq);
}
420
421 static inline unsigned long make_jiffies(long secs)
422 {
423 if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
424 return MAX_SCHEDULE_TIMEOUT-1;
425 else
426 return secs*HZ;
427 }
428
/* Per-state lifetime timer.
 *
 * Evaluates hard and soft add/use expiry deadlines, fires soft-expiry
 * notifications (km_state_expired(..., hard=0)) once per dying phase,
 * and deletes the state on hard expiry.  Reschedules itself for the
 * nearest future deadline.  Runs in timer (softirq) context, hence
 * plain spin_lock on x->lock.
 */
static void xfrm_timer_handler(unsigned long data)
{
	struct xfrm_state *x = (struct xfrm_state*)data;
	unsigned long now = get_seconds();
	long next = LONG_MAX;	/* seconds until the nearest deadline */
	int warn = 0;		/* set when a soft limit has passed */
	int err = 0;

	spin_lock(&x->lock);
	if (x->km.state == XFRM_STATE_DEAD)
		goto out;
	if (x->km.state == XFRM_STATE_EXPIRED)
		goto expired;
	if (x->lft.hard_add_expires_seconds) {
		long tmo = x->lft.hard_add_expires_seconds +
			x->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (x->lft.hard_use_expires_seconds) {
		/* use_time == 0 means never used; count from now. */
		long tmo = x->lft.hard_use_expires_seconds +
			(x->curlft.use_time ? : now) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	/* Soft expiry was already signalled; only hard limits matter now. */
	if (x->km.dying)
		goto resched;
	if (x->lft.soft_add_expires_seconds) {
		long tmo = x->lft.soft_add_expires_seconds +
			x->curlft.add_time - now;
		if (tmo <= 0)
			warn = 1;
		else if (tmo < next)
			next = tmo;
	}
	if (x->lft.soft_use_expires_seconds) {
		long tmo = x->lft.soft_use_expires_seconds +
			(x->curlft.use_time ? : now) - now;
		if (tmo <= 0)
			warn = 1;
		else if (tmo < next)
			next = tmo;
	}

	x->km.dying = warn;
	if (warn)
		km_state_expired(x, 0, 0);
resched:
	if (next != LONG_MAX)
		mod_timer(&x->timer, jiffies + make_jiffies(next));

	goto out;

expired:
	/* An ACQ placeholder without an SPI just flips to EXPIRED and
	 * gets polled again shortly instead of being deleted outright.
	 */
	if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0) {
		x->km.state = XFRM_STATE_EXPIRED;
		wake_up(&km_waitq);
		next = 2;
		goto resched;
	}

	err = __xfrm_state_delete(x);
	if (!err && x->id.spi)
		km_state_expired(x, 1, 0);

	xfrm_audit_state_delete(x, err ? 0 : 1,
				audit_get_loginuid(current),
				audit_get_sessionid(current), 0);

out:
	spin_unlock(&x->lock);
}
505
506 static void xfrm_replay_timer_handler(unsigned long data);
507
/* Allocate and initialize a new xfrm_state with refcount 1, timers
 * set up (but not armed), and infinite byte/packet lifetimes.
 * Returns NULL on allocation failure.  GFP_ATOMIC because callers may
 * hold spinlocks (e.g. __find_acq_core() under xfrm_state_lock).
 */
struct xfrm_state *xfrm_state_alloc(void)
{
	struct xfrm_state *x;

	x = kzalloc(sizeof(struct xfrm_state), GFP_ATOMIC);

	if (x) {
		atomic_set(&x->refcnt, 1);
		atomic_set(&x->tunnel_users, 0);
		INIT_LIST_HEAD(&x->all);
		INIT_HLIST_NODE(&x->bydst);
		INIT_HLIST_NODE(&x->bysrc);
		INIT_HLIST_NODE(&x->byspi);
		setup_timer(&x->timer, xfrm_timer_handler, (unsigned long)x);
		setup_timer(&x->rtimer, xfrm_replay_timer_handler,
			    (unsigned long)x);
		x->curlft.add_time = get_seconds();
		/* XFRM_INF == no byte/packet limit until configured. */
		x->lft.soft_byte_limit = XFRM_INF;
		x->lft.soft_packet_limit = XFRM_INF;
		x->lft.hard_byte_limit = XFRM_INF;
		x->lft.hard_packet_limit = XFRM_INF;
		x->replay_maxage = 0;
		x->replay_maxdiff = 0;
		x->inner_mode = NULL;
		x->inner_mode_iaf = NULL;
		spin_lock_init(&x->lock);
	}
	return x;
}
EXPORT_SYMBOL(xfrm_state_alloc);
538
/* Called when the last reference is dropped: unlink the state from the
 * global list and queue it for deferred destruction on the GC work
 * item (actual teardown needs process context for del_timer_sync()).
 * The state must already be XFRM_STATE_DEAD, i.e. unhashed, so its
 * bydst node is free to chain it on the GC list.
 */
void __xfrm_state_destroy(struct xfrm_state *x)
{
	WARN_ON(x->km.state != XFRM_STATE_DEAD);

	spin_lock_bh(&xfrm_state_lock);
	list_del(&x->all);
	spin_unlock_bh(&xfrm_state_lock);

	spin_lock_bh(&xfrm_state_gc_lock);
	hlist_add_head(&x->bydst, &xfrm_state_gc_list);
	spin_unlock_bh(&xfrm_state_gc_lock);
	schedule_work(&xfrm_state_gc_work);
}
EXPORT_SYMBOL(__xfrm_state_destroy);
553
/* Mark a state dead and remove it from all hash tables.
 *
 * Callers hold x->lock (see xfrm_state_delete() and the timer
 * handler).  Returns 0 if this call performed the deletion, -ESRCH if
 * the state was already dead.  Drops the reference that
 * xfrm_state_alloc() created, which may trigger final destruction.
 */
int __xfrm_state_delete(struct xfrm_state *x)
{
	int err = -ESRCH;

	if (x->km.state != XFRM_STATE_DEAD) {
		x->km.state = XFRM_STATE_DEAD;
		spin_lock(&xfrm_state_lock);
		hlist_del(&x->bydst);
		hlist_del(&x->bysrc);
		if (x->id.spi)
			hlist_del(&x->byspi);
		xfrm_state_num--;
		spin_unlock(&xfrm_state_lock);

		/* All xfrm_state objects are created by xfrm_state_alloc.
		 * The xfrm_state_alloc call gives a reference, and that
		 * is what we are dropping here.
		 */
		xfrm_state_put(x);
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL(__xfrm_state_delete);
579
580 int xfrm_state_delete(struct xfrm_state *x)
581 {
582 int err;
583
584 spin_lock_bh(&x->lock);
585 err = __xfrm_state_delete(x);
586 spin_unlock_bh(&x->lock);
587
588 return err;
589 }
590 EXPORT_SYMBOL(xfrm_state_delete);
591
#ifdef CONFIG_SECURITY_NETWORK_XFRM
/* Pre-flight for xfrm_state_flush(): ask the LSM whether every state
 * matching @proto may be deleted.  On the first refusal, emit an audit
 * record and return the LSM's error so the flush is aborted before any
 * state is touched.  Runs under xfrm_state_lock (taken by the caller).
 */
static inline int
xfrm_state_flush_secctx_check(u8 proto, struct xfrm_audit *audit_info)
{
	int i, err = 0;

	for (i = 0; i <= xfrm_state_hmask; i++) {
		struct hlist_node *entry;
		struct xfrm_state *x;

		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
			if (xfrm_id_proto_match(x->id.proto, proto) &&
			   (err = security_xfrm_state_delete(x)) != 0) {
				xfrm_audit_state_delete(x, 0,
							audit_info->loginuid,
							audit_info->sessionid,
							audit_info->secid);
				return err;
			}
		}
	}

	return err;
}
#else
/* No LSM support compiled in: every flush is permitted. */
static inline int
xfrm_state_flush_secctx_check(u8 proto, struct xfrm_audit *audit_info)
{
	return 0;
}
#endif
623
/* Delete every non-kernel-owned state whose protocol matches @proto,
 * auditing each deletion.
 *
 * xfrm_state_lock cannot be held across xfrm_state_delete() (it takes
 * x->lock and the state lock itself), so for each victim we take a
 * temporary reference, drop the table lock, delete, then reacquire
 * the lock and restart the bucket scan from the top since the chain
 * may have changed meanwhile.
 */
int xfrm_state_flush(u8 proto, struct xfrm_audit *audit_info)
{
	int i, err = 0;

	spin_lock_bh(&xfrm_state_lock);
	err = xfrm_state_flush_secctx_check(proto, audit_info);
	if (err)
		goto out;

	for (i = 0; i <= xfrm_state_hmask; i++) {
		struct hlist_node *entry;
		struct xfrm_state *x;
restart:
		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
			if (!xfrm_state_kern(x) &&
			    xfrm_id_proto_match(x->id.proto, proto)) {
				xfrm_state_hold(x);
				spin_unlock_bh(&xfrm_state_lock);

				err = xfrm_state_delete(x);
				xfrm_audit_state_delete(x, err ? 0 : 1,
							audit_info->loginuid,
							audit_info->sessionid,
							audit_info->secid);
				xfrm_state_put(x);

				spin_lock_bh(&xfrm_state_lock);
				goto restart;
			}
		}
	}
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);
	wake_up(&km_waitq);
	return err;
}
EXPORT_SYMBOL(xfrm_state_flush);
663
664 void xfrm_sad_getinfo(struct xfrmk_sadinfo *si)
665 {
666 spin_lock_bh(&xfrm_state_lock);
667 si->sadcnt = xfrm_state_num;
668 si->sadhcnt = xfrm_state_hmask;
669 si->sadhmcnt = xfrm_state_hashmax;
670 spin_unlock_bh(&xfrm_state_lock);
671 }
672 EXPORT_SYMBOL(xfrm_sad_getinfo);
673
674 static int
675 xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
676 struct xfrm_tmpl *tmpl,
677 xfrm_address_t *daddr, xfrm_address_t *saddr,
678 unsigned short family)
679 {
680 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
681 if (!afinfo)
682 return -1;
683 afinfo->init_tempsel(x, fl, tmpl, daddr, saddr);
684 xfrm_state_put_afinfo(afinfo);
685 return 0;
686 }
687
/* Find a state by (daddr, spi, proto, family) in the by-SPI table.
 * Caller holds xfrm_state_lock.  On a hit, returns the state with an
 * extra reference held; NULL if nothing matches.
 */
static struct xfrm_state *__xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
{
	unsigned int h = xfrm_spi_hash(daddr, spi, proto, family);
	struct xfrm_state *x;
	struct hlist_node *entry;

	hlist_for_each_entry(x, entry, xfrm_state_byspi+h, byspi) {
		if (x->props.family != family ||
		    x->id.spi       != spi ||
		    x->id.proto     != proto)
			continue;

		/* Address comparison is family-specific. */
		switch (family) {
		case AF_INET:
			if (x->id.daddr.a4 != daddr->a4)
				continue;
			break;
		case AF_INET6:
			if (!ipv6_addr_equal((struct in6_addr *)daddr,
					     (struct in6_addr *)
					     x->id.daddr.a6))
				continue;
			break;
		}

		xfrm_state_hold(x);
		return x;
	}

	return NULL;
}
719
/* Find a state by (daddr, saddr, proto, family) in the by-source
 * table - used for states that have no SPI yet.  Caller holds
 * xfrm_state_lock.  On a hit, returns the state with an extra
 * reference held; NULL if nothing matches.
 */
static struct xfrm_state *__xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto, unsigned short family)
{
	unsigned int h = xfrm_src_hash(daddr, saddr, family);
	struct xfrm_state *x;
	struct hlist_node *entry;

	hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) {
		if (x->props.family != family ||
		    x->id.proto     != proto)
			continue;

		/* Address comparison is family-specific. */
		switch (family) {
		case AF_INET:
			if (x->id.daddr.a4 != daddr->a4 ||
			    x->props.saddr.a4 != saddr->a4)
				continue;
			break;
		case AF_INET6:
			if (!ipv6_addr_equal((struct in6_addr *)daddr,
					     (struct in6_addr *)
					     x->id.daddr.a6) ||
			    !ipv6_addr_equal((struct in6_addr *)saddr,
					     (struct in6_addr *)
					     x->props.saddr.a6))
				continue;
			break;
		}

		xfrm_state_hold(x);
		return x;
	}

	return NULL;
}
754
755 static inline struct xfrm_state *
756 __xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
757 {
758 if (use_spi)
759 return __xfrm_state_lookup(&x->id.daddr, x->id.spi,
760 x->id.proto, family);
761 else
762 return __xfrm_state_lookup_byaddr(&x->id.daddr,
763 &x->props.saddr,
764 x->id.proto, family);
765 }
766
767 static void xfrm_hash_grow_check(int have_hash_collision)
768 {
769 if (have_hash_collision &&
770 (xfrm_state_hmask + 1) < xfrm_state_hashmax &&
771 xfrm_state_num > xfrm_state_hmask)
772 schedule_work(&xfrm_hash_work);
773 }
774
/* Resolve a state for an outbound flow against policy template @tmpl.
 *
 * Scans the by-destination bucket for a usable state; among VALID
 * matches, prefers non-dying states and, within the same dying status,
 * the most recently added.  If nothing is found and no acquire is
 * already pending, allocates an ACQ placeholder, notifies the key
 * manager via km_query(), and hashes the placeholder in with a
 * sysctl_xfrm_acq_expires lifetime.
 *
 * Returns a referenced state, or NULL with *err set (-EAGAIN when an
 * acquire is in progress, -ESRCH/-EEXIST/-ENOMEM otherwise).
 * Deferred puts go through to_put so no reference is dropped while
 * xfrm_state_lock is held.
 */
struct xfrm_state *
xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
		struct flowi *fl, struct xfrm_tmpl *tmpl,
		struct xfrm_policy *pol, int *err,
		unsigned short family)
{
	unsigned int h;
	struct hlist_node *entry;
	struct xfrm_state *x, *x0, *to_put;
	int acquire_in_progress = 0;
	int error = 0;
	struct xfrm_state *best = NULL;

	to_put = NULL;

	spin_lock_bh(&xfrm_state_lock);
	h = xfrm_dst_hash(daddr, saddr, tmpl->reqid, family);
	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
		if (x->props.family == family &&
		    x->props.reqid == tmpl->reqid &&
		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
		    xfrm_state_addr_check(x, daddr, saddr, family) &&
		    tmpl->mode == x->props.mode &&
		    tmpl->id.proto == x->id.proto &&
		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) {
			/* Resolution logic:
			   1. There is a valid state with matching selector.
			      Done.
			   2. Valid state with inappropriate selector. Skip.

			   Entering area of "sysdeps".

			   3. If state is not valid, selector is temporary,
			      it selects only session which triggered
			      previous resolution. Key manager will do
			      something to install a state with proper
			      selector.
			 */
			if (x->km.state == XFRM_STATE_VALID) {
				if ((x->sel.family && !xfrm_selector_match(&x->sel, fl, x->sel.family)) ||
				    !security_xfrm_state_pol_flow_match(x, pol, fl))
					continue;
				/* Prefer non-dying, then newest add_time. */
				if (!best ||
				    best->km.dying > x->km.dying ||
				    (best->km.dying == x->km.dying &&
				     best->curlft.add_time < x->curlft.add_time))
					best = x;
			} else if (x->km.state == XFRM_STATE_ACQ) {
				acquire_in_progress = 1;
			} else if (x->km.state == XFRM_STATE_ERROR ||
				   x->km.state == XFRM_STATE_EXPIRED) {
				if (xfrm_selector_match(&x->sel, fl, x->sel.family) &&
				    security_xfrm_state_pol_flow_match(x, pol, fl))
					error = -ESRCH;
			}
		}
	}

	x = best;
	if (!x && !error && !acquire_in_progress) {
		/* A template with an explicit SPI that already exists as a
		 * full state is a conflict, not a reason to acquire.
		 */
		if (tmpl->id.spi &&
		    (x0 = __xfrm_state_lookup(daddr, tmpl->id.spi,
					      tmpl->id.proto, family)) != NULL) {
			to_put = x0;
			error = -EEXIST;
			goto out;
		}
		x = xfrm_state_alloc();
		if (x == NULL) {
			error = -ENOMEM;
			goto out;
		}
		/* Initialize temporary selector matching only
		 * to current session. */
		xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);

		error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid);
		if (error) {
			x->km.state = XFRM_STATE_DEAD;
			to_put = x;
			x = NULL;
			goto out;
		}

		if (km_query(x, tmpl, pol) == 0) {
			/* Hash in the placeholder so concurrent lookups see
			 * the pending acquire; expire it if KM never answers.
			 */
			x->km.state = XFRM_STATE_ACQ;
			hlist_add_head(&x->bydst, xfrm_state_bydst+h);
			h = xfrm_src_hash(daddr, saddr, family);
			hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
			if (x->id.spi) {
				h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, family);
				hlist_add_head(&x->byspi, xfrm_state_byspi+h);
			}
			x->lft.hard_add_expires_seconds = sysctl_xfrm_acq_expires;
			x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ;
			add_timer(&x->timer);
			xfrm_state_num++;
			xfrm_hash_grow_check(x->bydst.next != NULL);
		} else {
			x->km.state = XFRM_STATE_DEAD;
			to_put = x;
			x = NULL;
			error = -ESRCH;
		}
	}
out:
	if (x)
		xfrm_state_hold(x);
	else
		*err = acquire_in_progress ? -EAGAIN : error;
	spin_unlock_bh(&xfrm_state_lock);
	/* Drop deferred references only after releasing the table lock. */
	if (to_put)
		xfrm_state_put(to_put);
	return x;
}
890
/* Find a VALID state matching (daddr, saddr, family, mode, proto,
 * reqid) without any policy/selector checks and without ever creating
 * an acquire.  Returns a referenced state or NULL.
 *
 * NOTE(review): this takes xfrm_state_lock with plain spin_lock while
 * most users in this file use spin_lock_bh - confirm all callers run
 * with bottom halves already disabled.
 */
struct xfrm_state *
xfrm_stateonly_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
		    unsigned short family, u8 mode, u8 proto, u32 reqid)
{
	unsigned int h;
	struct xfrm_state *rx = NULL, *x = NULL;
	struct hlist_node *entry;

	spin_lock(&xfrm_state_lock);
	h = xfrm_dst_hash(daddr, saddr, reqid, family);
	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
		if (x->props.family == family &&
		    x->props.reqid == reqid &&
		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
		    xfrm_state_addr_check(x, daddr, saddr, family) &&
		    mode == x->props.mode &&
		    proto == x->id.proto &&
		    x->km.state == XFRM_STATE_VALID) {
			rx = x;
			break;
		}
	}

	if (rx)
		xfrm_state_hold(rx);
	spin_unlock(&xfrm_state_lock);


	return rx;
}
EXPORT_SYMBOL(xfrm_stateonly_find);
922
/* Link @x into the global list and all applicable hash tables, stamp
 * it with a fresh generation id, and arm its timers.  Caller holds
 * xfrm_state_lock.
 */
static void __xfrm_state_insert(struct xfrm_state *x)
{
	unsigned int h;

	x->genid = ++xfrm_state_genid;

	list_add_tail(&x->all, &xfrm_state_all);

	h = xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
			  x->props.reqid, x->props.family);
	hlist_add_head(&x->bydst, xfrm_state_bydst+h);

	h = xfrm_src_hash(&x->id.daddr, &x->props.saddr, x->props.family);
	hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);

	/* States without an SPI (acquires) stay out of the SPI table. */
	if (x->id.spi) {
		h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto,
				  x->props.family);

		hlist_add_head(&x->byspi, xfrm_state_byspi+h);
	}

	/* Fire the lifetime timer soon to compute the real deadline. */
	mod_timer(&x->timer, jiffies + HZ);
	if (x->replay_maxage)
		mod_timer(&x->rtimer, jiffies + x->replay_maxage);

	wake_up(&km_waitq);

	xfrm_state_num++;

	xfrm_hash_grow_check(x->bydst.next != NULL);
}
955
/* xfrm_state_lock is held */
/* Refresh the generation id of every existing state that shares
 * @xnew's (family, reqid, daddr, saddr) key, so cached bundles built
 * on those states are invalidated when the new state is inserted.
 */
static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
{
	unsigned short family = xnew->props.family;
	u32 reqid = xnew->props.reqid;
	struct xfrm_state *x;
	struct hlist_node *entry;
	unsigned int h;

	h = xfrm_dst_hash(&xnew->id.daddr, &xnew->props.saddr, reqid, family);
	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
		if (x->props.family	== family &&
		    x->props.reqid	== reqid &&
		    !xfrm_addr_cmp(&x->id.daddr, &xnew->id.daddr, family) &&
		    !xfrm_addr_cmp(&x->props.saddr, &xnew->props.saddr, family))
			x->genid = xfrm_state_genid;
	}
}
974
/* Insert @x unconditionally: bump generation ids of clashing states,
 * then hash it in.  Locked wrapper used when the caller has already
 * resolved duplicates (contrast with xfrm_state_add()).
 */
void xfrm_state_insert(struct xfrm_state *x)
{
	spin_lock_bh(&xfrm_state_lock);
	__xfrm_state_bump_genids(x);
	__xfrm_state_insert(x);
	spin_unlock_bh(&xfrm_state_lock);
}
EXPORT_SYMBOL(xfrm_state_insert);
983
/* xfrm_state_lock is held */
/* Find an ACQ (SPI-less) placeholder state matching the given key; if
 * none exists and @create is set, allocate one, give it a selector
 * covering exactly the (daddr, saddr) pair, arm its acquire-expiry
 * timer, and hash it into the dst/src tables (not the SPI table - it
 * has no SPI).  Returns a referenced state or NULL.
 */
static struct xfrm_state *__find_acq_core(unsigned short family, u8 mode, u32 reqid, u8 proto, xfrm_address_t *daddr, xfrm_address_t *saddr, int create)
{
	unsigned int h = xfrm_dst_hash(daddr, saddr, reqid, family);
	struct hlist_node *entry;
	struct xfrm_state *x;

	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
		if (x->props.reqid  != reqid ||
		    x->props.mode   != mode ||
		    x->props.family != family ||
		    x->km.state     != XFRM_STATE_ACQ ||
		    x->id.spi       != 0 ||
		    x->id.proto	    != proto)
			continue;

		switch (family) {
		case AF_INET:
			if (x->id.daddr.a4    != daddr->a4 ||
			    x->props.saddr.a4 != saddr->a4)
				continue;
			break;
		case AF_INET6:
			if (!ipv6_addr_equal((struct in6_addr *)x->id.daddr.a6,
					     (struct in6_addr *)daddr) ||
			    !ipv6_addr_equal((struct in6_addr *)
					     x->props.saddr.a6,
					     (struct in6_addr *)saddr))
				continue;
			break;
		}

		xfrm_state_hold(x);
		return x;
	}

	if (!create)
		return NULL;

	x = xfrm_state_alloc();
	if (likely(x)) {
		/* Host-exact selector: full prefix length per family. */
		switch (family) {
		case AF_INET:
			x->sel.daddr.a4 = daddr->a4;
			x->sel.saddr.a4 = saddr->a4;
			x->sel.prefixlen_d = 32;
			x->sel.prefixlen_s = 32;
			x->props.saddr.a4 = saddr->a4;
			x->id.daddr.a4 = daddr->a4;
			break;

		case AF_INET6:
			ipv6_addr_copy((struct in6_addr *)x->sel.daddr.a6,
				       (struct in6_addr *)daddr);
			ipv6_addr_copy((struct in6_addr *)x->sel.saddr.a6,
				       (struct in6_addr *)saddr);
			x->sel.prefixlen_d = 128;
			x->sel.prefixlen_s = 128;
			ipv6_addr_copy((struct in6_addr *)x->props.saddr.a6,
				       (struct in6_addr *)saddr);
			ipv6_addr_copy((struct in6_addr *)x->id.daddr.a6,
				       (struct in6_addr *)daddr);
			break;
		}

		x->km.state = XFRM_STATE_ACQ;
		x->id.proto = proto;
		x->props.family = family;
		x->props.mode = mode;
		x->props.reqid = reqid;
		x->lft.hard_add_expires_seconds = sysctl_xfrm_acq_expires;
		/* Extra hold: one reference for the caller, the alloc
		 * reference stays with the hash tables. */
		xfrm_state_hold(x);
		x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ;
		add_timer(&x->timer);
		hlist_add_head(&x->bydst, xfrm_state_bydst+h);
		h = xfrm_src_hash(daddr, saddr, family);
		hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);

		xfrm_state_num++;

		xfrm_hash_grow_check(x->bydst.next != NULL);
	}

	return x;
}
1069
1070 static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq);
1071
/* Add a fully specified state, failing with -EEXIST if an identical
 * one is already installed.
 *
 * If a matching ACQ placeholder exists (found by KM sequence number or
 * by address/reqid), it is deleted after the insert so the pending
 * acquire is replaced by the real state.  References that must be
 * dropped outside xfrm_state_lock are deferred through to_put.
 */
int xfrm_state_add(struct xfrm_state *x)
{
	struct xfrm_state *x1, *to_put;
	int family;
	int err;
	int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);

	family = x->props.family;

	to_put = NULL;

	spin_lock_bh(&xfrm_state_lock);

	x1 = __xfrm_state_locate(x, use_spi, family);
	if (x1) {
		to_put = x1;
		x1 = NULL;
		err = -EEXIST;
		goto out;
	}

	/* Prefer the acquire that triggered this add, identified by the
	 * key manager sequence number - but only if it really matches. */
	if (use_spi && x->km.seq) {
		x1 = __xfrm_find_acq_byseq(x->km.seq);
		if (x1 && ((x1->id.proto != x->id.proto) ||
		    xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family))) {
			to_put = x1;
			x1 = NULL;
		}
	}

	if (use_spi && !x1)
		x1 = __find_acq_core(family, x->props.mode, x->props.reqid,
				     x->id.proto,
				     &x->id.daddr, &x->props.saddr, 0);

	__xfrm_state_bump_genids(x);
	__xfrm_state_insert(x);
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);

	/* Retire the superseded acquire placeholder, if any. */
	if (x1) {
		xfrm_state_delete(x1);
		xfrm_state_put(x1);
	}

	if (to_put)
		xfrm_state_put(to_put);

	return err;
}
EXPORT_SYMBOL(xfrm_state_add);
1125
1126 #ifdef CONFIG_XFRM_MIGRATE
/* Duplicate @orig for migration: copies identity, selector, lifetime
 * configuration, algorithm descriptors (deep copies), encapsulation
 * and care-of address, then re-runs xfrm_init_state() on the copy.
 * Returns the new state, or NULL with *errp set (when @errp is
 * non-NULL).  On failure all partially copied allocations are freed.
 */
static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
{
	int err = -ENOMEM;
	struct xfrm_state *x = xfrm_state_alloc();
	if (!x)
		goto error;

	memcpy(&x->id, &orig->id, sizeof(x->id));
	memcpy(&x->sel, &orig->sel, sizeof(x->sel));
	memcpy(&x->lft, &orig->lft, sizeof(x->lft));
	x->props.mode = orig->props.mode;
	x->props.replay_window = orig->props.replay_window;
	x->props.reqid = orig->props.reqid;
	x->props.family = orig->props.family;
	x->props.saddr = orig->props.saddr;

	if (orig->aalg) {
		x->aalg = xfrm_algo_clone(orig->aalg);
		if (!x->aalg)
			goto error;
	}
	x->props.aalgo = orig->props.aalgo;

	if (orig->ealg) {
		x->ealg = xfrm_algo_clone(orig->ealg);
		if (!x->ealg)
			goto error;
	}
	x->props.ealgo = orig->props.ealgo;

	if (orig->calg) {
		x->calg = xfrm_algo_clone(orig->calg);
		if (!x->calg)
			goto error;
	}
	x->props.calgo = orig->props.calgo;

	if (orig->encap) {
		x->encap = kmemdup(orig->encap, sizeof(*x->encap), GFP_KERNEL);
		if (!x->encap)
			goto error;
	}

	if (orig->coaddr) {
		x->coaddr = kmemdup(orig->coaddr, sizeof(*x->coaddr),
				    GFP_KERNEL);
		if (!x->coaddr)
			goto error;
	}

	err = xfrm_init_state(x);
	if (err)
		goto error;

	x->props.flags = orig->props.flags;

	/* Preserve age and KM bookkeeping across the migration. */
	x->curlft.add_time = orig->curlft.add_time;
	x->km.state = orig->km.state;
	x->km.seq = orig->km.seq;

	return x;

 error:
	if (errp)
		*errp = err;
	if (x) {
		/* kfree(NULL) is safe for fields not yet duplicated. */
		kfree(x->aalg);
		kfree(x->ealg);
		kfree(x->calg);
		kfree(x->encap);
		kfree(x->coaddr);
	}
	kfree(x);
	return NULL;
}
1202
/* xfrm_state_lock is held.
 *
 * Locate the source SA of migration request @m.  When a reqid is
 * given, the narrower by-destination hash chain is searched; otherwise
 * the by-source chain is used.  A matching state must agree with the
 * request on mode, protocol and both old endpoint addresses.
 *
 * On success a reference to the state is taken (xfrm_state_hold) and
 * the state is returned; NULL if no match was found.
 */
struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m)
{
	unsigned int h;
	struct xfrm_state *x;
	struct hlist_node *entry;

	if (m->reqid) {
		h = xfrm_dst_hash(&m->old_daddr, &m->old_saddr,
				  m->reqid, m->old_family);
		hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
			if (x->props.mode != m->mode ||
			    x->id.proto != m->proto)
				continue;
			/* NOTE(review): the m->reqid test is redundant here
			 * (this branch is only taken when m->reqid != 0). */
			if (m->reqid && x->props.reqid != m->reqid)
				continue;
			if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
					  m->old_family) ||
			    xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
					  m->old_family))
				continue;
			xfrm_state_hold(x);
			return x;
		}
	} else {
		h = xfrm_src_hash(&m->old_daddr, &m->old_saddr,
				  m->old_family);
		hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) {
			if (x->props.mode != m->mode ||
			    x->id.proto != m->proto)
				continue;
			if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
					  m->old_family) ||
			    xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
					  m->old_family))
				continue;
			xfrm_state_hold(x);
			return x;
		}
	}

	return NULL;
}
EXPORT_SYMBOL(xfrm_migrate_state_find);
1247
1248 struct xfrm_state * xfrm_state_migrate(struct xfrm_state *x,
1249 struct xfrm_migrate *m)
1250 {
1251 struct xfrm_state *xc;
1252 int err;
1253
1254 xc = xfrm_state_clone(x, &err);
1255 if (!xc)
1256 return NULL;
1257
1258 memcpy(&xc->id.daddr, &m->new_daddr, sizeof(xc->id.daddr));
1259 memcpy(&xc->props.saddr, &m->new_saddr, sizeof(xc->props.saddr));
1260
1261 /* add state */
1262 if (!xfrm_addr_cmp(&x->id.daddr, &m->new_daddr, m->new_family)) {
1263 /* a care is needed when the destination address of the
1264 state is to be updated as it is a part of triplet */
1265 xfrm_state_insert(xc);
1266 } else {
1267 if ((err = xfrm_state_add(xc)) < 0)
1268 goto error;
1269 }
1270
1271 return xc;
1272 error:
1273 kfree(xc);
1274 return NULL;
1275 }
1276 EXPORT_SYMBOL(xfrm_state_migrate);
1277 #endif
1278
/* Update an installed SA with the attributes carried in @x.
 *
 * Locates the installed state matching @x (by SPI for AH/ESP/IPcomp,
 * by address pair otherwise).  If the match is a larval (ACQ) state,
 * @x is inserted in its place and the larval entry is deleted — @x is
 * consumed in that case.  Otherwise the encap template, care-of
 * address, selector and lifetimes of the installed state are refreshed
 * from @x.
 *
 * Returns 0 on success, -ESRCH if no matching SA is installed,
 * -EEXIST if the match is a kernel-internal state, or -EINVAL if the
 * installed SA is no longer valid.
 */
int xfrm_state_update(struct xfrm_state *x)
{
	struct xfrm_state *x1, *to_put;
	int err;
	/* Non-zero for protocols that are located by SPI. */
	int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);

	to_put = NULL;

	spin_lock_bh(&xfrm_state_lock);
	x1 = __xfrm_state_locate(x, use_spi, x->props.family);

	err = -ESRCH;
	if (!x1)
		goto out;

	if (xfrm_state_kern(x1)) {
		/* Kernel-owned states may not be replaced from here. */
		to_put = x1;
		err = -EEXIST;
		goto out;
	}

	if (x1->km.state == XFRM_STATE_ACQ) {
		/* Larval state: promote @x in its place.  x is cleared so
		 * that the larval x1 is deleted below instead of being
		 * updated in place. */
		__xfrm_state_insert(x);
		x = NULL;
	}
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);

	if (to_put)
		xfrm_state_put(to_put);

	if (err)
		return err;

	if (!x) {
		/* @x replaced a larval state; drop the larval entry. */
		xfrm_state_delete(x1);
		xfrm_state_put(x1);
		return 0;
	}

	err = -EINVAL;
	spin_lock_bh(&x1->lock);
	if (likely(x1->km.state == XFRM_STATE_VALID)) {
		/* Refresh the mutable attributes of the installed SA. */
		if (x->encap && x1->encap)
			memcpy(x1->encap, x->encap, sizeof(*x1->encap));
		if (x->coaddr && x1->coaddr) {
			memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr));
		}
		if (!use_spi && memcmp(&x1->sel, &x->sel, sizeof(x1->sel)))
			memcpy(&x1->sel, &x->sel, sizeof(x1->sel));
		memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
		x1->km.dying = 0;

		/* Re-arm the lifetime timer and re-check expiry against
		 * the freshly-copied limits. */
		mod_timer(&x1->timer, jiffies + HZ);
		if (x1->curlft.use_time)
			xfrm_state_check_expire(x1);

		err = 0;
	}
	spin_unlock_bh(&x1->lock);

	xfrm_state_put(x1);

	return err;
}
EXPORT_SYMBOL(xfrm_state_update);
1347
1348 int xfrm_state_check_expire(struct xfrm_state *x)
1349 {
1350 if (!x->curlft.use_time)
1351 x->curlft.use_time = get_seconds();
1352
1353 if (x->km.state != XFRM_STATE_VALID)
1354 return -EINVAL;
1355
1356 if (x->curlft.bytes >= x->lft.hard_byte_limit ||
1357 x->curlft.packets >= x->lft.hard_packet_limit) {
1358 x->km.state = XFRM_STATE_EXPIRED;
1359 mod_timer(&x->timer, jiffies);
1360 return -EINVAL;
1361 }
1362
1363 if (!x->km.dying &&
1364 (x->curlft.bytes >= x->lft.soft_byte_limit ||
1365 x->curlft.packets >= x->lft.soft_packet_limit)) {
1366 x->km.dying = 1;
1367 km_state_expired(x, 0, 0);
1368 }
1369 return 0;
1370 }
1371 EXPORT_SYMBOL(xfrm_state_check_expire);
1372
1373 struct xfrm_state *
1374 xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto,
1375 unsigned short family)
1376 {
1377 struct xfrm_state *x;
1378
1379 spin_lock_bh(&xfrm_state_lock);
1380 x = __xfrm_state_lookup(daddr, spi, proto, family);
1381 spin_unlock_bh(&xfrm_state_lock);
1382 return x;
1383 }
1384 EXPORT_SYMBOL(xfrm_state_lookup);
1385
1386 struct xfrm_state *
1387 xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr,
1388 u8 proto, unsigned short family)
1389 {
1390 struct xfrm_state *x;
1391
1392 spin_lock_bh(&xfrm_state_lock);
1393 x = __xfrm_state_lookup_byaddr(daddr, saddr, proto, family);
1394 spin_unlock_bh(&xfrm_state_lock);
1395 return x;
1396 }
1397 EXPORT_SYMBOL(xfrm_state_lookup_byaddr);
1398
1399 struct xfrm_state *
1400 xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
1401 xfrm_address_t *daddr, xfrm_address_t *saddr,
1402 int create, unsigned short family)
1403 {
1404 struct xfrm_state *x;
1405
1406 spin_lock_bh(&xfrm_state_lock);
1407 x = __find_acq_core(family, mode, reqid, proto, daddr, saddr, create);
1408 spin_unlock_bh(&xfrm_state_lock);
1409
1410 return x;
1411 }
1412 EXPORT_SYMBOL(xfrm_find_acq);
1413
1414 #ifdef CONFIG_XFRM_SUB_POLICY
1415 int
1416 xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
1417 unsigned short family)
1418 {
1419 int err = 0;
1420 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
1421 if (!afinfo)
1422 return -EAFNOSUPPORT;
1423
1424 spin_lock_bh(&xfrm_state_lock);
1425 if (afinfo->tmpl_sort)
1426 err = afinfo->tmpl_sort(dst, src, n);
1427 spin_unlock_bh(&xfrm_state_lock);
1428 xfrm_state_put_afinfo(afinfo);
1429 return err;
1430 }
1431 EXPORT_SYMBOL(xfrm_tmpl_sort);
1432
1433 int
1434 xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
1435 unsigned short family)
1436 {
1437 int err = 0;
1438 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
1439 if (!afinfo)
1440 return -EAFNOSUPPORT;
1441
1442 spin_lock_bh(&xfrm_state_lock);
1443 if (afinfo->state_sort)
1444 err = afinfo->state_sort(dst, src, n);
1445 spin_unlock_bh(&xfrm_state_lock);
1446 xfrm_state_put_afinfo(afinfo);
1447 return err;
1448 }
1449 EXPORT_SYMBOL(xfrm_state_sort);
1450 #endif
1451
1452 /* Silly enough, but I'm lazy to build resolution list */
1453
1454 static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq)
1455 {
1456 int i;
1457
1458 for (i = 0; i <= xfrm_state_hmask; i++) {
1459 struct hlist_node *entry;
1460 struct xfrm_state *x;
1461
1462 hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
1463 if (x->km.seq == seq &&
1464 x->km.state == XFRM_STATE_ACQ) {
1465 xfrm_state_hold(x);
1466 return x;
1467 }
1468 }
1469 }
1470 return NULL;
1471 }
1472
1473 struct xfrm_state *xfrm_find_acq_byseq(u32 seq)
1474 {
1475 struct xfrm_state *x;
1476
1477 spin_lock_bh(&xfrm_state_lock);
1478 x = __xfrm_find_acq_byseq(seq);
1479 spin_unlock_bh(&xfrm_state_lock);
1480 return x;
1481 }
1482 EXPORT_SYMBOL(xfrm_find_acq_byseq);
1483
1484 u32 xfrm_get_acqseq(void)
1485 {
1486 u32 res;
1487 static u32 acqseq;
1488 static DEFINE_SPINLOCK(acqseq_lock);
1489
1490 spin_lock_bh(&acqseq_lock);
1491 res = (++acqseq ? : ++acqseq);
1492 spin_unlock_bh(&acqseq_lock);
1493 return res;
1494 }
1495 EXPORT_SYMBOL(xfrm_get_acqseq);
1496
/* Assign an SPI to state @x from the inclusive range [low, high].
 *
 * If low == high the single requested SPI is used, provided no other
 * SA already claims it.  Otherwise up to high-low+1 random probes are
 * made within the range until a free SPI is found.  On success the
 * state is hashed into the byspi table so it becomes findable by SPI.
 *
 * Returns 0 when x->id.spi ends up set (including when it was already
 * set on entry); -ENOENT when the state is dead, the requested SPI is
 * taken, or no free SPI could be found.
 */
int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
{
	unsigned int h;
	struct xfrm_state *x0;
	int err = -ENOENT;
	__be32 minspi = htonl(low);
	__be32 maxspi = htonl(high);

	spin_lock_bh(&x->lock);
	if (x->km.state == XFRM_STATE_DEAD)
		goto unlock;

	/* Already has an SPI: nothing to do. */
	err = 0;
	if (x->id.spi)
		goto unlock;

	err = -ENOENT;

	if (minspi == maxspi) {
		/* Caller requested one specific SPI; fail if taken. */
		x0 = xfrm_state_lookup(&x->id.daddr, minspi, x->id.proto, x->props.family);
		if (x0) {
			xfrm_state_put(x0);
			goto unlock;
		}
		x->id.spi = minspi;
	} else {
		/* Randomised search, bounded by the size of the range. */
		u32 spi = 0;
		for (h=0; h<high-low+1; h++) {
			spi = low + net_random()%(high-low+1);
			x0 = xfrm_state_lookup(&x->id.daddr, htonl(spi), x->id.proto, x->props.family);
			if (x0 == NULL) {
				x->id.spi = htonl(spi);
				break;
			}
			xfrm_state_put(x0);
		}
	}
	if (x->id.spi) {
		/* Publish the state in the by-SPI hash table. */
		spin_lock_bh(&xfrm_state_lock);
		h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
		hlist_add_head(&x->byspi, xfrm_state_byspi+h);
		spin_unlock_bh(&xfrm_state_lock);

		err = 0;
	}

unlock:
	spin_unlock_bh(&x->lock);

	return err;
}
EXPORT_SYMBOL(xfrm_alloc_spi);
1549
/* Iterate over all installed states, invoking @func for each one that
 * matches walk->proto.  The walk is resumable: when @func returns
 * non-zero, the current position is recorded in @walk (with a
 * reference held) so a later call continues where it stopped.  The
 * final matching entry is delivered with count 0 to mark the end of
 * the dump.
 *
 * Returns 0 on completion, -ENOENT when nothing matched, or the
 * non-zero value returned by @func when interrupted.
 */
int xfrm_state_walk(struct xfrm_state_walk *walk,
		    int (*func)(struct xfrm_state *, int, void*),
		    void *data)
{
	struct xfrm_state *old, *x, *last = NULL;
	int err = 0;

	/* A finished walk (no saved position but entries already
	 * delivered) must not restart from the beginning. */
	if (walk->state == NULL && walk->count != 0)
		return 0;

	old = x = walk->state;
	walk->state = NULL;
	spin_lock_bh(&xfrm_state_lock);
	if (x == NULL)
		x = list_first_entry(&xfrm_state_all, struct xfrm_state, all);
	list_for_each_entry_from(x, &xfrm_state_all, all) {
		if (x->km.state == XFRM_STATE_DEAD)
			continue;
		if (!xfrm_id_proto_match(x->id.proto, walk->proto))
			continue;
		/* Deliver entries one step behind so the last match can
		 * be flagged with count 0 after the loop. */
		if (last) {
			err = func(last, walk->count, data);
			if (err) {
				/* Interrupted: save the resume point with
				 * a reference held. */
				xfrm_state_hold(last);
				walk->state = last;
				goto out;
			}
		}
		last = x;
		walk->count++;
	}
	if (walk->count == 0) {
		err = -ENOENT;
		goto out;
	}
	if (last)
		err = func(last, 0, data);
out:
	spin_unlock_bh(&xfrm_state_lock);
	/* Drop the reference taken when the walk was last suspended. */
	if (old != NULL)
		xfrm_state_put(old);
	return err;
}
EXPORT_SYMBOL(xfrm_state_walk);
1594
1595
/* Decide whether a replay-state change warrants an asynchronous event
 * (XFRM_MSG_NEWAE) to the key managers, and send it if so. */
void xfrm_replay_notify(struct xfrm_state *x, int event)
{
	struct km_event c;
	/* we send notify messages in case
	 * 1. we updated one of the sequence numbers, and the seqno difference
	 * is at least x->replay_maxdiff, in this case we also update the
	 * timeout of our timer function
	 * 2. if x->replay_maxage has elapsed since last update,
	 * and there were changes
	 *
	 * The state structure must be locked!
	 */

	switch (event) {
	case XFRM_REPLAY_UPDATE:
		/* Below-threshold changes are normally suppressed; but if
		 * a timeout was deferred earlier, convert this update into
		 * the deferred timeout notification instead. */
		if (x->replay_maxdiff &&
		    (x->replay.seq - x->preplay.seq < x->replay_maxdiff) &&
		    (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff)) {
			if (x->xflags & XFRM_TIME_DEFER)
				event = XFRM_REPLAY_TIMEOUT;
			else
				return;
		}

		break;

	case XFRM_REPLAY_TIMEOUT:
		/* Nothing changed since the last notification: remember
		 * that a timeout is pending and send nothing now. */
		if ((x->replay.seq == x->preplay.seq) &&
		    (x->replay.bitmap == x->preplay.bitmap) &&
		    (x->replay.oseq == x->preplay.oseq)) {
			x->xflags |= XFRM_TIME_DEFER;
			return;
		}

		break;
	}

	/* Snapshot the state being reported and notify key managers. */
	memcpy(&x->preplay, &x->replay, sizeof(struct xfrm_replay_state));
	c.event = XFRM_MSG_NEWAE;
	c.data.aevent = event;
	km_state_notify(x, &c);

	/* Re-arm the aging timer; clear the defer flag only when the
	 * timer was not already pending. */
	if (x->replay_maxage &&
	    !mod_timer(&x->rtimer, jiffies + x->replay_maxage))
		x->xflags &= ~XFRM_TIME_DEFER;
}
1642
1643 static void xfrm_replay_timer_handler(unsigned long data)
1644 {
1645 struct xfrm_state *x = (struct xfrm_state*)data;
1646
1647 spin_lock(&x->lock);
1648
1649 if (x->km.state == XFRM_STATE_VALID) {
1650 if (xfrm_aevent_is_on())
1651 xfrm_replay_notify(x, XFRM_REPLAY_TIMEOUT);
1652 else
1653 x->xflags |= XFRM_TIME_DEFER;
1654 }
1655
1656 spin_unlock(&x->lock);
1657 }
1658
1659 int xfrm_replay_check(struct xfrm_state *x,
1660 struct sk_buff *skb, __be32 net_seq)
1661 {
1662 u32 diff;
1663 u32 seq = ntohl(net_seq);
1664
1665 if (unlikely(seq == 0))
1666 goto err;
1667
1668 if (likely(seq > x->replay.seq))
1669 return 0;
1670
1671 diff = x->replay.seq - seq;
1672 if (diff >= min_t(unsigned int, x->props.replay_window,
1673 sizeof(x->replay.bitmap) * 8)) {
1674 x->stats.replay_window++;
1675 goto err;
1676 }
1677
1678 if (x->replay.bitmap & (1U << diff)) {
1679 x->stats.replay++;
1680 goto err;
1681 }
1682 return 0;
1683
1684 err:
1685 xfrm_audit_state_replay(x, skb, net_seq);
1686 return -EINVAL;
1687 }
1688
/* Record sequence number @net_seq as received: slide the anti-replay
 * window forward for a fresh sequence number, or set the in-window bit
 * for an older one.
 *
 * NOTE(review): the shifts assume the backward distance fits the
 * bitmap width, i.e. that xfrm_replay_check() accepted this packet
 * first — confirm all callers do so.  Caller holds the state lock.
 */
void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq)
{
	u32 diff;
	u32 seq = ntohl(net_seq);

	if (seq > x->replay.seq) {
		/* Window advances: shift the history and mark @seq
		 * itself as seen (bit 0). */
		diff = seq - x->replay.seq;
		if (diff < x->props.replay_window)
			x->replay.bitmap = ((x->replay.bitmap) << diff) | 1;
		else
			x->replay.bitmap = 1;
		x->replay.seq = seq;
	} else {
		/* Old-but-valid packet: set its bit inside the window. */
		diff = x->replay.seq - seq;
		x->replay.bitmap |= (1U << diff);
	}

	if (xfrm_aevent_is_on())
		xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
}
1709
1710 static LIST_HEAD(xfrm_km_list);
1711 static DEFINE_RWLOCK(xfrm_km_lock);
1712
1713 void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
1714 {
1715 struct xfrm_mgr *km;
1716
1717 read_lock(&xfrm_km_lock);
1718 list_for_each_entry(km, &xfrm_km_list, list)
1719 if (km->notify_policy)
1720 km->notify_policy(xp, dir, c);
1721 read_unlock(&xfrm_km_lock);
1722 }
1723
1724 void km_state_notify(struct xfrm_state *x, struct km_event *c)
1725 {
1726 struct xfrm_mgr *km;
1727 read_lock(&xfrm_km_lock);
1728 list_for_each_entry(km, &xfrm_km_list, list)
1729 if (km->notify)
1730 km->notify(x, c);
1731 read_unlock(&xfrm_km_lock);
1732 }
1733
1734 EXPORT_SYMBOL(km_policy_notify);
1735 EXPORT_SYMBOL(km_state_notify);
1736
1737 void km_state_expired(struct xfrm_state *x, int hard, u32 pid)
1738 {
1739 struct km_event c;
1740
1741 c.data.hard = hard;
1742 c.pid = pid;
1743 c.event = XFRM_MSG_EXPIRE;
1744 km_state_notify(x, &c);
1745
1746 if (hard)
1747 wake_up(&km_waitq);
1748 }
1749
1750 EXPORT_SYMBOL(km_state_expired);
1751 /*
1752 * We send to all registered managers regardless of failure
1753 * We are happy with one success
1754 */
1755 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
1756 {
1757 int err = -EINVAL, acqret;
1758 struct xfrm_mgr *km;
1759
1760 read_lock(&xfrm_km_lock);
1761 list_for_each_entry(km, &xfrm_km_list, list) {
1762 acqret = km->acquire(x, t, pol, XFRM_POLICY_OUT);
1763 if (!acqret)
1764 err = acqret;
1765 }
1766 read_unlock(&xfrm_km_lock);
1767 return err;
1768 }
1769 EXPORT_SYMBOL(km_query);
1770
1771 int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
1772 {
1773 int err = -EINVAL;
1774 struct xfrm_mgr *km;
1775
1776 read_lock(&xfrm_km_lock);
1777 list_for_each_entry(km, &xfrm_km_list, list) {
1778 if (km->new_mapping)
1779 err = km->new_mapping(x, ipaddr, sport);
1780 if (!err)
1781 break;
1782 }
1783 read_unlock(&xfrm_km_lock);
1784 return err;
1785 }
1786 EXPORT_SYMBOL(km_new_mapping);
1787
1788 void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid)
1789 {
1790 struct km_event c;
1791
1792 c.data.hard = hard;
1793 c.pid = pid;
1794 c.event = XFRM_MSG_POLEXPIRE;
1795 km_policy_notify(pol, dir, &c);
1796
1797 if (hard)
1798 wake_up(&km_waitq);
1799 }
1800 EXPORT_SYMBOL(km_policy_expired);
1801
1802 #ifdef CONFIG_XFRM_MIGRATE
1803 int km_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
1804 struct xfrm_migrate *m, int num_migrate)
1805 {
1806 int err = -EINVAL;
1807 int ret;
1808 struct xfrm_mgr *km;
1809
1810 read_lock(&xfrm_km_lock);
1811 list_for_each_entry(km, &xfrm_km_list, list) {
1812 if (km->migrate) {
1813 ret = km->migrate(sel, dir, type, m, num_migrate);
1814 if (!ret)
1815 err = ret;
1816 }
1817 }
1818 read_unlock(&xfrm_km_lock);
1819 return err;
1820 }
1821 EXPORT_SYMBOL(km_migrate);
1822 #endif
1823
1824 int km_report(u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr)
1825 {
1826 int err = -EINVAL;
1827 int ret;
1828 struct xfrm_mgr *km;
1829
1830 read_lock(&xfrm_km_lock);
1831 list_for_each_entry(km, &xfrm_km_list, list) {
1832 if (km->report) {
1833 ret = km->report(proto, sel, addr);
1834 if (!ret)
1835 err = ret;
1836 }
1837 }
1838 read_unlock(&xfrm_km_lock);
1839 return err;
1840 }
1841 EXPORT_SYMBOL(km_report);
1842
1843 int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
1844 {
1845 int err;
1846 u8 *data;
1847 struct xfrm_mgr *km;
1848 struct xfrm_policy *pol = NULL;
1849
1850 if (optlen <= 0 || optlen > PAGE_SIZE)
1851 return -EMSGSIZE;
1852
1853 data = kmalloc(optlen, GFP_KERNEL);
1854 if (!data)
1855 return -ENOMEM;
1856
1857 err = -EFAULT;
1858 if (copy_from_user(data, optval, optlen))
1859 goto out;
1860
1861 err = -EINVAL;
1862 read_lock(&xfrm_km_lock);
1863 list_for_each_entry(km, &xfrm_km_list, list) {
1864 pol = km->compile_policy(sk, optname, data,
1865 optlen, &err);
1866 if (err >= 0)
1867 break;
1868 }
1869 read_unlock(&xfrm_km_lock);
1870
1871 if (err >= 0) {
1872 xfrm_sk_policy_insert(sk, err, pol);
1873 xfrm_pol_put(pol);
1874 err = 0;
1875 }
1876
1877 out:
1878 kfree(data);
1879 return err;
1880 }
1881 EXPORT_SYMBOL(xfrm_user_policy);
1882
/* Register key manager @km: append it to the global manager list.
 * Always succeeds (returns 0). */
int xfrm_register_km(struct xfrm_mgr *km)
{
	write_lock_bh(&xfrm_km_lock);
	list_add_tail(&km->list, &xfrm_km_list);
	write_unlock_bh(&xfrm_km_lock);
	return 0;
}
EXPORT_SYMBOL(xfrm_register_km);
1891
/* Unregister key manager @km: unlink it from the global manager list.
 * Always succeeds (returns 0). */
int xfrm_unregister_km(struct xfrm_mgr *km)
{
	write_lock_bh(&xfrm_km_lock);
	list_del(&km->list);
	write_unlock_bh(&xfrm_km_lock);
	return 0;
}
EXPORT_SYMBOL(xfrm_unregister_km);
1900
1901 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
1902 {
1903 int err = 0;
1904 if (unlikely(afinfo == NULL))
1905 return -EINVAL;
1906 if (unlikely(afinfo->family >= NPROTO))
1907 return -EAFNOSUPPORT;
1908 write_lock_bh(&xfrm_state_afinfo_lock);
1909 if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
1910 err = -ENOBUFS;
1911 else
1912 xfrm_state_afinfo[afinfo->family] = afinfo;
1913 write_unlock_bh(&xfrm_state_afinfo_lock);
1914 return err;
1915 }
1916 EXPORT_SYMBOL(xfrm_state_register_afinfo);
1917
1918 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
1919 {
1920 int err = 0;
1921 if (unlikely(afinfo == NULL))
1922 return -EINVAL;
1923 if (unlikely(afinfo->family >= NPROTO))
1924 return -EAFNOSUPPORT;
1925 write_lock_bh(&xfrm_state_afinfo_lock);
1926 if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
1927 if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo))
1928 err = -EINVAL;
1929 else
1930 xfrm_state_afinfo[afinfo->family] = NULL;
1931 }
1932 write_unlock_bh(&xfrm_state_afinfo_lock);
1933 return err;
1934 }
1935 EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
1936
/* Look up the afinfo for @family and, on success, return it with
 * xfrm_state_afinfo_lock held for read — the lock is dropped here only
 * on failure.  A successful call MUST be paired with
 * xfrm_state_put_afinfo() to release the lock. */
static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
{
	struct xfrm_state_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	read_lock(&xfrm_state_afinfo_lock);
	afinfo = xfrm_state_afinfo[family];
	if (unlikely(!afinfo))
		read_unlock(&xfrm_state_afinfo_lock);
	return afinfo;
}
1948
/* Release the read lock taken by a successful xfrm_state_get_afinfo().
 * @afinfo itself is unused; the parameter documents the pairing. */
static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
	__releases(xfrm_state_afinfo_lock)
{
	read_unlock(&xfrm_state_afinfo_lock);
}
1954
1955 /* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
1956 void xfrm_state_delete_tunnel(struct xfrm_state *x)
1957 {
1958 if (x->tunnel) {
1959 struct xfrm_state *t = x->tunnel;
1960
1961 if (atomic_read(&t->tunnel_users) == 2)
1962 xfrm_state_delete(t);
1963 atomic_dec(&t->tunnel_users);
1964 xfrm_state_put(t);
1965 x->tunnel = NULL;
1966 }
1967 }
1968 EXPORT_SYMBOL(xfrm_state_delete_tunnel);
1969
1970 int xfrm_state_mtu(struct xfrm_state *x, int mtu)
1971 {
1972 int res;
1973
1974 spin_lock_bh(&x->lock);
1975 if (x->km.state == XFRM_STATE_VALID &&
1976 x->type && x->type->get_mtu)
1977 res = x->type->get_mtu(x, mtu);
1978 else
1979 res = mtu - x->props.header_len;
1980 spin_unlock_bh(&x->lock);
1981 return res;
1982 }
1983
/* Finish constructing state @x: apply per-family init flags, resolve
 * the inner and outer mode objects and the protocol type, and run the
 * type's own initialisation.  On success the state becomes
 * XFRM_STATE_VALID.
 *
 * Returns 0 or a negative errno.  On failure the state may be left
 * holding some module references (type/modes); the caller is expected
 * to dispose of it through the normal destructor path.
 */
int xfrm_init_state(struct xfrm_state *x)
{
	struct xfrm_state_afinfo *afinfo;
	struct xfrm_mode *inner_mode;
	int family = x->props.family;
	int err;

	err = -EAFNOSUPPORT;
	afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		goto error;

	err = 0;
	if (afinfo->init_flags)
		err = afinfo->init_flags(x);

	xfrm_state_put_afinfo(afinfo);

	if (err)
		goto error;

	err = -EPROTONOSUPPORT;

	if (x->sel.family != AF_UNSPEC) {
		/* Selector pins the inner family.  A non-tunnel mode must
		 * match the state's own family. */
		inner_mode = xfrm_get_mode(x->props.mode, x->sel.family);
		if (inner_mode == NULL)
			goto error;

		if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL) &&
		    family != x->sel.family) {
			xfrm_put_mode(inner_mode);
			goto error;
		}

		x->inner_mode = inner_mode;
	} else {
		/* AF_UNSPEC selector: inter-address-family case.  Both
		 * the IPv4 and IPv6 inner modes are needed, and both must
		 * be tunnel modes. */
		struct xfrm_mode *inner_mode_iaf;

		inner_mode = xfrm_get_mode(x->props.mode, AF_INET);
		if (inner_mode == NULL)
			goto error;

		if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL)) {
			xfrm_put_mode(inner_mode);
			goto error;
		}

		inner_mode_iaf = xfrm_get_mode(x->props.mode, AF_INET6);
		if (inner_mode_iaf == NULL)
			goto error;

		if (!(inner_mode_iaf->flags & XFRM_MODE_FLAG_TUNNEL)) {
			xfrm_put_mode(inner_mode_iaf);
			goto error;
		}

		/* inner_mode carries the state's own family; the other
		 * family goes into inner_mode_iaf. */
		if (x->props.family == AF_INET) {
			x->inner_mode = inner_mode;
			x->inner_mode_iaf = inner_mode_iaf;
		} else {
			x->inner_mode = inner_mode_iaf;
			x->inner_mode_iaf = inner_mode;
		}
	}

	x->type = xfrm_get_type(x->id.proto, family);
	if (x->type == NULL)
		goto error;

	err = x->type->init_state(x);
	if (err)
		goto error;

	x->outer_mode = xfrm_get_mode(x->props.mode, family);
	if (x->outer_mode == NULL)
		goto error;

	x->km.state = XFRM_STATE_VALID;

error:
	return err;
}

EXPORT_SYMBOL(xfrm_init_state);
2068
2069 void __init xfrm_state_init(void)
2070 {
2071 unsigned int sz;
2072
2073 sz = sizeof(struct hlist_head) * 8;
2074
2075 xfrm_state_bydst = xfrm_hash_alloc(sz);
2076 xfrm_state_bysrc = xfrm_hash_alloc(sz);
2077 xfrm_state_byspi = xfrm_hash_alloc(sz);
2078 if (!xfrm_state_bydst || !xfrm_state_bysrc || !xfrm_state_byspi)
2079 panic("XFRM: Cannot allocate bydst/bysrc/byspi hashes.");
2080 xfrm_state_hmask = ((sz / sizeof(struct hlist_head)) - 1);
2081
2082 INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task);
2083 }
2084
2085 #ifdef CONFIG_AUDITSYSCALL
2086 static void xfrm_audit_helper_sainfo(struct xfrm_state *x,
2087 struct audit_buffer *audit_buf)
2088 {
2089 struct xfrm_sec_ctx *ctx = x->security;
2090 u32 spi = ntohl(x->id.spi);
2091
2092 if (ctx)
2093 audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
2094 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
2095
2096 switch(x->props.family) {
2097 case AF_INET:
2098 audit_log_format(audit_buf,
2099 " src=" NIPQUAD_FMT " dst=" NIPQUAD_FMT,
2100 NIPQUAD(x->props.saddr.a4),
2101 NIPQUAD(x->id.daddr.a4));
2102 break;
2103 case AF_INET6:
2104 audit_log_format(audit_buf,
2105 " src=" NIP6_FMT " dst=" NIP6_FMT,
2106 NIP6(*(struct in6_addr *)x->props.saddr.a6),
2107 NIP6(*(struct in6_addr *)x->id.daddr.a6));
2108 break;
2109 }
2110
2111 audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
2112 }
2113
2114 static void xfrm_audit_helper_pktinfo(struct sk_buff *skb, u16 family,
2115 struct audit_buffer *audit_buf)
2116 {
2117 struct iphdr *iph4;
2118 struct ipv6hdr *iph6;
2119
2120 switch (family) {
2121 case AF_INET:
2122 iph4 = ip_hdr(skb);
2123 audit_log_format(audit_buf,
2124 " src=" NIPQUAD_FMT " dst=" NIPQUAD_FMT,
2125 NIPQUAD(iph4->saddr),
2126 NIPQUAD(iph4->daddr));
2127 break;
2128 case AF_INET6:
2129 iph6 = ipv6_hdr(skb);
2130 audit_log_format(audit_buf,
2131 " src=" NIP6_FMT " dst=" NIP6_FMT
2132 " flowlbl=0x%x%02x%02x",
2133 NIP6(iph6->saddr),
2134 NIP6(iph6->daddr),
2135 iph6->flow_lbl[0] & 0x0f,
2136 iph6->flow_lbl[1],
2137 iph6->flow_lbl[2]);
2138 break;
2139 }
2140 }
2141
2142 void xfrm_audit_state_add(struct xfrm_state *x, int result,
2143 uid_t auid, u32 sessionid, u32 secid)
2144 {
2145 struct audit_buffer *audit_buf;
2146
2147 audit_buf = xfrm_audit_start("SAD-add");
2148 if (audit_buf == NULL)
2149 return;
2150 xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
2151 xfrm_audit_helper_sainfo(x, audit_buf);
2152 audit_log_format(audit_buf, " res=%u", result);
2153 audit_log_end(audit_buf);
2154 }
2155 EXPORT_SYMBOL_GPL(xfrm_audit_state_add);
2156
2157 void xfrm_audit_state_delete(struct xfrm_state *x, int result,
2158 uid_t auid, u32 sessionid, u32 secid)
2159 {
2160 struct audit_buffer *audit_buf;
2161
2162 audit_buf = xfrm_audit_start("SAD-delete");
2163 if (audit_buf == NULL)
2164 return;
2165 xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
2166 xfrm_audit_helper_sainfo(x, audit_buf);
2167 audit_log_format(audit_buf, " res=%u", result);
2168 audit_log_end(audit_buf);
2169 }
2170 EXPORT_SYMBOL_GPL(xfrm_audit_state_delete);
2171
2172 void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
2173 struct sk_buff *skb)
2174 {
2175 struct audit_buffer *audit_buf;
2176 u32 spi;
2177
2178 audit_buf = xfrm_audit_start("SA-replay-overflow");
2179 if (audit_buf == NULL)
2180 return;
2181 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2182 /* don't record the sequence number because it's inherent in this kind
2183 * of audit message */
2184 spi = ntohl(x->id.spi);
2185 audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
2186 audit_log_end(audit_buf);
2187 }
2188 EXPORT_SYMBOL_GPL(xfrm_audit_state_replay_overflow);
2189
2190 static void xfrm_audit_state_replay(struct xfrm_state *x,
2191 struct sk_buff *skb, __be32 net_seq)
2192 {
2193 struct audit_buffer *audit_buf;
2194 u32 spi;
2195
2196 audit_buf = xfrm_audit_start("SA-replayed-pkt");
2197 if (audit_buf == NULL)
2198 return;
2199 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2200 spi = ntohl(x->id.spi);
2201 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2202 spi, spi, ntohl(net_seq));
2203 audit_log_end(audit_buf);
2204 }
2205
2206 void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family)
2207 {
2208 struct audit_buffer *audit_buf;
2209
2210 audit_buf = xfrm_audit_start("SA-notfound");
2211 if (audit_buf == NULL)
2212 return;
2213 xfrm_audit_helper_pktinfo(skb, family, audit_buf);
2214 audit_log_end(audit_buf);
2215 }
2216 EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound_simple);
2217
2218 void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
2219 __be32 net_spi, __be32 net_seq)
2220 {
2221 struct audit_buffer *audit_buf;
2222 u32 spi;
2223
2224 audit_buf = xfrm_audit_start("SA-notfound");
2225 if (audit_buf == NULL)
2226 return;
2227 xfrm_audit_helper_pktinfo(skb, family, audit_buf);
2228 spi = ntohl(net_spi);
2229 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2230 spi, spi, ntohl(net_seq));
2231 audit_log_end(audit_buf);
2232 }
2233 EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound);
2234
2235 void xfrm_audit_state_icvfail(struct xfrm_state *x,
2236 struct sk_buff *skb, u8 proto)
2237 {
2238 struct audit_buffer *audit_buf;
2239 __be32 net_spi;
2240 __be32 net_seq;
2241
2242 audit_buf = xfrm_audit_start("SA-icv-failure");
2243 if (audit_buf == NULL)
2244 return;
2245 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2246 if (xfrm_parse_spi(skb, proto, &net_spi, &net_seq) == 0) {
2247 u32 spi = ntohl(net_spi);
2248 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2249 spi, spi, ntohl(net_seq));
2250 }
2251 audit_log_end(audit_buf);
2252 }
2253 EXPORT_SYMBOL_GPL(xfrm_audit_state_icvfail);
2254 #endif /* CONFIG_AUDITSYSCALL */