]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - net/xfrm/xfrm_state.c
f7c0951c9fd98003a4636d8149702eaf2e3fcc3d
[mirror_ubuntu-artful-kernel.git] / net / xfrm / xfrm_state.c
1 /*
2 * xfrm_state.c
3 *
4 * Changes:
5 * Mitsuru KANDA @USAGI
6 * Kazunori MIYAZAWA @USAGI
7 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
8 * IPv6 support
9 * YOSHIFUJI Hideaki @USAGI
10 * Split up af-specific functions
11 * Derek Atkins <derek@ihtfp.com>
12 * Add UDP Encapsulation
13 *
14 */
15
16 #include <linux/workqueue.h>
17 #include <net/xfrm.h>
18 #include <linux/pfkeyv2.h>
19 #include <linux/ipsec.h>
20 #include <linux/module.h>
21 #include <linux/cache.h>
22 #include <asm/uaccess.h>
23
24 #include "xfrm_hash.h"
25
26 struct sock *xfrm_nl;
27 EXPORT_SYMBOL(xfrm_nl);
28
29 u32 sysctl_xfrm_aevent_etime __read_mostly = XFRM_AE_ETIME;
30 EXPORT_SYMBOL(sysctl_xfrm_aevent_etime);
31
32 u32 sysctl_xfrm_aevent_rseqth __read_mostly = XFRM_AE_SEQT_SIZE;
33 EXPORT_SYMBOL(sysctl_xfrm_aevent_rseqth);
34
35 u32 sysctl_xfrm_acq_expires __read_mostly = 30;
36
37 /* Each xfrm_state may be linked to two tables:
38
39 1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
40 2. Hash table by (daddr,family,reqid) to find what SAs exist for given
41 destination/tunnel endpoint. (output)
42 */
43
44 static DEFINE_SPINLOCK(xfrm_state_lock);
45
46 /* Hash table to find appropriate SA towards given target (endpoint
47 * of tunnel or destination of transport mode) allowed by selector.
48 *
49 * Main use is finding SA after policy selected tunnel or transport mode.
50 * Also, it can be used by ah/esp icmp error handler to find offending SA.
51 */
52 static struct hlist_head *xfrm_state_bydst __read_mostly;
53 static struct hlist_head *xfrm_state_bysrc __read_mostly;
54 static struct hlist_head *xfrm_state_byspi __read_mostly;
55 static unsigned int xfrm_state_hmask __read_mostly;
56 static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
57 static unsigned int xfrm_state_num;
58 static unsigned int xfrm_state_genid;
59
60 static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
61 static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
62
63 static inline unsigned int xfrm_dst_hash(xfrm_address_t *daddr,
64 xfrm_address_t *saddr,
65 u32 reqid,
66 unsigned short family)
67 {
68 return __xfrm_dst_hash(daddr, saddr, reqid, family, xfrm_state_hmask);
69 }
70
71 static inline unsigned int xfrm_src_hash(xfrm_address_t *daddr,
72 xfrm_address_t *saddr,
73 unsigned short family)
74 {
75 return __xfrm_src_hash(daddr, saddr, family, xfrm_state_hmask);
76 }
77
78 static inline unsigned int
79 xfrm_spi_hash(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
80 {
81 return __xfrm_spi_hash(daddr, spi, proto, family, xfrm_state_hmask);
82 }
83
84 static void xfrm_hash_transfer(struct hlist_head *list,
85 struct hlist_head *ndsttable,
86 struct hlist_head *nsrctable,
87 struct hlist_head *nspitable,
88 unsigned int nhashmask)
89 {
90 struct hlist_node *entry, *tmp;
91 struct xfrm_state *x;
92
93 hlist_for_each_entry_safe(x, entry, tmp, list, bydst) {
94 unsigned int h;
95
96 h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
97 x->props.reqid, x->props.family,
98 nhashmask);
99 hlist_add_head(&x->bydst, ndsttable+h);
100
101 h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr,
102 x->props.family,
103 nhashmask);
104 hlist_add_head(&x->bysrc, nsrctable+h);
105
106 if (x->id.spi) {
107 h = __xfrm_spi_hash(&x->id.daddr, x->id.spi,
108 x->id.proto, x->props.family,
109 nhashmask);
110 hlist_add_head(&x->byspi, nspitable+h);
111 }
112 }
113 }
114
115 static unsigned long xfrm_hash_new_size(void)
116 {
117 return ((xfrm_state_hmask + 1) << 1) *
118 sizeof(struct hlist_head);
119 }
120
121 static DEFINE_MUTEX(hash_resize_mutex);
122
123 static void xfrm_hash_resize(struct work_struct *__unused)
124 {
125 struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
126 unsigned long nsize, osize;
127 unsigned int nhashmask, ohashmask;
128 int i;
129
130 mutex_lock(&hash_resize_mutex);
131
132 nsize = xfrm_hash_new_size();
133 ndst = xfrm_hash_alloc(nsize);
134 if (!ndst)
135 goto out_unlock;
136 nsrc = xfrm_hash_alloc(nsize);
137 if (!nsrc) {
138 xfrm_hash_free(ndst, nsize);
139 goto out_unlock;
140 }
141 nspi = xfrm_hash_alloc(nsize);
142 if (!nspi) {
143 xfrm_hash_free(ndst, nsize);
144 xfrm_hash_free(nsrc, nsize);
145 goto out_unlock;
146 }
147
148 spin_lock_bh(&xfrm_state_lock);
149
150 nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
151 for (i = xfrm_state_hmask; i >= 0; i--)
152 xfrm_hash_transfer(xfrm_state_bydst+i, ndst, nsrc, nspi,
153 nhashmask);
154
155 odst = xfrm_state_bydst;
156 osrc = xfrm_state_bysrc;
157 ospi = xfrm_state_byspi;
158 ohashmask = xfrm_state_hmask;
159
160 xfrm_state_bydst = ndst;
161 xfrm_state_bysrc = nsrc;
162 xfrm_state_byspi = nspi;
163 xfrm_state_hmask = nhashmask;
164
165 spin_unlock_bh(&xfrm_state_lock);
166
167 osize = (ohashmask + 1) * sizeof(struct hlist_head);
168 xfrm_hash_free(odst, osize);
169 xfrm_hash_free(osrc, osize);
170 xfrm_hash_free(ospi, osize);
171
172 out_unlock:
173 mutex_unlock(&hash_resize_mutex);
174 }
175
176 static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);
177
178 DECLARE_WAIT_QUEUE_HEAD(km_waitq);
179 EXPORT_SYMBOL(km_waitq);
180
181 static DEFINE_RWLOCK(xfrm_state_afinfo_lock);
182 static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO];
183
184 static struct work_struct xfrm_state_gc_work;
185 static HLIST_HEAD(xfrm_state_gc_list);
186 static DEFINE_SPINLOCK(xfrm_state_gc_lock);
187
188 int __xfrm_state_delete(struct xfrm_state *x);
189
190 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
191 void km_state_expired(struct xfrm_state *x, int hard, u32 pid);
192
193 static struct xfrm_state_afinfo *xfrm_state_lock_afinfo(unsigned int family)
194 {
195 struct xfrm_state_afinfo *afinfo;
196 if (unlikely(family >= NPROTO))
197 return NULL;
198 write_lock_bh(&xfrm_state_afinfo_lock);
199 afinfo = xfrm_state_afinfo[family];
200 if (unlikely(!afinfo))
201 write_unlock_bh(&xfrm_state_afinfo_lock);
202 return afinfo;
203 }
204
205 static void xfrm_state_unlock_afinfo(struct xfrm_state_afinfo *afinfo)
206 {
207 write_unlock_bh(&xfrm_state_afinfo_lock);
208 }
209
210 int xfrm_register_type(struct xfrm_type *type, unsigned short family)
211 {
212 struct xfrm_state_afinfo *afinfo = xfrm_state_lock_afinfo(family);
213 struct xfrm_type **typemap;
214 int err = 0;
215
216 if (unlikely(afinfo == NULL))
217 return -EAFNOSUPPORT;
218 typemap = afinfo->type_map;
219
220 if (likely(typemap[type->proto] == NULL))
221 typemap[type->proto] = type;
222 else
223 err = -EEXIST;
224 xfrm_state_unlock_afinfo(afinfo);
225 return err;
226 }
227 EXPORT_SYMBOL(xfrm_register_type);
228
229 int xfrm_unregister_type(struct xfrm_type *type, unsigned short family)
230 {
231 struct xfrm_state_afinfo *afinfo = xfrm_state_lock_afinfo(family);
232 struct xfrm_type **typemap;
233 int err = 0;
234
235 if (unlikely(afinfo == NULL))
236 return -EAFNOSUPPORT;
237 typemap = afinfo->type_map;
238
239 if (unlikely(typemap[type->proto] != type))
240 err = -ENOENT;
241 else
242 typemap[type->proto] = NULL;
243 xfrm_state_unlock_afinfo(afinfo);
244 return err;
245 }
246 EXPORT_SYMBOL(xfrm_unregister_type);
247
248 static struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
249 {
250 struct xfrm_state_afinfo *afinfo;
251 struct xfrm_type **typemap;
252 struct xfrm_type *type;
253 int modload_attempted = 0;
254
255 retry:
256 afinfo = xfrm_state_get_afinfo(family);
257 if (unlikely(afinfo == NULL))
258 return NULL;
259 typemap = afinfo->type_map;
260
261 type = typemap[proto];
262 if (unlikely(type && !try_module_get(type->owner)))
263 type = NULL;
264 if (!type && !modload_attempted) {
265 xfrm_state_put_afinfo(afinfo);
266 request_module("xfrm-type-%d-%d", family, proto);
267 modload_attempted = 1;
268 goto retry;
269 }
270
271 xfrm_state_put_afinfo(afinfo);
272 return type;
273 }
274
275 static void xfrm_put_type(struct xfrm_type *type)
276 {
277 module_put(type->owner);
278 }
279
280 int xfrm_register_mode(struct xfrm_mode *mode, int family)
281 {
282 struct xfrm_state_afinfo *afinfo;
283 struct xfrm_mode **modemap;
284 int err;
285
286 if (unlikely(mode->encap >= XFRM_MODE_MAX))
287 return -EINVAL;
288
289 afinfo = xfrm_state_lock_afinfo(family);
290 if (unlikely(afinfo == NULL))
291 return -EAFNOSUPPORT;
292
293 err = -EEXIST;
294 modemap = afinfo->mode_map;
295 if (modemap[mode->encap])
296 goto out;
297
298 err = -ENOENT;
299 if (!try_module_get(afinfo->owner))
300 goto out;
301
302 mode->afinfo = afinfo;
303 modemap[mode->encap] = mode;
304 err = 0;
305
306 out:
307 xfrm_state_unlock_afinfo(afinfo);
308 return err;
309 }
310 EXPORT_SYMBOL(xfrm_register_mode);
311
312 int xfrm_unregister_mode(struct xfrm_mode *mode, int family)
313 {
314 struct xfrm_state_afinfo *afinfo;
315 struct xfrm_mode **modemap;
316 int err;
317
318 if (unlikely(mode->encap >= XFRM_MODE_MAX))
319 return -EINVAL;
320
321 afinfo = xfrm_state_lock_afinfo(family);
322 if (unlikely(afinfo == NULL))
323 return -EAFNOSUPPORT;
324
325 err = -ENOENT;
326 modemap = afinfo->mode_map;
327 if (likely(modemap[mode->encap] == mode)) {
328 modemap[mode->encap] = NULL;
329 module_put(mode->afinfo->owner);
330 err = 0;
331 }
332
333 xfrm_state_unlock_afinfo(afinfo);
334 return err;
335 }
336 EXPORT_SYMBOL(xfrm_unregister_mode);
337
338 static struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family)
339 {
340 struct xfrm_state_afinfo *afinfo;
341 struct xfrm_mode *mode;
342 int modload_attempted = 0;
343
344 if (unlikely(encap >= XFRM_MODE_MAX))
345 return NULL;
346
347 retry:
348 afinfo = xfrm_state_get_afinfo(family);
349 if (unlikely(afinfo == NULL))
350 return NULL;
351
352 mode = afinfo->mode_map[encap];
353 if (unlikely(mode && !try_module_get(mode->owner)))
354 mode = NULL;
355 if (!mode && !modload_attempted) {
356 xfrm_state_put_afinfo(afinfo);
357 request_module("xfrm-mode-%d-%d", family, encap);
358 modload_attempted = 1;
359 goto retry;
360 }
361
362 xfrm_state_put_afinfo(afinfo);
363 return mode;
364 }
365
366 static void xfrm_put_mode(struct xfrm_mode *mode)
367 {
368 module_put(mode->owner);
369 }
370
371 static void xfrm_state_gc_destroy(struct xfrm_state *x)
372 {
373 del_timer_sync(&x->timer);
374 del_timer_sync(&x->rtimer);
375 kfree(x->aalg);
376 kfree(x->ealg);
377 kfree(x->calg);
378 kfree(x->encap);
379 kfree(x->coaddr);
380 if (x->inner_mode)
381 xfrm_put_mode(x->inner_mode);
382 if (x->outer_mode)
383 xfrm_put_mode(x->outer_mode);
384 if (x->type) {
385 x->type->destructor(x);
386 xfrm_put_type(x->type);
387 }
388 security_xfrm_state_free(x);
389 kfree(x);
390 }
391
392 static void xfrm_state_gc_task(struct work_struct *data)
393 {
394 struct xfrm_state *x;
395 struct hlist_node *entry, *tmp;
396 struct hlist_head gc_list;
397
398 spin_lock_bh(&xfrm_state_gc_lock);
399 gc_list.first = xfrm_state_gc_list.first;
400 INIT_HLIST_HEAD(&xfrm_state_gc_list);
401 spin_unlock_bh(&xfrm_state_gc_lock);
402
403 hlist_for_each_entry_safe(x, entry, tmp, &gc_list, bydst)
404 xfrm_state_gc_destroy(x);
405
406 wake_up(&km_waitq);
407 }
408
409 static inline unsigned long make_jiffies(long secs)
410 {
411 if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
412 return MAX_SCHEDULE_TIMEOUT-1;
413 else
414 return secs*HZ;
415 }
416
417 static void xfrm_timer_handler(unsigned long data)
418 {
419 struct xfrm_state *x = (struct xfrm_state*)data;
420 unsigned long now = get_seconds();
421 long next = LONG_MAX;
422 int warn = 0;
423 int err = 0;
424
425 spin_lock(&x->lock);
426 if (x->km.state == XFRM_STATE_DEAD)
427 goto out;
428 if (x->km.state == XFRM_STATE_EXPIRED)
429 goto expired;
430 if (x->lft.hard_add_expires_seconds) {
431 long tmo = x->lft.hard_add_expires_seconds +
432 x->curlft.add_time - now;
433 if (tmo <= 0)
434 goto expired;
435 if (tmo < next)
436 next = tmo;
437 }
438 if (x->lft.hard_use_expires_seconds) {
439 long tmo = x->lft.hard_use_expires_seconds +
440 (x->curlft.use_time ? : now) - now;
441 if (tmo <= 0)
442 goto expired;
443 if (tmo < next)
444 next = tmo;
445 }
446 if (x->km.dying)
447 goto resched;
448 if (x->lft.soft_add_expires_seconds) {
449 long tmo = x->lft.soft_add_expires_seconds +
450 x->curlft.add_time - now;
451 if (tmo <= 0)
452 warn = 1;
453 else if (tmo < next)
454 next = tmo;
455 }
456 if (x->lft.soft_use_expires_seconds) {
457 long tmo = x->lft.soft_use_expires_seconds +
458 (x->curlft.use_time ? : now) - now;
459 if (tmo <= 0)
460 warn = 1;
461 else if (tmo < next)
462 next = tmo;
463 }
464
465 x->km.dying = warn;
466 if (warn)
467 km_state_expired(x, 0, 0);
468 resched:
469 if (next != LONG_MAX)
470 mod_timer(&x->timer, jiffies + make_jiffies(next));
471
472 goto out;
473
474 expired:
475 if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0) {
476 x->km.state = XFRM_STATE_EXPIRED;
477 wake_up(&km_waitq);
478 next = 2;
479 goto resched;
480 }
481
482 err = __xfrm_state_delete(x);
483 if (!err && x->id.spi)
484 km_state_expired(x, 1, 0);
485
486 xfrm_audit_state_delete(x, err ? 0 : 1,
487 audit_get_loginuid(current->audit_context), 0);
488
489 out:
490 spin_unlock(&x->lock);
491 }
492
493 static void xfrm_replay_timer_handler(unsigned long data);
494
495 struct xfrm_state *xfrm_state_alloc(void)
496 {
497 struct xfrm_state *x;
498
499 x = kzalloc(sizeof(struct xfrm_state), GFP_ATOMIC);
500
501 if (x) {
502 atomic_set(&x->refcnt, 1);
503 atomic_set(&x->tunnel_users, 0);
504 INIT_HLIST_NODE(&x->bydst);
505 INIT_HLIST_NODE(&x->bysrc);
506 INIT_HLIST_NODE(&x->byspi);
507 setup_timer(&x->timer, xfrm_timer_handler, (unsigned long)x);
508 setup_timer(&x->rtimer, xfrm_replay_timer_handler,
509 (unsigned long)x);
510 x->curlft.add_time = get_seconds();
511 x->lft.soft_byte_limit = XFRM_INF;
512 x->lft.soft_packet_limit = XFRM_INF;
513 x->lft.hard_byte_limit = XFRM_INF;
514 x->lft.hard_packet_limit = XFRM_INF;
515 x->replay_maxage = 0;
516 x->replay_maxdiff = 0;
517 spin_lock_init(&x->lock);
518 }
519 return x;
520 }
521 EXPORT_SYMBOL(xfrm_state_alloc);
522
523 void __xfrm_state_destroy(struct xfrm_state *x)
524 {
525 BUG_TRAP(x->km.state == XFRM_STATE_DEAD);
526
527 spin_lock_bh(&xfrm_state_gc_lock);
528 hlist_add_head(&x->bydst, &xfrm_state_gc_list);
529 spin_unlock_bh(&xfrm_state_gc_lock);
530 schedule_work(&xfrm_state_gc_work);
531 }
532 EXPORT_SYMBOL(__xfrm_state_destroy);
533
534 int __xfrm_state_delete(struct xfrm_state *x)
535 {
536 int err = -ESRCH;
537
538 if (x->km.state != XFRM_STATE_DEAD) {
539 x->km.state = XFRM_STATE_DEAD;
540 spin_lock(&xfrm_state_lock);
541 hlist_del(&x->bydst);
542 hlist_del(&x->bysrc);
543 if (x->id.spi)
544 hlist_del(&x->byspi);
545 xfrm_state_num--;
546 spin_unlock(&xfrm_state_lock);
547
548 /* All xfrm_state objects are created by xfrm_state_alloc.
549 * The xfrm_state_alloc call gives a reference, and that
550 * is what we are dropping here.
551 */
552 xfrm_state_put(x);
553 err = 0;
554 }
555
556 return err;
557 }
558 EXPORT_SYMBOL(__xfrm_state_delete);
559
560 int xfrm_state_delete(struct xfrm_state *x)
561 {
562 int err;
563
564 spin_lock_bh(&x->lock);
565 err = __xfrm_state_delete(x);
566 spin_unlock_bh(&x->lock);
567
568 return err;
569 }
570 EXPORT_SYMBOL(xfrm_state_delete);
571
572 #ifdef CONFIG_SECURITY_NETWORK_XFRM
573 static inline int
574 xfrm_state_flush_secctx_check(u8 proto, struct xfrm_audit *audit_info)
575 {
576 int i, err = 0;
577
578 for (i = 0; i <= xfrm_state_hmask; i++) {
579 struct hlist_node *entry;
580 struct xfrm_state *x;
581
582 hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
583 if (xfrm_id_proto_match(x->id.proto, proto) &&
584 (err = security_xfrm_state_delete(x)) != 0) {
585 xfrm_audit_state_delete(x, 0,
586 audit_info->loginuid,
587 audit_info->secid);
588 return err;
589 }
590 }
591 }
592
593 return err;
594 }
595 #else
596 static inline int
597 xfrm_state_flush_secctx_check(u8 proto, struct xfrm_audit *audit_info)
598 {
599 return 0;
600 }
601 #endif
602
603 int xfrm_state_flush(u8 proto, struct xfrm_audit *audit_info)
604 {
605 int i, err = 0;
606
607 spin_lock_bh(&xfrm_state_lock);
608 err = xfrm_state_flush_secctx_check(proto, audit_info);
609 if (err)
610 goto out;
611
612 for (i = 0; i <= xfrm_state_hmask; i++) {
613 struct hlist_node *entry;
614 struct xfrm_state *x;
615 restart:
616 hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
617 if (!xfrm_state_kern(x) &&
618 xfrm_id_proto_match(x->id.proto, proto)) {
619 xfrm_state_hold(x);
620 spin_unlock_bh(&xfrm_state_lock);
621
622 err = xfrm_state_delete(x);
623 xfrm_audit_state_delete(x, err ? 0 : 1,
624 audit_info->loginuid,
625 audit_info->secid);
626 xfrm_state_put(x);
627
628 spin_lock_bh(&xfrm_state_lock);
629 goto restart;
630 }
631 }
632 }
633 err = 0;
634
635 out:
636 spin_unlock_bh(&xfrm_state_lock);
637 wake_up(&km_waitq);
638 return err;
639 }
640 EXPORT_SYMBOL(xfrm_state_flush);
641
642 void xfrm_sad_getinfo(struct xfrmk_sadinfo *si)
643 {
644 spin_lock_bh(&xfrm_state_lock);
645 si->sadcnt = xfrm_state_num;
646 si->sadhcnt = xfrm_state_hmask;
647 si->sadhmcnt = xfrm_state_hashmax;
648 spin_unlock_bh(&xfrm_state_lock);
649 }
650 EXPORT_SYMBOL(xfrm_sad_getinfo);
651
652 static int
653 xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
654 struct xfrm_tmpl *tmpl,
655 xfrm_address_t *daddr, xfrm_address_t *saddr,
656 unsigned short family)
657 {
658 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
659 if (!afinfo)
660 return -1;
661 afinfo->init_tempsel(x, fl, tmpl, daddr, saddr);
662 xfrm_state_put_afinfo(afinfo);
663 return 0;
664 }
665
666 static struct xfrm_state *__xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
667 {
668 unsigned int h = xfrm_spi_hash(daddr, spi, proto, family);
669 struct xfrm_state *x;
670 struct hlist_node *entry;
671
672 hlist_for_each_entry(x, entry, xfrm_state_byspi+h, byspi) {
673 if (x->props.family != family ||
674 x->id.spi != spi ||
675 x->id.proto != proto)
676 continue;
677
678 switch (family) {
679 case AF_INET:
680 if (x->id.daddr.a4 != daddr->a4)
681 continue;
682 break;
683 case AF_INET6:
684 if (!ipv6_addr_equal((struct in6_addr *)daddr,
685 (struct in6_addr *)
686 x->id.daddr.a6))
687 continue;
688 break;
689 }
690
691 xfrm_state_hold(x);
692 return x;
693 }
694
695 return NULL;
696 }
697
698 static struct xfrm_state *__xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto, unsigned short family)
699 {
700 unsigned int h = xfrm_src_hash(daddr, saddr, family);
701 struct xfrm_state *x;
702 struct hlist_node *entry;
703
704 hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) {
705 if (x->props.family != family ||
706 x->id.proto != proto)
707 continue;
708
709 switch (family) {
710 case AF_INET:
711 if (x->id.daddr.a4 != daddr->a4 ||
712 x->props.saddr.a4 != saddr->a4)
713 continue;
714 break;
715 case AF_INET6:
716 if (!ipv6_addr_equal((struct in6_addr *)daddr,
717 (struct in6_addr *)
718 x->id.daddr.a6) ||
719 !ipv6_addr_equal((struct in6_addr *)saddr,
720 (struct in6_addr *)
721 x->props.saddr.a6))
722 continue;
723 break;
724 }
725
726 xfrm_state_hold(x);
727 return x;
728 }
729
730 return NULL;
731 }
732
733 static inline struct xfrm_state *
734 __xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
735 {
736 if (use_spi)
737 return __xfrm_state_lookup(&x->id.daddr, x->id.spi,
738 x->id.proto, family);
739 else
740 return __xfrm_state_lookup_byaddr(&x->id.daddr,
741 &x->props.saddr,
742 x->id.proto, family);
743 }
744
745 static void xfrm_hash_grow_check(int have_hash_collision)
746 {
747 if (have_hash_collision &&
748 (xfrm_state_hmask + 1) < xfrm_state_hashmax &&
749 xfrm_state_num > xfrm_state_hmask)
750 schedule_work(&xfrm_hash_work);
751 }
752
753 struct xfrm_state *
754 xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
755 struct flowi *fl, struct xfrm_tmpl *tmpl,
756 struct xfrm_policy *pol, int *err,
757 unsigned short family)
758 {
759 unsigned int h;
760 struct hlist_node *entry;
761 struct xfrm_state *x, *x0;
762 int acquire_in_progress = 0;
763 int error = 0;
764 struct xfrm_state *best = NULL;
765
766 spin_lock_bh(&xfrm_state_lock);
767 h = xfrm_dst_hash(daddr, saddr, tmpl->reqid, family);
768 hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
769 if (x->props.family == family &&
770 x->props.reqid == tmpl->reqid &&
771 !(x->props.flags & XFRM_STATE_WILDRECV) &&
772 xfrm_state_addr_check(x, daddr, saddr, family) &&
773 tmpl->mode == x->props.mode &&
774 tmpl->id.proto == x->id.proto &&
775 (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) {
776 /* Resolution logic:
777 1. There is a valid state with matching selector.
778 Done.
779 2. Valid state with inappropriate selector. Skip.
780
781 Entering area of "sysdeps".
782
783 3. If state is not valid, selector is temporary,
784 it selects only session which triggered
785 previous resolution. Key manager will do
786 something to install a state with proper
787 selector.
788 */
789 if (x->km.state == XFRM_STATE_VALID) {
790 if (!xfrm_selector_match(&x->sel, fl, x->sel.family) ||
791 !security_xfrm_state_pol_flow_match(x, pol, fl))
792 continue;
793 if (!best ||
794 best->km.dying > x->km.dying ||
795 (best->km.dying == x->km.dying &&
796 best->curlft.add_time < x->curlft.add_time))
797 best = x;
798 } else if (x->km.state == XFRM_STATE_ACQ) {
799 acquire_in_progress = 1;
800 } else if (x->km.state == XFRM_STATE_ERROR ||
801 x->km.state == XFRM_STATE_EXPIRED) {
802 if (xfrm_selector_match(&x->sel, fl, x->sel.family) &&
803 security_xfrm_state_pol_flow_match(x, pol, fl))
804 error = -ESRCH;
805 }
806 }
807 }
808
809 x = best;
810 if (!x && !error && !acquire_in_progress) {
811 if (tmpl->id.spi &&
812 (x0 = __xfrm_state_lookup(daddr, tmpl->id.spi,
813 tmpl->id.proto, family)) != NULL) {
814 xfrm_state_put(x0);
815 error = -EEXIST;
816 goto out;
817 }
818 x = xfrm_state_alloc();
819 if (x == NULL) {
820 error = -ENOMEM;
821 goto out;
822 }
823 /* Initialize temporary selector matching only
824 * to current session. */
825 xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);
826
827 error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid);
828 if (error) {
829 x->km.state = XFRM_STATE_DEAD;
830 xfrm_state_put(x);
831 x = NULL;
832 goto out;
833 }
834
835 if (km_query(x, tmpl, pol) == 0) {
836 x->km.state = XFRM_STATE_ACQ;
837 hlist_add_head(&x->bydst, xfrm_state_bydst+h);
838 h = xfrm_src_hash(daddr, saddr, family);
839 hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
840 if (x->id.spi) {
841 h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, family);
842 hlist_add_head(&x->byspi, xfrm_state_byspi+h);
843 }
844 x->lft.hard_add_expires_seconds = sysctl_xfrm_acq_expires;
845 x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ;
846 add_timer(&x->timer);
847 xfrm_state_num++;
848 xfrm_hash_grow_check(x->bydst.next != NULL);
849 } else {
850 x->km.state = XFRM_STATE_DEAD;
851 xfrm_state_put(x);
852 x = NULL;
853 error = -ESRCH;
854 }
855 }
856 out:
857 if (x)
858 xfrm_state_hold(x);
859 else
860 *err = acquire_in_progress ? -EAGAIN : error;
861 spin_unlock_bh(&xfrm_state_lock);
862 return x;
863 }
864
865 struct xfrm_state *
866 xfrm_stateonly_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
867 unsigned short family, u8 mode, u8 proto, u32 reqid)
868 {
869 unsigned int h;
870 struct xfrm_state *rx = NULL, *x = NULL;
871 struct hlist_node *entry;
872
873 spin_lock(&xfrm_state_lock);
874 h = xfrm_dst_hash(daddr, saddr, reqid, family);
875 hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
876 if (x->props.family == family &&
877 x->props.reqid == reqid &&
878 !(x->props.flags & XFRM_STATE_WILDRECV) &&
879 xfrm_state_addr_check(x, daddr, saddr, family) &&
880 mode == x->props.mode &&
881 proto == x->id.proto &&
882 x->km.state == XFRM_STATE_VALID) {
883 rx = x;
884 break;
885 }
886 }
887
888 if (rx)
889 xfrm_state_hold(rx);
890 spin_unlock(&xfrm_state_lock);
891
892
893 return rx;
894 }
895 EXPORT_SYMBOL(xfrm_stateonly_find);
896
897 static void __xfrm_state_insert(struct xfrm_state *x)
898 {
899 unsigned int h;
900
901 x->genid = ++xfrm_state_genid;
902
903 h = xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
904 x->props.reqid, x->props.family);
905 hlist_add_head(&x->bydst, xfrm_state_bydst+h);
906
907 h = xfrm_src_hash(&x->id.daddr, &x->props.saddr, x->props.family);
908 hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
909
910 if (x->id.spi) {
911 h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto,
912 x->props.family);
913
914 hlist_add_head(&x->byspi, xfrm_state_byspi+h);
915 }
916
917 mod_timer(&x->timer, jiffies + HZ);
918 if (x->replay_maxage)
919 mod_timer(&x->rtimer, jiffies + x->replay_maxage);
920
921 wake_up(&km_waitq);
922
923 xfrm_state_num++;
924
925 xfrm_hash_grow_check(x->bydst.next != NULL);
926 }
927
928 /* xfrm_state_lock is held */
929 static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
930 {
931 unsigned short family = xnew->props.family;
932 u32 reqid = xnew->props.reqid;
933 struct xfrm_state *x;
934 struct hlist_node *entry;
935 unsigned int h;
936
937 h = xfrm_dst_hash(&xnew->id.daddr, &xnew->props.saddr, reqid, family);
938 hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
939 if (x->props.family == family &&
940 x->props.reqid == reqid &&
941 !xfrm_addr_cmp(&x->id.daddr, &xnew->id.daddr, family) &&
942 !xfrm_addr_cmp(&x->props.saddr, &xnew->props.saddr, family))
943 x->genid = xfrm_state_genid;
944 }
945 }
946
947 void xfrm_state_insert(struct xfrm_state *x)
948 {
949 spin_lock_bh(&xfrm_state_lock);
950 __xfrm_state_bump_genids(x);
951 __xfrm_state_insert(x);
952 spin_unlock_bh(&xfrm_state_lock);
953 }
954 EXPORT_SYMBOL(xfrm_state_insert);
955
956 /* xfrm_state_lock is held */
957 static struct xfrm_state *__find_acq_core(unsigned short family, u8 mode, u32 reqid, u8 proto, xfrm_address_t *daddr, xfrm_address_t *saddr, int create)
958 {
959 unsigned int h = xfrm_dst_hash(daddr, saddr, reqid, family);
960 struct hlist_node *entry;
961 struct xfrm_state *x;
962
963 hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
964 if (x->props.reqid != reqid ||
965 x->props.mode != mode ||
966 x->props.family != family ||
967 x->km.state != XFRM_STATE_ACQ ||
968 x->id.spi != 0 ||
969 x->id.proto != proto)
970 continue;
971
972 switch (family) {
973 case AF_INET:
974 if (x->id.daddr.a4 != daddr->a4 ||
975 x->props.saddr.a4 != saddr->a4)
976 continue;
977 break;
978 case AF_INET6:
979 if (!ipv6_addr_equal((struct in6_addr *)x->id.daddr.a6,
980 (struct in6_addr *)daddr) ||
981 !ipv6_addr_equal((struct in6_addr *)
982 x->props.saddr.a6,
983 (struct in6_addr *)saddr))
984 continue;
985 break;
986 }
987
988 xfrm_state_hold(x);
989 return x;
990 }
991
992 if (!create)
993 return NULL;
994
995 x = xfrm_state_alloc();
996 if (likely(x)) {
997 switch (family) {
998 case AF_INET:
999 x->sel.daddr.a4 = daddr->a4;
1000 x->sel.saddr.a4 = saddr->a4;
1001 x->sel.prefixlen_d = 32;
1002 x->sel.prefixlen_s = 32;
1003 x->props.saddr.a4 = saddr->a4;
1004 x->id.daddr.a4 = daddr->a4;
1005 break;
1006
1007 case AF_INET6:
1008 ipv6_addr_copy((struct in6_addr *)x->sel.daddr.a6,
1009 (struct in6_addr *)daddr);
1010 ipv6_addr_copy((struct in6_addr *)x->sel.saddr.a6,
1011 (struct in6_addr *)saddr);
1012 x->sel.prefixlen_d = 128;
1013 x->sel.prefixlen_s = 128;
1014 ipv6_addr_copy((struct in6_addr *)x->props.saddr.a6,
1015 (struct in6_addr *)saddr);
1016 ipv6_addr_copy((struct in6_addr *)x->id.daddr.a6,
1017 (struct in6_addr *)daddr);
1018 break;
1019 }
1020
1021 x->km.state = XFRM_STATE_ACQ;
1022 x->id.proto = proto;
1023 x->props.family = family;
1024 x->props.mode = mode;
1025 x->props.reqid = reqid;
1026 x->lft.hard_add_expires_seconds = sysctl_xfrm_acq_expires;
1027 xfrm_state_hold(x);
1028 x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ;
1029 add_timer(&x->timer);
1030 hlist_add_head(&x->bydst, xfrm_state_bydst+h);
1031 h = xfrm_src_hash(daddr, saddr, family);
1032 hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
1033
1034 xfrm_state_num++;
1035
1036 xfrm_hash_grow_check(x->bydst.next != NULL);
1037 }
1038
1039 return x;
1040 }
1041
1042 static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq);
1043
1044 int xfrm_state_add(struct xfrm_state *x)
1045 {
1046 struct xfrm_state *x1;
1047 int family;
1048 int err;
1049 int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
1050
1051 family = x->props.family;
1052
1053 spin_lock_bh(&xfrm_state_lock);
1054
1055 x1 = __xfrm_state_locate(x, use_spi, family);
1056 if (x1) {
1057 xfrm_state_put(x1);
1058 x1 = NULL;
1059 err = -EEXIST;
1060 goto out;
1061 }
1062
1063 if (use_spi && x->km.seq) {
1064 x1 = __xfrm_find_acq_byseq(x->km.seq);
1065 if (x1 && ((x1->id.proto != x->id.proto) ||
1066 xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family))) {
1067 xfrm_state_put(x1);
1068 x1 = NULL;
1069 }
1070 }
1071
1072 if (use_spi && !x1)
1073 x1 = __find_acq_core(family, x->props.mode, x->props.reqid,
1074 x->id.proto,
1075 &x->id.daddr, &x->props.saddr, 0);
1076
1077 __xfrm_state_bump_genids(x);
1078 __xfrm_state_insert(x);
1079 err = 0;
1080
1081 out:
1082 spin_unlock_bh(&xfrm_state_lock);
1083
1084 if (x1) {
1085 xfrm_state_delete(x1);
1086 xfrm_state_put(x1);
1087 }
1088
1089 return err;
1090 }
1091 EXPORT_SYMBOL(xfrm_state_add);
1092
1093 #ifdef CONFIG_XFRM_MIGRATE
1094 struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
1095 {
1096 int err = -ENOMEM;
1097 struct xfrm_state *x = xfrm_state_alloc();
1098 if (!x)
1099 goto error;
1100
1101 memcpy(&x->id, &orig->id, sizeof(x->id));
1102 memcpy(&x->sel, &orig->sel, sizeof(x->sel));
1103 memcpy(&x->lft, &orig->lft, sizeof(x->lft));
1104 x->props.mode = orig->props.mode;
1105 x->props.replay_window = orig->props.replay_window;
1106 x->props.reqid = orig->props.reqid;
1107 x->props.family = orig->props.family;
1108 x->props.saddr = orig->props.saddr;
1109
1110 if (orig->aalg) {
1111 x->aalg = xfrm_algo_clone(orig->aalg);
1112 if (!x->aalg)
1113 goto error;
1114 }
1115 x->props.aalgo = orig->props.aalgo;
1116
1117 if (orig->ealg) {
1118 x->ealg = xfrm_algo_clone(orig->ealg);
1119 if (!x->ealg)
1120 goto error;
1121 }
1122 x->props.ealgo = orig->props.ealgo;
1123
1124 if (orig->calg) {
1125 x->calg = xfrm_algo_clone(orig->calg);
1126 if (!x->calg)
1127 goto error;
1128 }
1129 x->props.calgo = orig->props.calgo;
1130
1131 if (orig->encap) {
1132 x->encap = kmemdup(orig->encap, sizeof(*x->encap), GFP_KERNEL);
1133 if (!x->encap)
1134 goto error;
1135 }
1136
1137 if (orig->coaddr) {
1138 x->coaddr = kmemdup(orig->coaddr, sizeof(*x->coaddr),
1139 GFP_KERNEL);
1140 if (!x->coaddr)
1141 goto error;
1142 }
1143
1144 err = xfrm_init_state(x);
1145 if (err)
1146 goto error;
1147
1148 x->props.flags = orig->props.flags;
1149
1150 x->curlft.add_time = orig->curlft.add_time;
1151 x->km.state = orig->km.state;
1152 x->km.seq = orig->km.seq;
1153
1154 return x;
1155
1156 error:
1157 if (errp)
1158 *errp = err;
1159 if (x) {
1160 kfree(x->aalg);
1161 kfree(x->ealg);
1162 kfree(x->calg);
1163 kfree(x->encap);
1164 kfree(x->coaddr);
1165 }
1166 kfree(x);
1167 return NULL;
1168 }
1169 EXPORT_SYMBOL(xfrm_state_clone);
1170
/* xfrm_state_lock is held */
struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m)
{
	unsigned int h;
	struct xfrm_state *x;
	struct hlist_node *entry;

	/* Locate the SA matching the "old" half of a migrate request.
	 * With a reqid the bydst hash (daddr/saddr/reqid) is used; without
	 * one we fall back to the bysrc hash keyed on the addresses only.
	 * On success the state is returned with an extra reference held. */
	if (m->reqid) {
		h = xfrm_dst_hash(&m->old_daddr, &m->old_saddr,
				  m->reqid, m->old_family);
		hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
			if (x->props.mode != m->mode ||
			    x->id.proto != m->proto)
				continue;
			/* m->reqid is known non-zero in this branch, so the
			 * test below reduces to reqid inequality. */
			if (m->reqid && x->props.reqid != m->reqid)
				continue;
			if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
					  m->old_family) ||
			    xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
					  m->old_family))
				continue;
			xfrm_state_hold(x);
			return x;
		}
	} else {
		h = xfrm_src_hash(&m->old_daddr, &m->old_saddr,
				  m->old_family);
		hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) {
			if (x->props.mode != m->mode ||
			    x->id.proto != m->proto)
				continue;
			if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
					  m->old_family) ||
			    xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
					  m->old_family))
				continue;
			xfrm_state_hold(x);
			return x;
		}
	}

	return NULL;
}
EXPORT_SYMBOL(xfrm_migrate_state_find);
1215
1216 struct xfrm_state * xfrm_state_migrate(struct xfrm_state *x,
1217 struct xfrm_migrate *m)
1218 {
1219 struct xfrm_state *xc;
1220 int err;
1221
1222 xc = xfrm_state_clone(x, &err);
1223 if (!xc)
1224 return NULL;
1225
1226 memcpy(&xc->id.daddr, &m->new_daddr, sizeof(xc->id.daddr));
1227 memcpy(&xc->props.saddr, &m->new_saddr, sizeof(xc->props.saddr));
1228
1229 /* add state */
1230 if (!xfrm_addr_cmp(&x->id.daddr, &m->new_daddr, m->new_family)) {
1231 /* a care is needed when the destination address of the
1232 state is to be updated as it is a part of triplet */
1233 xfrm_state_insert(xc);
1234 } else {
1235 if ((err = xfrm_state_add(xc)) < 0)
1236 goto error;
1237 }
1238
1239 return xc;
1240 error:
1241 kfree(xc);
1242 return NULL;
1243 }
1244 EXPORT_SYMBOL(xfrm_state_migrate);
1245 #endif
1246
int xfrm_state_update(struct xfrm_state *x)
{
	struct xfrm_state *x1;
	int err;
	/* Whether this protocol identifies SAs by SPI; SPI-less states are
	 * located by selector instead. */
	int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);

	spin_lock_bh(&xfrm_state_lock);
	x1 = __xfrm_state_locate(x, use_spi, x->props.family);

	err = -ESRCH;
	if (!x1)
		goto out;

	/* Kernel-owned states may not be replaced from here. */
	if (xfrm_state_kern(x1)) {
		xfrm_state_put(x1);
		err = -EEXIST;
		goto out;
	}

	/* An ACQ placeholder is replaced outright by the real state; x is
	 * consumed by the insert, signalled below by x == NULL. */
	if (x1->km.state == XFRM_STATE_ACQ) {
		__xfrm_state_insert(x);
		x = NULL;
	}
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);

	if (err)
		return err;

	if (!x) {
		/* x took the placeholder's slot: retire the old ACQ state
		 * (still holding the reference from __xfrm_state_locate). */
		xfrm_state_delete(x1);
		xfrm_state_put(x1);
		return 0;
	}

	/* Otherwise update the existing state in place: encap, care-of
	 * address, selector and lifetimes, under the per-state lock. */
	err = -EINVAL;
	spin_lock_bh(&x1->lock);
	if (likely(x1->km.state == XFRM_STATE_VALID)) {
		if (x->encap && x1->encap)
			memcpy(x1->encap, x->encap, sizeof(*x1->encap));
		if (x->coaddr && x1->coaddr) {
			memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr));
		}
		if (!use_spi && memcmp(&x1->sel, &x->sel, sizeof(x1->sel)))
			memcpy(&x1->sel, &x->sel, sizeof(x1->sel));
		memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
		x1->km.dying = 0;

		/* Re-arm the lifetime timer so the new limits take effect. */
		mod_timer(&x1->timer, jiffies + HZ);
		if (x1->curlft.use_time)
			xfrm_state_check_expire(x1);

		err = 0;
	}
	spin_unlock_bh(&x1->lock);

	xfrm_state_put(x1);

	return err;
}
EXPORT_SYMBOL(xfrm_state_update);
1310
1311 int xfrm_state_check_expire(struct xfrm_state *x)
1312 {
1313 if (!x->curlft.use_time)
1314 x->curlft.use_time = get_seconds();
1315
1316 if (x->km.state != XFRM_STATE_VALID)
1317 return -EINVAL;
1318
1319 if (x->curlft.bytes >= x->lft.hard_byte_limit ||
1320 x->curlft.packets >= x->lft.hard_packet_limit) {
1321 x->km.state = XFRM_STATE_EXPIRED;
1322 mod_timer(&x->timer, jiffies);
1323 return -EINVAL;
1324 }
1325
1326 if (!x->km.dying &&
1327 (x->curlft.bytes >= x->lft.soft_byte_limit ||
1328 x->curlft.packets >= x->lft.soft_packet_limit)) {
1329 x->km.dying = 1;
1330 km_state_expired(x, 0, 0);
1331 }
1332 return 0;
1333 }
1334 EXPORT_SYMBOL(xfrm_state_check_expire);
1335
1336 struct xfrm_state *
1337 xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto,
1338 unsigned short family)
1339 {
1340 struct xfrm_state *x;
1341
1342 spin_lock_bh(&xfrm_state_lock);
1343 x = __xfrm_state_lookup(daddr, spi, proto, family);
1344 spin_unlock_bh(&xfrm_state_lock);
1345 return x;
1346 }
1347 EXPORT_SYMBOL(xfrm_state_lookup);
1348
1349 struct xfrm_state *
1350 xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr,
1351 u8 proto, unsigned short family)
1352 {
1353 struct xfrm_state *x;
1354
1355 spin_lock_bh(&xfrm_state_lock);
1356 x = __xfrm_state_lookup_byaddr(daddr, saddr, proto, family);
1357 spin_unlock_bh(&xfrm_state_lock);
1358 return x;
1359 }
1360 EXPORT_SYMBOL(xfrm_state_lookup_byaddr);
1361
1362 struct xfrm_state *
1363 xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
1364 xfrm_address_t *daddr, xfrm_address_t *saddr,
1365 int create, unsigned short family)
1366 {
1367 struct xfrm_state *x;
1368
1369 spin_lock_bh(&xfrm_state_lock);
1370 x = __find_acq_core(family, mode, reqid, proto, daddr, saddr, create);
1371 spin_unlock_bh(&xfrm_state_lock);
1372
1373 return x;
1374 }
1375 EXPORT_SYMBOL(xfrm_find_acq);
1376
1377 #ifdef CONFIG_XFRM_SUB_POLICY
1378 int
1379 xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
1380 unsigned short family)
1381 {
1382 int err = 0;
1383 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
1384 if (!afinfo)
1385 return -EAFNOSUPPORT;
1386
1387 spin_lock_bh(&xfrm_state_lock);
1388 if (afinfo->tmpl_sort)
1389 err = afinfo->tmpl_sort(dst, src, n);
1390 spin_unlock_bh(&xfrm_state_lock);
1391 xfrm_state_put_afinfo(afinfo);
1392 return err;
1393 }
1394 EXPORT_SYMBOL(xfrm_tmpl_sort);
1395
1396 int
1397 xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
1398 unsigned short family)
1399 {
1400 int err = 0;
1401 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
1402 if (!afinfo)
1403 return -EAFNOSUPPORT;
1404
1405 spin_lock_bh(&xfrm_state_lock);
1406 if (afinfo->state_sort)
1407 err = afinfo->state_sort(dst, src, n);
1408 spin_unlock_bh(&xfrm_state_lock);
1409 xfrm_state_put_afinfo(afinfo);
1410 return err;
1411 }
1412 EXPORT_SYMBOL(xfrm_state_sort);
1413 #endif
1414
1415 /* Silly enough, but I'm lazy to build resolution list */
1416
1417 static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq)
1418 {
1419 int i;
1420
1421 for (i = 0; i <= xfrm_state_hmask; i++) {
1422 struct hlist_node *entry;
1423 struct xfrm_state *x;
1424
1425 hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
1426 if (x->km.seq == seq &&
1427 x->km.state == XFRM_STATE_ACQ) {
1428 xfrm_state_hold(x);
1429 return x;
1430 }
1431 }
1432 }
1433 return NULL;
1434 }
1435
1436 struct xfrm_state *xfrm_find_acq_byseq(u32 seq)
1437 {
1438 struct xfrm_state *x;
1439
1440 spin_lock_bh(&xfrm_state_lock);
1441 x = __xfrm_find_acq_byseq(seq);
1442 spin_unlock_bh(&xfrm_state_lock);
1443 return x;
1444 }
1445 EXPORT_SYMBOL(xfrm_find_acq_byseq);
1446
1447 u32 xfrm_get_acqseq(void)
1448 {
1449 u32 res;
1450 static u32 acqseq;
1451 static DEFINE_SPINLOCK(acqseq_lock);
1452
1453 spin_lock_bh(&acqseq_lock);
1454 res = (++acqseq ? : ++acqseq);
1455 spin_unlock_bh(&acqseq_lock);
1456 return res;
1457 }
1458 EXPORT_SYMBOL(xfrm_get_acqseq);
1459
/* Pick an unused SPI in [low, high] for x and link the state into the
 * byspi hash.  Returns 0 on success (or if x already has an SPI),
 * -ENOENT if no free SPI was found or the state is dead. */
int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
{
	unsigned int h;
	struct xfrm_state *x0;
	int err = -ENOENT;
	__be32 minspi = htonl(low);
	__be32 maxspi = htonl(high);

	spin_lock_bh(&x->lock);
	if (x->km.state == XFRM_STATE_DEAD)
		goto unlock;

	err = 0;
	if (x->id.spi)		/* already allocated */
		goto unlock;

	err = -ENOENT;

	if (minspi == maxspi) {
		/* Fixed SPI requested: just check it is free. */
		x0 = xfrm_state_lookup(&x->id.daddr, minspi, x->id.proto, x->props.family);
		if (x0) {
			xfrm_state_put(x0);
			goto unlock;
		}
		x->id.spi = minspi;
	} else {
		/* Probe random SPIs in the range; give up after
		 * high-low+1 attempts (probabilistic: may miss free
		 * values even when some remain). */
		u32 spi = 0;
		for (h=0; h<high-low+1; h++) {
			spi = low + net_random()%(high-low+1);
			x0 = xfrm_state_lookup(&x->id.daddr, htonl(spi), x->id.proto, x->props.family);
			if (x0 == NULL) {
				x->id.spi = htonl(spi);
				break;
			}
			xfrm_state_put(x0);
		}
	}
	if (x->id.spi) {
		/* Make the state visible to SPI-based lookups. */
		spin_lock_bh(&xfrm_state_lock);
		h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
		hlist_add_head(&x->byspi, xfrm_state_byspi+h);
		spin_unlock_bh(&xfrm_state_lock);

		err = 0;
	}

unlock:
	spin_unlock_bh(&x->lock);

	return err;
}
EXPORT_SYMBOL(xfrm_alloc_spi);
1512
/* Invoke func for every state matching proto, under xfrm_state_lock.
 * The callback is deferred by one iteration ("last") so the final match
 * can be recognised: non-final entries are passed their 1-based
 * position, the final entry is passed 0.  Returns -ENOENT if nothing
 * matched, otherwise the first non-zero value returned by func (or 0). */
int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*),
		    void *data)
{
	int i;
	struct xfrm_state *x, *last = NULL;
	struct hlist_node *entry;
	int count = 0;
	int err = 0;

	spin_lock_bh(&xfrm_state_lock);
	for (i = 0; i <= xfrm_state_hmask; i++) {
		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
			if (!xfrm_id_proto_match(x->id.proto, proto))
				continue;
			if (last) {
				err = func(last, count, data);
				if (err)
					goto out;
			}
			last = x;
			count++;
		}
	}
	if (count == 0) {
		err = -ENOENT;
		goto out;
	}
	/* Final (possibly only) entry: count argument 0 marks "last". */
	err = func(last, 0, data);
out:
	spin_unlock_bh(&xfrm_state_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_state_walk);
1546
1547
void xfrm_replay_notify(struct xfrm_state *x, int event)
{
	struct km_event c;
	/* we send notify messages in case
	 * 1. we updated one of the sequence numbers, and the seqno
	 * difference is at least x->replay_maxdiff, in this case we also
	 * update the timeout of our timer function
	 * 2. if x->replay_maxage has elapsed since last update,
	 * and there were changes
	 *
	 * The state structure must be locked!
	 */

	switch (event) {
	case XFRM_REPLAY_UPDATE:
		/* Too little progress since the last notification: skip,
		 * unless a timer-driven notification was deferred earlier
		 * (XFRM_TIME_DEFER), in which case send it now. */
		if (x->replay_maxdiff &&
		    (x->replay.seq - x->preplay.seq < x->replay_maxdiff) &&
		    (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff)) {
			if (x->xflags & XFRM_TIME_DEFER)
				event = XFRM_REPLAY_TIMEOUT;
			else
				return;
		}

		break;

	case XFRM_REPLAY_TIMEOUT:
		/* Nothing changed since the last notification: defer until
		 * the next update instead of sending an empty event. */
		if ((x->replay.seq == x->preplay.seq) &&
		    (x->replay.bitmap == x->preplay.bitmap) &&
		    (x->replay.oseq == x->preplay.oseq)) {
			x->xflags |= XFRM_TIME_DEFER;
			return;
		}

		break;
	}

	/* Snapshot the state being reported, then send the aevent. */
	memcpy(&x->preplay, &x->replay, sizeof(struct xfrm_replay_state));
	c.event = XFRM_MSG_NEWAE;
	c.data.aevent = event;
	km_state_notify(x, &c);

	/* Re-arm the aging timer; if it was not pending, any deferred
	 * notification has now been delivered. */
	if (x->replay_maxage &&
	    !mod_timer(&x->rtimer, jiffies + x->replay_maxage))
		x->xflags &= ~XFRM_TIME_DEFER;
}
1594
1595 static void xfrm_replay_timer_handler(unsigned long data)
1596 {
1597 struct xfrm_state *x = (struct xfrm_state*)data;
1598
1599 spin_lock(&x->lock);
1600
1601 if (x->km.state == XFRM_STATE_VALID) {
1602 if (xfrm_aevent_is_on())
1603 xfrm_replay_notify(x, XFRM_REPLAY_TIMEOUT);
1604 else
1605 x->xflags |= XFRM_TIME_DEFER;
1606 }
1607
1608 spin_unlock(&x->lock);
1609 }
1610
/* Validate an inbound sequence number against the replay window.
 * Returns 0 when acceptable, -EINVAL when the packet must be dropped
 * (zero seq, outside the window, or already seen). */
int xfrm_replay_check(struct xfrm_state *x, __be32 net_seq)
{
	u32 diff;
	u32 seq = ntohl(net_seq);

	if (unlikely(seq == 0))
		return -EINVAL;

	/* Newer than anything seen so far: always acceptable. */
	if (likely(seq > x->replay.seq))
		return 0;

	diff = x->replay.seq - seq;
	/* Clamp the window to the bitmap width so the shift below is
	 * always well defined. */
	if (diff >= min_t(unsigned int, x->props.replay_window,
			  sizeof(x->replay.bitmap) * 8)) {
		x->stats.replay_window++;
		return -EINVAL;
	}

	/* Duplicate inside the window. */
	if (x->replay.bitmap & (1U << diff)) {
		x->stats.replay++;
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL(xfrm_replay_check);
1636
/* Record an accepted sequence number in the replay window (caller has
 * presumably validated it with xfrm_replay_check() and holds x->lock —
 * verify at call sites). */
void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq)
{
	u32 diff;
	u32 seq = ntohl(net_seq);

	if (seq > x->replay.seq) {
		/* Window slides forward: shift history and mark seq. */
		diff = seq - x->replay.seq;
		/* NOTE(review): unlike xfrm_replay_check(), the shift count
		 * here is bounded only by props.replay_window — assumes the
		 * window was validated to be <= 32 (bitmap width) at
		 * configuration time; confirm against the config path. */
		if (diff < x->props.replay_window)
			x->replay.bitmap = ((x->replay.bitmap) << diff) | 1;
		else
			x->replay.bitmap = 1;
		x->replay.seq = seq;
	} else {
		/* Older in-window packet: just set its bit. */
		diff = x->replay.seq - seq;
		x->replay.bitmap |= (1U << diff);
	}

	if (xfrm_aevent_is_on())
		xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
}
EXPORT_SYMBOL(xfrm_replay_advance);
1658
1659 static LIST_HEAD(xfrm_km_list);
1660 static DEFINE_RWLOCK(xfrm_km_lock);
1661
1662 void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
1663 {
1664 struct xfrm_mgr *km;
1665
1666 read_lock(&xfrm_km_lock);
1667 list_for_each_entry(km, &xfrm_km_list, list)
1668 if (km->notify_policy)
1669 km->notify_policy(xp, dir, c);
1670 read_unlock(&xfrm_km_lock);
1671 }
1672
1673 void km_state_notify(struct xfrm_state *x, struct km_event *c)
1674 {
1675 struct xfrm_mgr *km;
1676 read_lock(&xfrm_km_lock);
1677 list_for_each_entry(km, &xfrm_km_list, list)
1678 if (km->notify)
1679 km->notify(x, c);
1680 read_unlock(&xfrm_km_lock);
1681 }
1682
1683 EXPORT_SYMBOL(km_policy_notify);
1684 EXPORT_SYMBOL(km_state_notify);
1685
1686 void km_state_expired(struct xfrm_state *x, int hard, u32 pid)
1687 {
1688 struct km_event c;
1689
1690 c.data.hard = hard;
1691 c.pid = pid;
1692 c.event = XFRM_MSG_EXPIRE;
1693 km_state_notify(x, &c);
1694
1695 if (hard)
1696 wake_up(&km_waitq);
1697 }
1698
1699 EXPORT_SYMBOL(km_state_expired);
1700 /*
1701 * We send to all registered managers regardless of failure
1702 * We are happy with one success
1703 */
1704 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
1705 {
1706 int err = -EINVAL, acqret;
1707 struct xfrm_mgr *km;
1708
1709 read_lock(&xfrm_km_lock);
1710 list_for_each_entry(km, &xfrm_km_list, list) {
1711 acqret = km->acquire(x, t, pol, XFRM_POLICY_OUT);
1712 if (!acqret)
1713 err = acqret;
1714 }
1715 read_unlock(&xfrm_km_lock);
1716 return err;
1717 }
1718 EXPORT_SYMBOL(km_query);
1719
1720 int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
1721 {
1722 int err = -EINVAL;
1723 struct xfrm_mgr *km;
1724
1725 read_lock(&xfrm_km_lock);
1726 list_for_each_entry(km, &xfrm_km_list, list) {
1727 if (km->new_mapping)
1728 err = km->new_mapping(x, ipaddr, sport);
1729 if (!err)
1730 break;
1731 }
1732 read_unlock(&xfrm_km_lock);
1733 return err;
1734 }
1735 EXPORT_SYMBOL(km_new_mapping);
1736
1737 void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid)
1738 {
1739 struct km_event c;
1740
1741 c.data.hard = hard;
1742 c.pid = pid;
1743 c.event = XFRM_MSG_POLEXPIRE;
1744 km_policy_notify(pol, dir, &c);
1745
1746 if (hard)
1747 wake_up(&km_waitq);
1748 }
1749 EXPORT_SYMBOL(km_policy_expired);
1750
#ifdef CONFIG_XFRM_MIGRATE
/* Tell every key manager about the migration; one success is enough to
 * report overall success. */
int km_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
	       struct xfrm_migrate *m, int num_migrate)
{
	struct xfrm_mgr *km;
	int err = -EINVAL;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		if (km->migrate &&
		    km->migrate(sel, dir, type, m, num_migrate) == 0)
			err = 0;
	}
	read_unlock(&xfrm_km_lock);

	return err;
}
EXPORT_SYMBOL(km_migrate);
#endif
1772
1773 int km_report(u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr)
1774 {
1775 int err = -EINVAL;
1776 int ret;
1777 struct xfrm_mgr *km;
1778
1779 read_lock(&xfrm_km_lock);
1780 list_for_each_entry(km, &xfrm_km_list, list) {
1781 if (km->report) {
1782 ret = km->report(proto, sel, addr);
1783 if (!ret)
1784 err = ret;
1785 }
1786 }
1787 read_unlock(&xfrm_km_lock);
1788 return err;
1789 }
1790 EXPORT_SYMBOL(km_report);
1791
/* Handle a per-socket policy setsockopt: copy the userspace blob and
 * ask each key manager in turn to compile it into an xfrm_policy.  A
 * manager that understands the format returns the policy direction via
 * err (>= 0), which is then used to insert the policy on the socket. */
int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
{
	int err;
	u8 *data;
	struct xfrm_mgr *km;
	struct xfrm_policy *pol = NULL;

	if (optlen <= 0 || optlen > PAGE_SIZE)
		return -EMSGSIZE;

	data = kmalloc(optlen, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	err = -EFAULT;
	if (copy_from_user(data, optval, optlen))
		goto out;

	err = -EINVAL;
	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		/* First manager whose compile_policy() succeeds wins. */
		pol = km->compile_policy(sk, optname, data,
					 optlen, &err);
		if (err >= 0)
			break;
	}
	read_unlock(&xfrm_km_lock);

	if (err >= 0) {
		/* err holds the direction here; the insert takes its own
		 * reference so drop ours. */
		xfrm_sk_policy_insert(sk, err, pol);
		xfrm_pol_put(pol);
		err = 0;
	}

out:
	kfree(data);
	return err;
}
EXPORT_SYMBOL(xfrm_user_policy);
1831
1832 int xfrm_register_km(struct xfrm_mgr *km)
1833 {
1834 write_lock_bh(&xfrm_km_lock);
1835 list_add_tail(&km->list, &xfrm_km_list);
1836 write_unlock_bh(&xfrm_km_lock);
1837 return 0;
1838 }
1839 EXPORT_SYMBOL(xfrm_register_km);
1840
1841 int xfrm_unregister_km(struct xfrm_mgr *km)
1842 {
1843 write_lock_bh(&xfrm_km_lock);
1844 list_del(&km->list);
1845 write_unlock_bh(&xfrm_km_lock);
1846 return 0;
1847 }
1848 EXPORT_SYMBOL(xfrm_unregister_km);
1849
1850 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
1851 {
1852 int err = 0;
1853 if (unlikely(afinfo == NULL))
1854 return -EINVAL;
1855 if (unlikely(afinfo->family >= NPROTO))
1856 return -EAFNOSUPPORT;
1857 write_lock_bh(&xfrm_state_afinfo_lock);
1858 if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
1859 err = -ENOBUFS;
1860 else
1861 xfrm_state_afinfo[afinfo->family] = afinfo;
1862 write_unlock_bh(&xfrm_state_afinfo_lock);
1863 return err;
1864 }
1865 EXPORT_SYMBOL(xfrm_state_register_afinfo);
1866
1867 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
1868 {
1869 int err = 0;
1870 if (unlikely(afinfo == NULL))
1871 return -EINVAL;
1872 if (unlikely(afinfo->family >= NPROTO))
1873 return -EAFNOSUPPORT;
1874 write_lock_bh(&xfrm_state_afinfo_lock);
1875 if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
1876 if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo))
1877 err = -EINVAL;
1878 else
1879 xfrm_state_afinfo[afinfo->family] = NULL;
1880 }
1881 write_unlock_bh(&xfrm_state_afinfo_lock);
1882 return err;
1883 }
1884 EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
1885
/* Look up the per-family state ops.  On success the afinfo read lock is
 * LEFT HELD and must be released with xfrm_state_put_afinfo(); on
 * failure (unknown family or none registered) the lock is dropped and
 * NULL is returned. */
static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
{
	struct xfrm_state_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	read_lock(&xfrm_state_afinfo_lock);
	afinfo = xfrm_state_afinfo[family];
	if (unlikely(!afinfo))
		read_unlock(&xfrm_state_afinfo_lock);
	return afinfo;
}
1897
/* Release the read lock taken by a successful xfrm_state_get_afinfo();
 * the afinfo argument only documents the pairing and is unused. */
static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
{
	read_unlock(&xfrm_state_afinfo_lock);
}
1902
1903 /* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
1904 void xfrm_state_delete_tunnel(struct xfrm_state *x)
1905 {
1906 if (x->tunnel) {
1907 struct xfrm_state *t = x->tunnel;
1908
1909 if (atomic_read(&t->tunnel_users) == 2)
1910 xfrm_state_delete(t);
1911 atomic_dec(&t->tunnel_users);
1912 xfrm_state_put(t);
1913 x->tunnel = NULL;
1914 }
1915 }
1916 EXPORT_SYMBOL(xfrm_state_delete_tunnel);
1917
1918 int xfrm_state_mtu(struct xfrm_state *x, int mtu)
1919 {
1920 int res;
1921
1922 spin_lock_bh(&x->lock);
1923 if (x->km.state == XFRM_STATE_VALID &&
1924 x->type && x->type->get_mtu)
1925 res = x->type->get_mtu(x, mtu);
1926 else
1927 res = mtu - x->props.header_len;
1928 spin_unlock_bh(&x->lock);
1929 return res;
1930 }
1931
1932 int xfrm_init_state(struct xfrm_state *x)
1933 {
1934 struct xfrm_state_afinfo *afinfo;
1935 int family = x->props.family;
1936 int err;
1937
1938 err = -EAFNOSUPPORT;
1939 afinfo = xfrm_state_get_afinfo(family);
1940 if (!afinfo)
1941 goto error;
1942
1943 err = 0;
1944 if (afinfo->init_flags)
1945 err = afinfo->init_flags(x);
1946
1947 xfrm_state_put_afinfo(afinfo);
1948
1949 if (err)
1950 goto error;
1951
1952 err = -EPROTONOSUPPORT;
1953 x->inner_mode = xfrm_get_mode(x->props.mode, x->sel.family);
1954 if (x->inner_mode == NULL)
1955 goto error;
1956
1957 if (!(x->inner_mode->flags & XFRM_MODE_FLAG_TUNNEL) &&
1958 family != x->sel.family)
1959 goto error;
1960
1961 x->type = xfrm_get_type(x->id.proto, family);
1962 if (x->type == NULL)
1963 goto error;
1964
1965 err = x->type->init_state(x);
1966 if (err)
1967 goto error;
1968
1969 x->outer_mode = xfrm_get_mode(x->props.mode, family);
1970 if (x->outer_mode == NULL)
1971 goto error;
1972
1973 x->km.state = XFRM_STATE_VALID;
1974
1975 error:
1976 return err;
1977 }
1978
1979 EXPORT_SYMBOL(xfrm_init_state);
1980
1981 void __init xfrm_state_init(void)
1982 {
1983 unsigned int sz;
1984
1985 sz = sizeof(struct hlist_head) * 8;
1986
1987 xfrm_state_bydst = xfrm_hash_alloc(sz);
1988 xfrm_state_bysrc = xfrm_hash_alloc(sz);
1989 xfrm_state_byspi = xfrm_hash_alloc(sz);
1990 if (!xfrm_state_bydst || !xfrm_state_bysrc || !xfrm_state_byspi)
1991 panic("XFRM: Cannot allocate bydst/bysrc/byspi hashes.");
1992 xfrm_state_hmask = ((sz / sizeof(struct hlist_head)) - 1);
1993
1994 INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task);
1995 }
1996
1997 #ifdef CONFIG_AUDITSYSCALL
/* Append the security context and src/dst addresses of x to an audit
 * record.  Only AF_INET and AF_INET6 addresses are formatted; other
 * families are silently skipped. */
static inline void xfrm_audit_common_stateinfo(struct xfrm_state *x,
					       struct audit_buffer *audit_buf)
{
	if (x->security)
		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
				 x->security->ctx_alg, x->security->ctx_doi,
				 x->security->ctx_str);

	switch(x->props.family) {
	case AF_INET:
		audit_log_format(audit_buf, " src=%u.%u.%u.%u dst=%u.%u.%u.%u",
				 NIPQUAD(x->props.saddr.a4),
				 NIPQUAD(x->id.daddr.a4));
		break;
	case AF_INET6:
		{
			struct in6_addr saddr6, daddr6;

			/* Copy into properly typed locals before handing
			 * the addresses to the NIP6 formatter. */
			memcpy(&saddr6, x->props.saddr.a6,
			       sizeof(struct in6_addr));
			memcpy(&daddr6, x->id.daddr.a6,
			       sizeof(struct in6_addr));
			audit_log_format(audit_buf,
					 " src=" NIP6_FMT " dst=" NIP6_FMT,
					 NIP6(saddr6), NIP6(daddr6));
		}
		break;
	}
}
2027
2028 void
2029 xfrm_audit_state_add(struct xfrm_state *x, int result, u32 auid, u32 sid)
2030 {
2031 struct audit_buffer *audit_buf;
2032 u32 spi;
2033 extern int audit_enabled;
2034
2035 if (audit_enabled == 0)
2036 return;
2037 audit_buf = xfrm_audit_start(auid, sid);
2038 if (audit_buf == NULL)
2039 return;
2040 audit_log_format(audit_buf, " op=SAD-add res=%u",result);
2041 xfrm_audit_common_stateinfo(x, audit_buf);
2042 spi = ntohl(x->id.spi);
2043 audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
2044 audit_log_end(audit_buf);
2045 }
2046 EXPORT_SYMBOL_GPL(xfrm_audit_state_add);
2047
2048 void
2049 xfrm_audit_state_delete(struct xfrm_state *x, int result, u32 auid, u32 sid)
2050 {
2051 struct audit_buffer *audit_buf;
2052 u32 spi;
2053 extern int audit_enabled;
2054
2055 if (audit_enabled == 0)
2056 return;
2057 audit_buf = xfrm_audit_start(auid, sid);
2058 if (audit_buf == NULL)
2059 return;
2060 audit_log_format(audit_buf, " op=SAD-delete res=%u",result);
2061 xfrm_audit_common_stateinfo(x, audit_buf);
2062 spi = ntohl(x->id.spi);
2063 audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
2064 audit_log_end(audit_buf);
2065 }
2066 EXPORT_SYMBOL_GPL(xfrm_audit_state_delete);
2067 #endif /* CONFIG_AUDITSYSCALL */