6 * Kazunori MIYAZAWA @USAGI
7 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
9 * YOSHIFUJI Hideaki @USAGI
10 * Split up af-specific functions
11 * Derek Atkins <derek@ihtfp.com>
12 * Add UDP Encapsulation
16 #include <linux/workqueue.h>
18 #include <linux/pfkeyv2.h>
19 #include <linux/ipsec.h>
20 #include <linux/module.h>
21 #include <asm/uaccess.h>
24 EXPORT_SYMBOL(xfrm_nl
);
26 u32 sysctl_xfrm_aevent_etime
= XFRM_AE_ETIME
;
27 EXPORT_SYMBOL(sysctl_xfrm_aevent_etime
);
29 u32 sysctl_xfrm_aevent_rseqth
= XFRM_AE_SEQT_SIZE
;
30 EXPORT_SYMBOL(sysctl_xfrm_aevent_rseqth
);
32 /* Each xfrm_state may be linked to two tables:
34 1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
35 2. Hash table by daddr to find what SAs exist for given
36 destination/tunnel endpoint. (output)
39 static DEFINE_SPINLOCK(xfrm_state_lock
);
41 /* Hash table to find appropriate SA towards given target (endpoint
42 * of tunnel or destination of transport mode) allowed by selector.
44 * Main use is finding SA after policy selected tunnel or transport mode.
45 * Also, it can be used by ah/esp icmp error handler to find offending SA.
47 static struct list_head xfrm_state_bydst
[XFRM_DST_HSIZE
];
48 static struct list_head xfrm_state_bysrc
[XFRM_DST_HSIZE
];
49 static struct list_head xfrm_state_byspi
[XFRM_DST_HSIZE
];
51 DECLARE_WAIT_QUEUE_HEAD(km_waitq
);
52 EXPORT_SYMBOL(km_waitq
);
54 static DEFINE_RWLOCK(xfrm_state_afinfo_lock
);
55 static struct xfrm_state_afinfo
*xfrm_state_afinfo
[NPROTO
];
57 static struct work_struct xfrm_state_gc_work
;
58 static struct list_head xfrm_state_gc_list
= LIST_HEAD_INIT(xfrm_state_gc_list
);
59 static DEFINE_SPINLOCK(xfrm_state_gc_lock
);
61 static int xfrm_state_gc_flush_bundles
;
63 int __xfrm_state_delete(struct xfrm_state
*x
);
65 static struct xfrm_state_afinfo
*xfrm_state_get_afinfo(unsigned short family
);
66 static void xfrm_state_put_afinfo(struct xfrm_state_afinfo
*afinfo
);
68 int km_query(struct xfrm_state
*x
, struct xfrm_tmpl
*t
, struct xfrm_policy
*pol
);
69 void km_state_expired(struct xfrm_state
*x
, int hard
, u32 pid
);
71 static void xfrm_state_gc_destroy(struct xfrm_state
*x
)
73 if (del_timer(&x
->timer
))
75 if (del_timer(&x
->rtimer
))
83 xfrm_put_mode(x
->mode
);
85 x
->type
->destructor(x
);
86 xfrm_put_type(x
->type
);
88 security_xfrm_state_free(x
);
92 static void xfrm_state_gc_task(void *data
)
95 struct list_head
*entry
, *tmp
;
96 struct list_head gc_list
= LIST_HEAD_INIT(gc_list
);
98 if (xfrm_state_gc_flush_bundles
) {
99 xfrm_state_gc_flush_bundles
= 0;
100 xfrm_flush_bundles();
103 spin_lock_bh(&xfrm_state_gc_lock
);
104 list_splice_init(&xfrm_state_gc_list
, &gc_list
);
105 spin_unlock_bh(&xfrm_state_gc_lock
);
107 list_for_each_safe(entry
, tmp
, &gc_list
) {
108 x
= list_entry(entry
, struct xfrm_state
, bydst
);
109 xfrm_state_gc_destroy(x
);
114 static inline unsigned long make_jiffies(long secs
)
116 if (secs
>= (MAX_SCHEDULE_TIMEOUT
-1)/HZ
)
117 return MAX_SCHEDULE_TIMEOUT
-1;
122 static void xfrm_timer_handler(unsigned long data
)
124 struct xfrm_state
*x
= (struct xfrm_state
*)data
;
125 unsigned long now
= (unsigned long)xtime
.tv_sec
;
126 long next
= LONG_MAX
;
130 if (x
->km
.state
== XFRM_STATE_DEAD
)
132 if (x
->km
.state
== XFRM_STATE_EXPIRED
)
134 if (x
->lft
.hard_add_expires_seconds
) {
135 long tmo
= x
->lft
.hard_add_expires_seconds
+
136 x
->curlft
.add_time
- now
;
142 if (x
->lft
.hard_use_expires_seconds
) {
143 long tmo
= x
->lft
.hard_use_expires_seconds
+
144 (x
->curlft
.use_time
? : now
) - now
;
152 if (x
->lft
.soft_add_expires_seconds
) {
153 long tmo
= x
->lft
.soft_add_expires_seconds
+
154 x
->curlft
.add_time
- now
;
160 if (x
->lft
.soft_use_expires_seconds
) {
161 long tmo
= x
->lft
.soft_use_expires_seconds
+
162 (x
->curlft
.use_time
? : now
) - now
;
171 km_state_expired(x
, 0, 0);
173 if (next
!= LONG_MAX
&&
174 !mod_timer(&x
->timer
, jiffies
+ make_jiffies(next
)))
179 if (x
->km
.state
== XFRM_STATE_ACQ
&& x
->id
.spi
== 0) {
180 x
->km
.state
= XFRM_STATE_EXPIRED
;
185 if (!__xfrm_state_delete(x
) && x
->id
.spi
)
186 km_state_expired(x
, 1, 0);
189 spin_unlock(&x
->lock
);
193 static void xfrm_replay_timer_handler(unsigned long data
);
195 struct xfrm_state
*xfrm_state_alloc(void)
197 struct xfrm_state
*x
;
199 x
= kzalloc(sizeof(struct xfrm_state
), GFP_ATOMIC
);
202 atomic_set(&x
->refcnt
, 1);
203 atomic_set(&x
->tunnel_users
, 0);
204 INIT_LIST_HEAD(&x
->bydst
);
205 INIT_LIST_HEAD(&x
->bysrc
);
206 INIT_LIST_HEAD(&x
->byspi
);
207 init_timer(&x
->timer
);
208 x
->timer
.function
= xfrm_timer_handler
;
209 x
->timer
.data
= (unsigned long)x
;
210 init_timer(&x
->rtimer
);
211 x
->rtimer
.function
= xfrm_replay_timer_handler
;
212 x
->rtimer
.data
= (unsigned long)x
;
213 x
->curlft
.add_time
= (unsigned long)xtime
.tv_sec
;
214 x
->lft
.soft_byte_limit
= XFRM_INF
;
215 x
->lft
.soft_packet_limit
= XFRM_INF
;
216 x
->lft
.hard_byte_limit
= XFRM_INF
;
217 x
->lft
.hard_packet_limit
= XFRM_INF
;
218 x
->replay_maxage
= 0;
219 x
->replay_maxdiff
= 0;
220 spin_lock_init(&x
->lock
);
224 EXPORT_SYMBOL(xfrm_state_alloc
);
226 void __xfrm_state_destroy(struct xfrm_state
*x
)
228 BUG_TRAP(x
->km
.state
== XFRM_STATE_DEAD
);
230 spin_lock_bh(&xfrm_state_gc_lock
);
231 list_add(&x
->bydst
, &xfrm_state_gc_list
);
232 spin_unlock_bh(&xfrm_state_gc_lock
);
233 schedule_work(&xfrm_state_gc_work
);
235 EXPORT_SYMBOL(__xfrm_state_destroy
);
237 int __xfrm_state_delete(struct xfrm_state
*x
)
241 if (x
->km
.state
!= XFRM_STATE_DEAD
) {
242 x
->km
.state
= XFRM_STATE_DEAD
;
243 spin_lock(&xfrm_state_lock
);
252 spin_unlock(&xfrm_state_lock
);
253 if (del_timer(&x
->timer
))
255 if (del_timer(&x
->rtimer
))
258 /* The number two in this test is the reference
259 * mentioned in the comment below plus the reference
260 * our caller holds. A larger value means that
261 * there are DSTs attached to this xfrm_state.
263 if (atomic_read(&x
->refcnt
) > 2) {
264 xfrm_state_gc_flush_bundles
= 1;
265 schedule_work(&xfrm_state_gc_work
);
268 /* All xfrm_state objects are created by xfrm_state_alloc.
269 * The xfrm_state_alloc call gives a reference, and that
270 * is what we are dropping here.
278 EXPORT_SYMBOL(__xfrm_state_delete
);
280 int xfrm_state_delete(struct xfrm_state
*x
)
284 spin_lock_bh(&x
->lock
);
285 err
= __xfrm_state_delete(x
);
286 spin_unlock_bh(&x
->lock
);
290 EXPORT_SYMBOL(xfrm_state_delete
);
292 void xfrm_state_flush(u8 proto
)
295 struct xfrm_state
*x
;
297 spin_lock_bh(&xfrm_state_lock
);
298 for (i
= 0; i
< XFRM_DST_HSIZE
; i
++) {
300 list_for_each_entry(x
, xfrm_state_bydst
+i
, bydst
) {
301 if (!xfrm_state_kern(x
) &&
302 xfrm_id_proto_match(x
->id
.proto
, proto
)) {
304 spin_unlock_bh(&xfrm_state_lock
);
306 xfrm_state_delete(x
);
309 spin_lock_bh(&xfrm_state_lock
);
314 spin_unlock_bh(&xfrm_state_lock
);
317 EXPORT_SYMBOL(xfrm_state_flush
);
320 xfrm_init_tempsel(struct xfrm_state
*x
, struct flowi
*fl
,
321 struct xfrm_tmpl
*tmpl
,
322 xfrm_address_t
*daddr
, xfrm_address_t
*saddr
,
323 unsigned short family
)
325 struct xfrm_state_afinfo
*afinfo
= xfrm_state_get_afinfo(family
);
328 afinfo
->init_tempsel(x
, fl
, tmpl
, daddr
, saddr
);
329 xfrm_state_put_afinfo(afinfo
);
334 xfrm_state_find(xfrm_address_t
*daddr
, xfrm_address_t
*saddr
,
335 struct flowi
*fl
, struct xfrm_tmpl
*tmpl
,
336 struct xfrm_policy
*pol
, int *err
,
337 unsigned short family
)
339 unsigned h
= xfrm_dst_hash(daddr
, family
);
340 struct xfrm_state
*x
, *x0
;
341 int acquire_in_progress
= 0;
343 struct xfrm_state
*best
= NULL
;
344 struct xfrm_state_afinfo
*afinfo
;
346 afinfo
= xfrm_state_get_afinfo(family
);
347 if (afinfo
== NULL
) {
348 *err
= -EAFNOSUPPORT
;
352 spin_lock_bh(&xfrm_state_lock
);
353 list_for_each_entry(x
, xfrm_state_bydst
+h
, bydst
) {
354 if (x
->props
.family
== family
&&
355 x
->props
.reqid
== tmpl
->reqid
&&
356 !(x
->props
.flags
& XFRM_STATE_WILDRECV
) &&
357 xfrm_state_addr_check(x
, daddr
, saddr
, family
) &&
358 tmpl
->mode
== x
->props
.mode
&&
359 tmpl
->id
.proto
== x
->id
.proto
&&
360 (tmpl
->id
.spi
== x
->id
.spi
|| !tmpl
->id
.spi
)) {
362 1. There is a valid state with matching selector.
364 2. Valid state with inappropriate selector. Skip.
366 Entering area of "sysdeps".
368 3. If state is not valid, selector is temporary,
369 it selects only session which triggered
370 previous resolution. Key manager will do
371 something to install a state with proper
374 if (x
->km
.state
== XFRM_STATE_VALID
) {
375 if (!xfrm_selector_match(&x
->sel
, fl
, family
) ||
376 !security_xfrm_state_pol_flow_match(x
, pol
, fl
))
379 best
->km
.dying
> x
->km
.dying
||
380 (best
->km
.dying
== x
->km
.dying
&&
381 best
->curlft
.add_time
< x
->curlft
.add_time
))
383 } else if (x
->km
.state
== XFRM_STATE_ACQ
) {
384 acquire_in_progress
= 1;
385 } else if (x
->km
.state
== XFRM_STATE_ERROR
||
386 x
->km
.state
== XFRM_STATE_EXPIRED
) {
387 if (xfrm_selector_match(&x
->sel
, fl
, family
) &&
388 security_xfrm_state_pol_flow_match(x
, pol
, fl
))
395 if (!x
&& !error
&& !acquire_in_progress
) {
397 (x0
= afinfo
->state_lookup(daddr
, tmpl
->id
.spi
,
398 tmpl
->id
.proto
)) != NULL
) {
403 x
= xfrm_state_alloc();
408 /* Initialize temporary selector matching only
409 * to current session. */
410 xfrm_init_tempsel(x
, fl
, tmpl
, daddr
, saddr
, family
);
412 error
= security_xfrm_state_alloc_acquire(x
, pol
->security
, fl
->secid
);
414 x
->km
.state
= XFRM_STATE_DEAD
;
420 if (km_query(x
, tmpl
, pol
) == 0) {
421 x
->km
.state
= XFRM_STATE_ACQ
;
422 list_add_tail(&x
->bydst
, xfrm_state_bydst
+h
);
424 list_add_tail(&x
->bysrc
, xfrm_state_bysrc
+h
);
427 h
= xfrm_spi_hash(&x
->id
.daddr
, x
->id
.spi
, x
->id
.proto
, family
);
428 list_add(&x
->byspi
, xfrm_state_byspi
+h
);
431 x
->lft
.hard_add_expires_seconds
= XFRM_ACQ_EXPIRES
;
433 x
->timer
.expires
= jiffies
+ XFRM_ACQ_EXPIRES
*HZ
;
434 add_timer(&x
->timer
);
436 x
->km
.state
= XFRM_STATE_DEAD
;
446 *err
= acquire_in_progress
? -EAGAIN
: error
;
447 spin_unlock_bh(&xfrm_state_lock
);
448 xfrm_state_put_afinfo(afinfo
);
452 static void __xfrm_state_insert(struct xfrm_state
*x
)
454 unsigned h
= xfrm_dst_hash(&x
->id
.daddr
, x
->props
.family
);
456 list_add(&x
->bydst
, xfrm_state_bydst
+h
);
459 h
= xfrm_src_hash(&x
->props
.saddr
, x
->props
.family
);
461 list_add(&x
->bysrc
, xfrm_state_bysrc
+h
);
464 if (xfrm_id_proto_match(x
->id
.proto
, IPSEC_PROTO_ANY
)) {
465 h
= xfrm_spi_hash(&x
->id
.daddr
, x
->id
.spi
, x
->id
.proto
,
468 list_add(&x
->byspi
, xfrm_state_byspi
+h
);
472 if (!mod_timer(&x
->timer
, jiffies
+ HZ
))
475 if (x
->replay_maxage
&&
476 !mod_timer(&x
->rtimer
, jiffies
+ x
->replay_maxage
))
482 void xfrm_state_insert(struct xfrm_state
*x
)
484 spin_lock_bh(&xfrm_state_lock
);
485 __xfrm_state_insert(x
);
486 spin_unlock_bh(&xfrm_state_lock
);
488 xfrm_flush_all_bundles();
490 EXPORT_SYMBOL(xfrm_state_insert
);
492 static inline struct xfrm_state
*
493 __xfrm_state_locate(struct xfrm_state_afinfo
*afinfo
, struct xfrm_state
*x
,
497 return afinfo
->state_lookup(&x
->id
.daddr
, x
->id
.spi
, x
->id
.proto
);
499 return afinfo
->state_lookup_byaddr(&x
->id
.daddr
, &x
->props
.saddr
, x
->id
.proto
);
502 static struct xfrm_state
*__xfrm_find_acq_byseq(u32 seq
);
504 int xfrm_state_add(struct xfrm_state
*x
)
506 struct xfrm_state_afinfo
*afinfo
;
507 struct xfrm_state
*x1
;
510 int use_spi
= xfrm_id_proto_match(x
->id
.proto
, IPSEC_PROTO_ANY
);
512 family
= x
->props
.family
;
513 afinfo
= xfrm_state_get_afinfo(family
);
514 if (unlikely(afinfo
== NULL
))
515 return -EAFNOSUPPORT
;
517 spin_lock_bh(&xfrm_state_lock
);
519 x1
= __xfrm_state_locate(afinfo
, x
, use_spi
);
527 if (use_spi
&& x
->km
.seq
) {
528 x1
= __xfrm_find_acq_byseq(x
->km
.seq
);
529 if (x1
&& xfrm_addr_cmp(&x1
->id
.daddr
, &x
->id
.daddr
, family
)) {
536 x1
= afinfo
->find_acq(
537 x
->props
.mode
, x
->props
.reqid
, x
->id
.proto
,
538 &x
->id
.daddr
, &x
->props
.saddr
, 0);
540 __xfrm_state_insert(x
);
544 spin_unlock_bh(&xfrm_state_lock
);
545 xfrm_state_put_afinfo(afinfo
);
548 xfrm_flush_all_bundles();
551 xfrm_state_delete(x1
);
557 EXPORT_SYMBOL(xfrm_state_add
);
559 int xfrm_state_update(struct xfrm_state
*x
)
561 struct xfrm_state_afinfo
*afinfo
;
562 struct xfrm_state
*x1
;
564 int use_spi
= xfrm_id_proto_match(x
->id
.proto
, IPSEC_PROTO_ANY
);
566 afinfo
= xfrm_state_get_afinfo(x
->props
.family
);
567 if (unlikely(afinfo
== NULL
))
568 return -EAFNOSUPPORT
;
570 spin_lock_bh(&xfrm_state_lock
);
571 x1
= __xfrm_state_locate(afinfo
, x
, use_spi
);
577 if (xfrm_state_kern(x1
)) {
583 if (x1
->km
.state
== XFRM_STATE_ACQ
) {
584 __xfrm_state_insert(x
);
590 spin_unlock_bh(&xfrm_state_lock
);
591 xfrm_state_put_afinfo(afinfo
);
597 xfrm_state_delete(x1
);
603 spin_lock_bh(&x1
->lock
);
604 if (likely(x1
->km
.state
== XFRM_STATE_VALID
)) {
605 if (x
->encap
&& x1
->encap
)
606 memcpy(x1
->encap
, x
->encap
, sizeof(*x1
->encap
));
607 if (x
->coaddr
&& x1
->coaddr
) {
608 memcpy(x1
->coaddr
, x
->coaddr
, sizeof(*x1
->coaddr
));
610 if (!use_spi
&& memcmp(&x1
->sel
, &x
->sel
, sizeof(x1
->sel
)))
611 memcpy(&x1
->sel
, &x
->sel
, sizeof(x1
->sel
));
612 memcpy(&x1
->lft
, &x
->lft
, sizeof(x1
->lft
));
615 if (!mod_timer(&x1
->timer
, jiffies
+ HZ
))
617 if (x1
->curlft
.use_time
)
618 xfrm_state_check_expire(x1
);
622 spin_unlock_bh(&x1
->lock
);
628 EXPORT_SYMBOL(xfrm_state_update
);
630 int xfrm_state_check_expire(struct xfrm_state
*x
)
632 if (!x
->curlft
.use_time
)
633 x
->curlft
.use_time
= (unsigned long)xtime
.tv_sec
;
635 if (x
->km
.state
!= XFRM_STATE_VALID
)
638 if (x
->curlft
.bytes
>= x
->lft
.hard_byte_limit
||
639 x
->curlft
.packets
>= x
->lft
.hard_packet_limit
) {
640 x
->km
.state
= XFRM_STATE_EXPIRED
;
641 if (!mod_timer(&x
->timer
, jiffies
))
647 (x
->curlft
.bytes
>= x
->lft
.soft_byte_limit
||
648 x
->curlft
.packets
>= x
->lft
.soft_packet_limit
)) {
650 km_state_expired(x
, 0, 0);
654 EXPORT_SYMBOL(xfrm_state_check_expire
);
656 static int xfrm_state_check_space(struct xfrm_state
*x
, struct sk_buff
*skb
)
658 int nhead
= x
->props
.header_len
+ LL_RESERVED_SPACE(skb
->dst
->dev
)
662 return pskb_expand_head(skb
, nhead
, 0, GFP_ATOMIC
);
664 /* Check tail too... */
/* Validate @x for use on @skb: first check lifetime expiry, then make
 * sure @skb has room for this state's header.  Returns 0 or -errno. */
int xfrm_state_check(struct xfrm_state *x, struct sk_buff *skb)
{
	int err = xfrm_state_check_expire(x);
	if (err)
		goto err;
	err = xfrm_state_check_space(x, skb);
err:
	return err;
}
EXPORT_SYMBOL(xfrm_state_check);
680 xfrm_state_lookup(xfrm_address_t
*daddr
, u32 spi
, u8 proto
,
681 unsigned short family
)
683 struct xfrm_state
*x
;
684 struct xfrm_state_afinfo
*afinfo
= xfrm_state_get_afinfo(family
);
688 spin_lock_bh(&xfrm_state_lock
);
689 x
= afinfo
->state_lookup(daddr
, spi
, proto
);
690 spin_unlock_bh(&xfrm_state_lock
);
691 xfrm_state_put_afinfo(afinfo
);
694 EXPORT_SYMBOL(xfrm_state_lookup
);
697 xfrm_state_lookup_byaddr(xfrm_address_t
*daddr
, xfrm_address_t
*saddr
,
698 u8 proto
, unsigned short family
)
700 struct xfrm_state
*x
;
701 struct xfrm_state_afinfo
*afinfo
= xfrm_state_get_afinfo(family
);
705 spin_lock_bh(&xfrm_state_lock
);
706 x
= afinfo
->state_lookup_byaddr(daddr
, saddr
, proto
);
707 spin_unlock_bh(&xfrm_state_lock
);
708 xfrm_state_put_afinfo(afinfo
);
711 EXPORT_SYMBOL(xfrm_state_lookup_byaddr
);
714 xfrm_find_acq(u8 mode
, u32 reqid
, u8 proto
,
715 xfrm_address_t
*daddr
, xfrm_address_t
*saddr
,
716 int create
, unsigned short family
)
718 struct xfrm_state
*x
;
719 struct xfrm_state_afinfo
*afinfo
= xfrm_state_get_afinfo(family
);
723 spin_lock_bh(&xfrm_state_lock
);
724 x
= afinfo
->find_acq(mode
, reqid
, proto
, daddr
, saddr
, create
);
725 spin_unlock_bh(&xfrm_state_lock
);
726 xfrm_state_put_afinfo(afinfo
);
729 EXPORT_SYMBOL(xfrm_find_acq
);
731 #ifdef CONFIG_XFRM_SUB_POLICY
733 xfrm_tmpl_sort(struct xfrm_tmpl
**dst
, struct xfrm_tmpl
**src
, int n
,
734 unsigned short family
)
737 struct xfrm_state_afinfo
*afinfo
= xfrm_state_get_afinfo(family
);
739 return -EAFNOSUPPORT
;
741 spin_lock_bh(&xfrm_state_lock
);
742 if (afinfo
->tmpl_sort
)
743 err
= afinfo
->tmpl_sort(dst
, src
, n
);
744 spin_unlock_bh(&xfrm_state_lock
);
745 xfrm_state_put_afinfo(afinfo
);
748 EXPORT_SYMBOL(xfrm_tmpl_sort
);
751 xfrm_state_sort(struct xfrm_state
**dst
, struct xfrm_state
**src
, int n
,
752 unsigned short family
)
755 struct xfrm_state_afinfo
*afinfo
= xfrm_state_get_afinfo(family
);
757 return -EAFNOSUPPORT
;
759 spin_lock_bh(&xfrm_state_lock
);
760 if (afinfo
->state_sort
)
761 err
= afinfo
->state_sort(dst
, src
, n
);
762 spin_unlock_bh(&xfrm_state_lock
);
763 xfrm_state_put_afinfo(afinfo
);
766 EXPORT_SYMBOL(xfrm_state_sort
);
769 /* Silly enough, but I'm lazy to build resolution list */
771 static struct xfrm_state
*__xfrm_find_acq_byseq(u32 seq
)
774 struct xfrm_state
*x
;
776 for (i
= 0; i
< XFRM_DST_HSIZE
; i
++) {
777 list_for_each_entry(x
, xfrm_state_bydst
+i
, bydst
) {
778 if (x
->km
.seq
== seq
&& x
->km
.state
== XFRM_STATE_ACQ
) {
787 struct xfrm_state
*xfrm_find_acq_byseq(u32 seq
)
789 struct xfrm_state
*x
;
791 spin_lock_bh(&xfrm_state_lock
);
792 x
= __xfrm_find_acq_byseq(seq
);
793 spin_unlock_bh(&xfrm_state_lock
);
796 EXPORT_SYMBOL(xfrm_find_acq_byseq
);
798 u32
xfrm_get_acqseq(void)
802 static DEFINE_SPINLOCK(acqseq_lock
);
804 spin_lock_bh(&acqseq_lock
);
805 res
= (++acqseq
? : ++acqseq
);
806 spin_unlock_bh(&acqseq_lock
);
809 EXPORT_SYMBOL(xfrm_get_acqseq
);
812 xfrm_alloc_spi(struct xfrm_state
*x
, u32 minspi
, u32 maxspi
)
815 struct xfrm_state
*x0
;
820 if (minspi
== maxspi
) {
821 x0
= xfrm_state_lookup(&x
->id
.daddr
, minspi
, x
->id
.proto
, x
->props
.family
);
829 minspi
= ntohl(minspi
);
830 maxspi
= ntohl(maxspi
);
831 for (h
=0; h
<maxspi
-minspi
+1; h
++) {
832 spi
= minspi
+ net_random()%(maxspi
-minspi
+1);
833 x0
= xfrm_state_lookup(&x
->id
.daddr
, htonl(spi
), x
->id
.proto
, x
->props
.family
);
835 x
->id
.spi
= htonl(spi
);
842 spin_lock_bh(&xfrm_state_lock
);
843 h
= xfrm_spi_hash(&x
->id
.daddr
, x
->id
.spi
, x
->id
.proto
, x
->props
.family
);
844 list_add(&x
->byspi
, xfrm_state_byspi
+h
);
846 spin_unlock_bh(&xfrm_state_lock
);
850 EXPORT_SYMBOL(xfrm_alloc_spi
);
852 int xfrm_state_walk(u8 proto
, int (*func
)(struct xfrm_state
*, int, void*),
856 struct xfrm_state
*x
;
860 spin_lock_bh(&xfrm_state_lock
);
861 for (i
= 0; i
< XFRM_DST_HSIZE
; i
++) {
862 list_for_each_entry(x
, xfrm_state_bydst
+i
, bydst
) {
863 if (xfrm_id_proto_match(x
->id
.proto
, proto
))
872 for (i
= 0; i
< XFRM_DST_HSIZE
; i
++) {
873 list_for_each_entry(x
, xfrm_state_bydst
+i
, bydst
) {
874 if (!xfrm_id_proto_match(x
->id
.proto
, proto
))
876 err
= func(x
, --count
, data
);
882 spin_unlock_bh(&xfrm_state_lock
);
885 EXPORT_SYMBOL(xfrm_state_walk
);
888 void xfrm_replay_notify(struct xfrm_state
*x
, int event
)
891 /* we send notify messages in case
892 * 1. we updated on of the sequence numbers, and the seqno difference
893 * is at least x->replay_maxdiff, in this case we also update the
894 * timeout of our timer function
895 * 2. if x->replay_maxage has elapsed since last update,
896 * and there were changes
898 * The state structure must be locked!
902 case XFRM_REPLAY_UPDATE
:
903 if (x
->replay_maxdiff
&&
904 (x
->replay
.seq
- x
->preplay
.seq
< x
->replay_maxdiff
) &&
905 (x
->replay
.oseq
- x
->preplay
.oseq
< x
->replay_maxdiff
)) {
906 if (x
->xflags
& XFRM_TIME_DEFER
)
907 event
= XFRM_REPLAY_TIMEOUT
;
914 case XFRM_REPLAY_TIMEOUT
:
915 if ((x
->replay
.seq
== x
->preplay
.seq
) &&
916 (x
->replay
.bitmap
== x
->preplay
.bitmap
) &&
917 (x
->replay
.oseq
== x
->preplay
.oseq
)) {
918 x
->xflags
|= XFRM_TIME_DEFER
;
925 memcpy(&x
->preplay
, &x
->replay
, sizeof(struct xfrm_replay_state
));
926 c
.event
= XFRM_MSG_NEWAE
;
927 c
.data
.aevent
= event
;
928 km_state_notify(x
, &c
);
930 if (x
->replay_maxage
&&
931 !mod_timer(&x
->rtimer
, jiffies
+ x
->replay_maxage
)) {
933 x
->xflags
&= ~XFRM_TIME_DEFER
;
936 EXPORT_SYMBOL(xfrm_replay_notify
);
938 static void xfrm_replay_timer_handler(unsigned long data
)
940 struct xfrm_state
*x
= (struct xfrm_state
*)data
;
944 if (x
->km
.state
== XFRM_STATE_VALID
) {
945 if (xfrm_aevent_is_on())
946 xfrm_replay_notify(x
, XFRM_REPLAY_TIMEOUT
);
948 x
->xflags
|= XFRM_TIME_DEFER
;
951 spin_unlock(&x
->lock
);
955 int xfrm_replay_check(struct xfrm_state
*x
, u32 seq
)
961 if (unlikely(seq
== 0))
964 if (likely(seq
> x
->replay
.seq
))
967 diff
= x
->replay
.seq
- seq
;
968 if (diff
>= x
->props
.replay_window
) {
969 x
->stats
.replay_window
++;
973 if (x
->replay
.bitmap
& (1U << diff
)) {
979 EXPORT_SYMBOL(xfrm_replay_check
);
981 void xfrm_replay_advance(struct xfrm_state
*x
, u32 seq
)
987 if (seq
> x
->replay
.seq
) {
988 diff
= seq
- x
->replay
.seq
;
989 if (diff
< x
->props
.replay_window
)
990 x
->replay
.bitmap
= ((x
->replay
.bitmap
) << diff
) | 1;
992 x
->replay
.bitmap
= 1;
995 diff
= x
->replay
.seq
- seq
;
996 x
->replay
.bitmap
|= (1U << diff
);
999 if (xfrm_aevent_is_on())
1000 xfrm_replay_notify(x
, XFRM_REPLAY_UPDATE
);
1002 EXPORT_SYMBOL(xfrm_replay_advance
);
1004 static struct list_head xfrm_km_list
= LIST_HEAD_INIT(xfrm_km_list
);
1005 static DEFINE_RWLOCK(xfrm_km_lock
);
1007 void km_policy_notify(struct xfrm_policy
*xp
, int dir
, struct km_event
*c
)
1009 struct xfrm_mgr
*km
;
1011 read_lock(&xfrm_km_lock
);
1012 list_for_each_entry(km
, &xfrm_km_list
, list
)
1013 if (km
->notify_policy
)
1014 km
->notify_policy(xp
, dir
, c
);
1015 read_unlock(&xfrm_km_lock
);
1018 void km_state_notify(struct xfrm_state
*x
, struct km_event
*c
)
1020 struct xfrm_mgr
*km
;
1021 read_lock(&xfrm_km_lock
);
1022 list_for_each_entry(km
, &xfrm_km_list
, list
)
1025 read_unlock(&xfrm_km_lock
);
1028 EXPORT_SYMBOL(km_policy_notify
);
1029 EXPORT_SYMBOL(km_state_notify
);
1031 void km_state_expired(struct xfrm_state
*x
, int hard
, u32 pid
)
1037 c
.event
= XFRM_MSG_EXPIRE
;
1038 km_state_notify(x
, &c
);
1044 EXPORT_SYMBOL(km_state_expired
);
1046 * We send to all registered managers regardless of failure
1047 * We are happy with one success
1049 int km_query(struct xfrm_state
*x
, struct xfrm_tmpl
*t
, struct xfrm_policy
*pol
)
1051 int err
= -EINVAL
, acqret
;
1052 struct xfrm_mgr
*km
;
1054 read_lock(&xfrm_km_lock
);
1055 list_for_each_entry(km
, &xfrm_km_list
, list
) {
1056 acqret
= km
->acquire(x
, t
, pol
, XFRM_POLICY_OUT
);
1060 read_unlock(&xfrm_km_lock
);
1063 EXPORT_SYMBOL(km_query
);
1065 int km_new_mapping(struct xfrm_state
*x
, xfrm_address_t
*ipaddr
, u16 sport
)
1068 struct xfrm_mgr
*km
;
1070 read_lock(&xfrm_km_lock
);
1071 list_for_each_entry(km
, &xfrm_km_list
, list
) {
1072 if (km
->new_mapping
)
1073 err
= km
->new_mapping(x
, ipaddr
, sport
);
1077 read_unlock(&xfrm_km_lock
);
1080 EXPORT_SYMBOL(km_new_mapping
);
1082 void km_policy_expired(struct xfrm_policy
*pol
, int dir
, int hard
, u32 pid
)
1088 c
.event
= XFRM_MSG_POLEXPIRE
;
1089 km_policy_notify(pol
, dir
, &c
);
1094 EXPORT_SYMBOL(km_policy_expired
);
1096 int km_report(u8 proto
, struct xfrm_selector
*sel
, xfrm_address_t
*addr
)
1100 struct xfrm_mgr
*km
;
1102 read_lock(&xfrm_km_lock
);
1103 list_for_each_entry(km
, &xfrm_km_list
, list
) {
1105 ret
= km
->report(proto
, sel
, addr
);
1110 read_unlock(&xfrm_km_lock
);
1113 EXPORT_SYMBOL(km_report
);
1115 int xfrm_user_policy(struct sock
*sk
, int optname
, u8 __user
*optval
, int optlen
)
1119 struct xfrm_mgr
*km
;
1120 struct xfrm_policy
*pol
= NULL
;
1122 if (optlen
<= 0 || optlen
> PAGE_SIZE
)
1125 data
= kmalloc(optlen
, GFP_KERNEL
);
1130 if (copy_from_user(data
, optval
, optlen
))
1134 read_lock(&xfrm_km_lock
);
1135 list_for_each_entry(km
, &xfrm_km_list
, list
) {
1136 pol
= km
->compile_policy(sk
, optname
, data
,
1141 read_unlock(&xfrm_km_lock
);
1144 xfrm_sk_policy_insert(sk
, err
, pol
);
1153 EXPORT_SYMBOL(xfrm_user_policy
);
1155 int xfrm_register_km(struct xfrm_mgr
*km
)
1157 write_lock_bh(&xfrm_km_lock
);
1158 list_add_tail(&km
->list
, &xfrm_km_list
);
1159 write_unlock_bh(&xfrm_km_lock
);
1162 EXPORT_SYMBOL(xfrm_register_km
);
1164 int xfrm_unregister_km(struct xfrm_mgr
*km
)
1166 write_lock_bh(&xfrm_km_lock
);
1167 list_del(&km
->list
);
1168 write_unlock_bh(&xfrm_km_lock
);
1171 EXPORT_SYMBOL(xfrm_unregister_km
);
1173 int xfrm_state_register_afinfo(struct xfrm_state_afinfo
*afinfo
)
1176 if (unlikely(afinfo
== NULL
))
1178 if (unlikely(afinfo
->family
>= NPROTO
))
1179 return -EAFNOSUPPORT
;
1180 write_lock_bh(&xfrm_state_afinfo_lock
);
1181 if (unlikely(xfrm_state_afinfo
[afinfo
->family
] != NULL
))
1184 afinfo
->state_bydst
= xfrm_state_bydst
;
1185 afinfo
->state_bysrc
= xfrm_state_bysrc
;
1186 afinfo
->state_byspi
= xfrm_state_byspi
;
1187 xfrm_state_afinfo
[afinfo
->family
] = afinfo
;
1189 write_unlock_bh(&xfrm_state_afinfo_lock
);
1192 EXPORT_SYMBOL(xfrm_state_register_afinfo
);
1194 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo
*afinfo
)
1197 if (unlikely(afinfo
== NULL
))
1199 if (unlikely(afinfo
->family
>= NPROTO
))
1200 return -EAFNOSUPPORT
;
1201 write_lock_bh(&xfrm_state_afinfo_lock
);
1202 if (likely(xfrm_state_afinfo
[afinfo
->family
] != NULL
)) {
1203 if (unlikely(xfrm_state_afinfo
[afinfo
->family
] != afinfo
))
1206 xfrm_state_afinfo
[afinfo
->family
] = NULL
;
1207 afinfo
->state_byspi
= NULL
;
1208 afinfo
->state_bysrc
= NULL
;
1209 afinfo
->state_bydst
= NULL
;
1212 write_unlock_bh(&xfrm_state_afinfo_lock
);
1215 EXPORT_SYMBOL(xfrm_state_unregister_afinfo
);
1217 static struct xfrm_state_afinfo
*xfrm_state_get_afinfo(unsigned short family
)
1219 struct xfrm_state_afinfo
*afinfo
;
1220 if (unlikely(family
>= NPROTO
))
1222 read_lock(&xfrm_state_afinfo_lock
);
1223 afinfo
= xfrm_state_afinfo
[family
];
1224 if (unlikely(!afinfo
))
1225 read_unlock(&xfrm_state_afinfo_lock
);
1229 static void xfrm_state_put_afinfo(struct xfrm_state_afinfo
*afinfo
)
1231 read_unlock(&xfrm_state_afinfo_lock
);
1234 /* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
1235 void xfrm_state_delete_tunnel(struct xfrm_state
*x
)
1238 struct xfrm_state
*t
= x
->tunnel
;
1240 if (atomic_read(&t
->tunnel_users
) == 2)
1241 xfrm_state_delete(t
);
1242 atomic_dec(&t
->tunnel_users
);
1247 EXPORT_SYMBOL(xfrm_state_delete_tunnel
);
1250 * This function is NOT optimal. For example, with ESP it will give an
1251 * MTU that's usually two bytes short of being optimal. However, it will
1252 * usually give an answer that's a multiple of 4 provided the input is
1253 * also a multiple of 4.
1255 int xfrm_state_mtu(struct xfrm_state
*x
, int mtu
)
1259 res
-= x
->props
.header_len
;
1267 spin_lock_bh(&x
->lock
);
1268 if (x
->km
.state
== XFRM_STATE_VALID
&&
1269 x
->type
&& x
->type
->get_max_size
)
1270 m
= x
->type
->get_max_size(x
, m
);
1272 m
+= x
->props
.header_len
;
1273 spin_unlock_bh(&x
->lock
);
1283 int xfrm_init_state(struct xfrm_state
*x
)
1285 struct xfrm_state_afinfo
*afinfo
;
1286 int family
= x
->props
.family
;
1289 err
= -EAFNOSUPPORT
;
1290 afinfo
= xfrm_state_get_afinfo(family
);
1295 if (afinfo
->init_flags
)
1296 err
= afinfo
->init_flags(x
);
1298 xfrm_state_put_afinfo(afinfo
);
1303 err
= -EPROTONOSUPPORT
;
1304 x
->type
= xfrm_get_type(x
->id
.proto
, family
);
1305 if (x
->type
== NULL
)
1308 err
= x
->type
->init_state(x
);
1312 x
->mode
= xfrm_get_mode(x
->props
.mode
, family
);
1313 if (x
->mode
== NULL
)
1316 x
->km
.state
= XFRM_STATE_VALID
;
1322 EXPORT_SYMBOL(xfrm_init_state
);
1324 void __init
xfrm_state_init(void)
1328 for (i
=0; i
<XFRM_DST_HSIZE
; i
++) {
1329 INIT_LIST_HEAD(&xfrm_state_bydst
[i
]);
1330 INIT_LIST_HEAD(&xfrm_state_bysrc
[i
]);
1331 INIT_LIST_HEAD(&xfrm_state_byspi
[i
]);
1333 INIT_WORK(&xfrm_state_gc_work
, xfrm_state_gc_task
, NULL
);