6 * Kazunori MIYAZAWA @USAGI
7 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
9 * YOSHIFUJI Hideaki @USAGI
10 * Split up af-specific functions
11 * Derek Atkins <derek@ihtfp.com>
12 * Add UDP Encapsulation
16 #include <linux/workqueue.h>
18 #include <linux/pfkeyv2.h>
19 #include <linux/ipsec.h>
20 #include <linux/module.h>
21 #include <asm/uaccess.h>
/* Module-scope state for the xfrm SA (Security Association) database:
 * sysctl knobs for async-event (aevent) notification, the three hash
 * tables indexing xfrm_state objects, the lock protecting them, the
 * garbage-collection work item/list, and forward declarations.
 * NOTE(review): this is a fragmentary extraction — interior source
 * lines are missing throughout; do not assume the visible lines are
 * contiguous. */
24 EXPORT_SYMBOL(xfrm_nl
);
/* aevent timer interval (sysctl-tunable), defaulting to XFRM_AE_ETIME. */
26 u32 sysctl_xfrm_aevent_etime
= XFRM_AE_ETIME
;
27 EXPORT_SYMBOL(sysctl_xfrm_aevent_etime
);
/* aevent replay-sequence threshold (sysctl-tunable). */
29 u32 sysctl_xfrm_aevent_rseqth
= XFRM_AE_SEQT_SIZE
;
30 EXPORT_SYMBOL(sysctl_xfrm_aevent_rseqth
);
32 /* Each xfrm_state may be linked to two tables:
34 1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
35 2. Hash table by daddr to find what SAs exist for given
36 destination/tunnel endpoint. (output)
/* Protects the bydst/bysrc/byspi hash tables below. */
39 static DEFINE_SPINLOCK(xfrm_state_lock
);
41 /* Hash table to find appropriate SA towards given target (endpoint
42 * of tunnel or destination of transport mode) allowed by selector.
44 * Main use is finding SA after policy selected tunnel or transport mode.
45 * Also, it can be used by ah/esp icmp error handler to find offending SA.
47 static struct list_head xfrm_state_bydst
[XFRM_DST_HSIZE
];
48 static struct list_head xfrm_state_bysrc
[XFRM_DST_HSIZE
];
49 static struct list_head xfrm_state_byspi
[XFRM_DST_HSIZE
];
/* Wait queue kicked when the SA database changes; key managers sleep here. */
51 DECLARE_WAIT_QUEUE_HEAD(km_waitq
);
52 EXPORT_SYMBOL(km_waitq
);
/* Protects the per-family afinfo registration table. */
54 static DEFINE_RWLOCK(xfrm_state_afinfo_lock
);
55 static struct xfrm_state_afinfo
*xfrm_state_afinfo
[NPROTO
];
/* Deferred destruction: dead states are queued on xfrm_state_gc_list
 * and freed by xfrm_state_gc_work in process context. */
57 static struct work_struct xfrm_state_gc_work
;
58 static struct list_head xfrm_state_gc_list
= LIST_HEAD_INIT(xfrm_state_gc_list
);
59 static DEFINE_SPINLOCK(xfrm_state_gc_lock
);
/* Set when GC should also flush cached dst bundles (see __xfrm_state_delete). */
61 static int xfrm_state_gc_flush_bundles
;
63 int __xfrm_state_delete(struct xfrm_state
*x
);
65 static struct xfrm_state_afinfo
*xfrm_state_get_afinfo(unsigned short family
);
66 static void xfrm_state_put_afinfo(struct xfrm_state_afinfo
*afinfo
);
68 int km_query(struct xfrm_state
*x
, struct xfrm_tmpl
*t
, struct xfrm_policy
*pol
);
69 void km_state_expired(struct xfrm_state
*x
, int hard
, u32 pid
);
/* xfrm_state_gc_destroy - final teardown of one dead xfrm_state.
 * Cancels its timers, releases mode, calls the type's destructor,
 * drops the type reference, and frees LSM security state.
 * Called from the GC work item only, after the state is unhashed.
 * NOTE(review): fragmentary extraction — interior lines (braces,
 * some cleanup statements) are missing from this view. */
71 static void xfrm_state_gc_destroy(struct xfrm_state
*x
)
73 if (del_timer(&x
->timer
))
75 if (del_timer(&x
->rtimer
))
82 xfrm_put_mode(x
->mode
);
/* Type destructor runs before the type module reference is dropped. */
84 x
->type
->destructor(x
);
85 xfrm_put_type(x
->type
);
87 security_xfrm_state_free(x
);
/* xfrm_state_gc_task - workqueue handler that destroys queued states.
 * Splices the global GC list to a private list under xfrm_state_gc_lock
 * (so new deaths can queue concurrently), then destroys each entry.
 * If the flush-bundles flag was set, it is consumed here first.
 * NOTE(review): fragmentary extraction — interior lines are missing. */
91 static void xfrm_state_gc_task(void *data
)
94 struct list_head
*entry
, *tmp
;
95 struct list_head gc_list
= LIST_HEAD_INIT(gc_list
);
97 if (xfrm_state_gc_flush_bundles
) {
98 xfrm_state_gc_flush_bundles
= 0;
/* Atomically steal the whole pending list; lock held only briefly. */
102 spin_lock_bh(&xfrm_state_gc_lock
);
103 list_splice_init(&xfrm_state_gc_list
, &gc_list
);
104 spin_unlock_bh(&xfrm_state_gc_lock
);
/* _safe variant: entries are destroyed while walking. States are
 * queued via their bydst list_head (see __xfrm_state_destroy). */
106 list_for_each_safe(entry
, tmp
, &gc_list
) {
107 x
= list_entry(entry
, struct xfrm_state
, bydst
);
108 xfrm_state_gc_destroy(x
);
/* make_jiffies - convert a timeout in seconds to jiffies, clamped so
 * it never exceeds MAX_SCHEDULE_TIMEOUT-1 (avoids overflow of secs*HZ).
 * NOTE(review): the non-clamped return path is missing from this view. */
113 static inline unsigned long make_jiffies(long secs
)
115 if (secs
>= (MAX_SCHEDULE_TIMEOUT
-1)/HZ
)
116 return MAX_SCHEDULE_TIMEOUT
-1;
/* xfrm_timer_handler - per-state lifetime timer.
 * Checks the four time-based lifetime limits (hard/soft x add/use).
 * Hard expiry kills the state; soft expiry notifies key managers
 * (km_state_expired with hard=0) so they can rekey. The timer is
 * rearmed for the nearest pending deadline via make_jiffies().
 * Runs in timer (softirq) context under x->lock.
 * NOTE(review): fragmentary extraction — expiry branches and goto
 * targets are missing from this view. */
121 static void xfrm_timer_handler(unsigned long data
)
123 struct xfrm_state
*x
= (struct xfrm_state
*)data
;
/* Wall-clock seconds; lifetimes are stored as absolute seconds. */
124 unsigned long now
= (unsigned long)xtime
.tv_sec
;
/* Seconds until the nearest future deadline found below. */
125 long next
= LONG_MAX
;
129 if (x
->km
.state
== XFRM_STATE_DEAD
)
131 if (x
->km
.state
== XFRM_STATE_EXPIRED
)
133 if (x
->lft
.hard_add_expires_seconds
) {
134 long tmo
= x
->lft
.hard_add_expires_seconds
+
135 x
->curlft
.add_time
- now
;
141 if (x
->lft
.hard_use_expires_seconds
) {
/* "?:" — if the state was never used, count from now. */
142 long tmo
= x
->lft
.hard_use_expires_seconds
+
143 (x
->curlft
.use_time
? : now
) - now
;
151 if (x
->lft
.soft_add_expires_seconds
) {
152 long tmo
= x
->lft
.soft_add_expires_seconds
+
153 x
->curlft
.add_time
- now
;
159 if (x
->lft
.soft_use_expires_seconds
) {
160 long tmo
= x
->lft
.soft_use_expires_seconds
+
161 (x
->curlft
.use_time
? : now
) - now
;
/* Soft expiry: advisory notification only (hard=0). */
170 km_state_expired(x
, 0, 0);
172 if (next
!= LONG_MAX
&&
173 !mod_timer(&x
->timer
, jiffies
+ make_jiffies(next
)))
/* An ACQ state that never got an SPI just dies quietly on expiry. */
178 if (x
->km
.state
== XFRM_STATE_ACQ
&& x
->id
.spi
== 0) {
179 x
->km
.state
= XFRM_STATE_EXPIRED
;
/* Hard expiry: delete, and notify managers if it was a real SA. */
184 if (!__xfrm_state_delete(x
) && x
->id
.spi
)
185 km_state_expired(x
, 1, 0);
188 spin_unlock(&x
->lock
);
192 static void xfrm_replay_timer_handler(unsigned long data
);
/* xfrm_state_alloc - allocate and initialize a new xfrm_state.
 * Returns a zeroed state with refcount 1, empty hash-list heads,
 * both timers wired to their handlers, add_time stamped with the
 * current wall-clock second, and all byte/packet lifetime limits
 * set to "infinite" (XFRM_INF). GFP_ATOMIC: callable from softirq.
 * NOTE(review): fragmentary extraction — the NULL-check/return
 * lines are missing from this view. */
194 struct xfrm_state
*xfrm_state_alloc(void)
196 struct xfrm_state
*x
;
198 x
= kzalloc(sizeof(struct xfrm_state
), GFP_ATOMIC
);
201 atomic_set(&x
->refcnt
, 1);
202 atomic_set(&x
->tunnel_users
, 0);
203 INIT_LIST_HEAD(&x
->bydst
);
204 INIT_LIST_HEAD(&x
->bysrc
);
205 INIT_LIST_HEAD(&x
->byspi
);
/* Lifetime timer and replay-notification timer both carry the
 * state pointer as their data argument. */
206 init_timer(&x
->timer
);
207 x
->timer
.function
= xfrm_timer_handler
;
208 x
->timer
.data
= (unsigned long)x
;
209 init_timer(&x
->rtimer
);
210 x
->rtimer
.function
= xfrm_replay_timer_handler
;
211 x
->rtimer
.data
= (unsigned long)x
;
212 x
->curlft
.add_time
= (unsigned long)xtime
.tv_sec
;
213 x
->lft
.soft_byte_limit
= XFRM_INF
;
214 x
->lft
.soft_packet_limit
= XFRM_INF
;
215 x
->lft
.hard_byte_limit
= XFRM_INF
;
216 x
->lft
.hard_packet_limit
= XFRM_INF
;
217 x
->replay_maxage
= 0;
218 x
->replay_maxdiff
= 0;
219 spin_lock_init(&x
->lock
);
223 EXPORT_SYMBOL(xfrm_state_alloc
);
/* __xfrm_state_destroy - queue a dead state for deferred destruction.
 * Called when the last reference is dropped; the state must already
 * be XFRM_STATE_DEAD (asserted via BUG_TRAP). The state is reused
 * via its bydst list_head as the GC-queue linkage, and the GC work
 * item is scheduled to free it in process context. */
225 void __xfrm_state_destroy(struct xfrm_state
*x
)
227 BUG_TRAP(x
->km
.state
== XFRM_STATE_DEAD
);
229 spin_lock_bh(&xfrm_state_gc_lock
);
230 list_add(&x
->bydst
, &xfrm_state_gc_list
);
231 spin_unlock_bh(&xfrm_state_gc_lock
);
232 schedule_work(&xfrm_state_gc_work
);
234 EXPORT_SYMBOL(__xfrm_state_destroy
);
/* __xfrm_state_delete - mark a state dead and unhash it.
 * Caller holds x->lock. Transitions km.state to XFRM_STATE_DEAD,
 * removes the state from the hash tables under xfrm_state_lock,
 * stops its timers, and drops the allocation reference. If extra
 * references remain (cached dst bundles), schedules GC with the
 * flush-bundles flag so those bundles get torn down.
 * NOTE(review): fragmentary extraction — the unhash statements and
 * the return value path are missing from this view. */
236 int __xfrm_state_delete(struct xfrm_state
*x
)
240 if (x
->km
.state
!= XFRM_STATE_DEAD
) {
241 x
->km
.state
= XFRM_STATE_DEAD
;
242 spin_lock(&xfrm_state_lock
);
251 spin_unlock(&xfrm_state_lock
);
252 if (del_timer(&x
->timer
))
254 if (del_timer(&x
->rtimer
))
257 /* The number two in this test is the reference
258 * mentioned in the comment below plus the reference
259 * our caller holds. A larger value means that
260 * there are DSTs attached to this xfrm_state.
262 if (atomic_read(&x
->refcnt
) > 2) {
263 xfrm_state_gc_flush_bundles
= 1;
264 schedule_work(&xfrm_state_gc_work
);
267 /* All xfrm_state objects are created by xfrm_state_alloc.
268 * The xfrm_state_alloc call gives a reference, and that
269 * is what we are dropping here.
277 EXPORT_SYMBOL(__xfrm_state_delete
);
/* xfrm_state_delete - locked wrapper around __xfrm_state_delete.
 * Takes x->lock (BH-safe) around the delete and returns its result.
 * NOTE(review): fragmentary extraction — braces/return are missing
 * from this view. */
279 int xfrm_state_delete(struct xfrm_state
*x
)
283 spin_lock_bh(&x
->lock
);
284 err
= __xfrm_state_delete(x
);
285 spin_unlock_bh(&x
->lock
);
289 EXPORT_SYMBOL(xfrm_state_delete
);
/* xfrm_state_flush - delete every non-kernel-internal state whose
 * protocol matches 'proto' (IPSEC_PROTO_ANY matches all).
 * Walks all bydst hash chains; xfrm_state_lock is dropped around
 * each xfrm_state_delete() call (which takes x->lock itself) and
 * re-taken afterwards, so the chain walk restarts logic lives in
 * the (missing) surrounding lines.
 * NOTE(review): fragmentary extraction — the restart/goto lines and
 * reference handling around the unlock window are missing here. */
291 void xfrm_state_flush(u8 proto
)
294 struct xfrm_state
*x
;
296 spin_lock_bh(&xfrm_state_lock
);
297 for (i
= 0; i
< XFRM_DST_HSIZE
; i
++) {
299 list_for_each_entry(x
, xfrm_state_bydst
+i
, bydst
) {
300 if (!xfrm_state_kern(x
) &&
301 xfrm_id_proto_match(x
->id
.proto
, proto
)) {
/* Must drop the table lock before deleting: delete takes x->lock. */
303 spin_unlock_bh(&xfrm_state_lock
);
305 xfrm_state_delete(x
);
308 spin_lock_bh(&xfrm_state_lock
);
313 spin_unlock_bh(&xfrm_state_lock
);
316 EXPORT_SYMBOL(xfrm_state_flush
);
/* xfrm_init_tempsel - fill a temporary selector on an ACQ state so it
 * matches only the flow that triggered resolution. Delegates to the
 * address-family-specific init_tempsel hook, bracketed by afinfo
 * get/put (which hold the afinfo read lock).
 * NOTE(review): fragmentary extraction — return type line, afinfo
 * NULL check, and return are missing from this view. */
319 xfrm_init_tempsel(struct xfrm_state
*x
, struct flowi
*fl
,
320 struct xfrm_tmpl
*tmpl
,
321 xfrm_address_t
*daddr
, xfrm_address_t
*saddr
,
322 unsigned short family
)
324 struct xfrm_state_afinfo
*afinfo
= xfrm_state_get_afinfo(family
);
327 afinfo
->init_tempsel(x
, fl
, tmpl
, daddr
, saddr
);
328 xfrm_state_put_afinfo(afinfo
);
/* xfrm_state_find - main output-path SA resolution.
 * Given flow + template + policy, search the bydst hash chain for a
 * state matching family, reqid, addresses, mode, proto and (optional)
 * SPI. A VALID state must also pass selector and LSM flow checks;
 * among candidates the "best" (least dying, newest) wins. If nothing
 * usable exists and no acquire is already in flight, allocate a
 * temporary ACQ state, init its temp selector, query key managers
 * (km_query), hash it in and arm its XFRM_ACQ_EXPIRES timeout.
 * On contention returns -EAGAIN via *err.
 * NOTE(review): fragmentary extraction — many interior lines (best
 * selection, error paths, reference counting, returns) are missing;
 * comments here describe only what the visible lines show. */
333 xfrm_state_find(xfrm_address_t
*daddr
, xfrm_address_t
*saddr
,
334 struct flowi
*fl
, struct xfrm_tmpl
*tmpl
,
335 struct xfrm_policy
*pol
, int *err
,
336 unsigned short family
)
338 unsigned h
= xfrm_dst_hash(daddr
, family
);
339 struct xfrm_state
*x
, *x0
;
340 int acquire_in_progress
= 0;
342 struct xfrm_state
*best
= NULL
;
343 struct xfrm_state_afinfo
*afinfo
;
345 afinfo
= xfrm_state_get_afinfo(family
);
346 if (afinfo
== NULL
) {
347 *err
= -EAFNOSUPPORT
;
351 spin_lock_bh(&xfrm_state_lock
);
352 list_for_each_entry(x
, xfrm_state_bydst
+h
, bydst
) {
/* Candidate filter: family, reqid, addresses, mode, proto, and
 * SPI (wildcarded when the template carries no SPI). */
353 if (x
->props
.family
== family
&&
354 x
->props
.reqid
== tmpl
->reqid
&&
355 xfrm_state_addr_check(x
, daddr
, saddr
, family
) &&
356 tmpl
->mode
== x
->props
.mode
&&
357 tmpl
->id
.proto
== x
->id
.proto
&&
358 (tmpl
->id
.spi
== x
->id
.spi
|| !tmpl
->id
.spi
)) {
360 1. There is a valid state with matching selector.
362 2. Valid state with inappropriate selector. Skip.
364 Entering area of "sysdeps".
366 3. If state is not valid, selector is temporary,
367 it selects only session which triggered
368 previous resolution. Key manager will do
369 something to install a state with proper
372 if (x
->km
.state
== XFRM_STATE_VALID
) {
373 if (!xfrm_selector_match(&x
->sel
, fl
, family
) ||
374 !security_xfrm_state_pol_flow_match(x
, pol
, fl
))
/* Prefer the least-dying, most recently added candidate. */
377 best
->km
.dying
> x
->km
.dying
||
378 (best
->km
.dying
== x
->km
.dying
&&
379 best
->curlft
.add_time
< x
->curlft
.add_time
))
381 } else if (x
->km
.state
== XFRM_STATE_ACQ
) {
382 acquire_in_progress
= 1;
383 } else if (x
->km
.state
== XFRM_STATE_ERROR
||
384 x
->km
.state
== XFRM_STATE_EXPIRED
) {
385 if (xfrm_selector_match(&x
->sel
, fl
, family
) &&
386 security_xfrm_state_pol_flow_match(x
, pol
, fl
))
/* No usable SA, no error, no acquire pending: start an acquire. */
393 if (!x
&& !error
&& !acquire_in_progress
) {
/* Template with fixed SPI that already exists elsewhere is fatal. */
395 (x0
= afinfo
->state_lookup(daddr
, tmpl
->id
.spi
,
396 tmpl
->id
.proto
)) != NULL
) {
401 x
= xfrm_state_alloc();
406 /* Initialize temporary selector matching only
407 * to current session. */
408 xfrm_init_tempsel(x
, fl
, tmpl
, daddr
, saddr
, family
);
410 error
= security_xfrm_state_alloc_acquire(x
, pol
->security
, fl
->secid
);
412 x
->km
.state
= XFRM_STATE_DEAD
;
/* Ask registered key managers to negotiate an SA; on success the
 * ACQ placeholder is hashed in and given an expiry timeout. */
418 if (km_query(x
, tmpl
, pol
) == 0) {
419 x
->km
.state
= XFRM_STATE_ACQ
;
420 list_add_tail(&x
->bydst
, xfrm_state_bydst
+h
);
422 list_add_tail(&x
->bysrc
, xfrm_state_bysrc
+h
);
425 h
= xfrm_spi_hash(&x
->id
.daddr
, x
->id
.spi
, x
->id
.proto
, family
);
426 list_add(&x
->byspi
, xfrm_state_byspi
+h
);
429 x
->lft
.hard_add_expires_seconds
= XFRM_ACQ_EXPIRES
;
431 x
->timer
.expires
= jiffies
+ XFRM_ACQ_EXPIRES
*HZ
;
432 add_timer(&x
->timer
);
/* km_query failed: the placeholder dies immediately. */
434 x
->km
.state
= XFRM_STATE_DEAD
;
444 *err
= acquire_in_progress
? -EAGAIN
: error
;
445 spin_unlock_bh(&xfrm_state_lock
);
446 xfrm_state_put_afinfo(afinfo
);
/* __xfrm_state_insert - hash a state into bydst, bysrc and (for real
 * IPsec protocols) byspi tables, then arm its lifetime timer (first
 * check after 1s) and, if configured, its replay-notification timer.
 * Caller holds xfrm_state_lock.
 * NOTE(review): fragmentary extraction — refcount takes for each
 * timer/hash insertion and the km_waitq wakeup are missing here. */
450 static void __xfrm_state_insert(struct xfrm_state
*x
)
452 unsigned h
= xfrm_dst_hash(&x
->id
.daddr
, x
->props
.family
);
454 list_add(&x
->bydst
, xfrm_state_bydst
+h
);
457 h
= xfrm_src_hash(&x
->props
.saddr
, x
->props
.family
);
459 list_add(&x
->bysrc
, xfrm_state_bysrc
+h
);
/* Only genuine IPsec protocols live in the SPI table. */
462 if (xfrm_id_proto_match(x
->id
.proto
, IPSEC_PROTO_ANY
)) {
463 h
= xfrm_spi_hash(&x
->id
.daddr
, x
->id
.spi
, x
->id
.proto
,
466 list_add(&x
->byspi
, xfrm_state_byspi
+h
);
470 if (!mod_timer(&x
->timer
, jiffies
+ HZ
))
473 if (x
->replay_maxage
&&
474 !mod_timer(&x
->rtimer
, jiffies
+ x
->replay_maxage
))
/* xfrm_state_insert - public insert: takes the table lock around
 * __xfrm_state_insert, then flushes cached dst bundles so the new
 * SA takes effect on existing routes. */
480 void xfrm_state_insert(struct xfrm_state
*x
)
482 spin_lock_bh(&xfrm_state_lock
);
483 __xfrm_state_insert(x
);
484 spin_unlock_bh(&xfrm_state_lock
);
486 xfrm_flush_all_bundles();
488 EXPORT_SYMBOL(xfrm_state_insert
);
490 static struct xfrm_state
*__xfrm_find_acq_byseq(u32 seq
);
/* xfrm_state_add - add a fully-specified SA to the database.
 * Fails if an SA with the same (daddr, spi, proto) already exists.
 * Locates a matching ACQ placeholder — by the key manager's sequence
 * number (rejected if its daddr differs) or via find_acq — inserts
 * the new state, and deletes the placeholder afterwards so waiters
 * are migrated. Flushes dst bundles on success.
 * NOTE(review): fragmentary extraction — error assignments, refcount
 * operations and the return are missing from this view. */
492 int xfrm_state_add(struct xfrm_state
*x
)
494 struct xfrm_state_afinfo
*afinfo
;
495 struct xfrm_state
*x1
;
499 family
= x
->props
.family
;
500 afinfo
= xfrm_state_get_afinfo(family
);
501 if (unlikely(afinfo
== NULL
))
502 return -EAFNOSUPPORT
;
504 spin_lock_bh(&xfrm_state_lock
);
/* Duplicate (daddr,spi,proto) check. */
506 x1
= afinfo
->state_lookup(&x
->id
.daddr
, x
->id
.spi
, x
->id
.proto
);
/* Match the ACQ placeholder by KM sequence number; a seq match with
 * a different destination is bogus and rejected. */
515 x1
= __xfrm_find_acq_byseq(x
->km
.seq
);
516 if (x1
&& xfrm_addr_cmp(&x1
->id
.daddr
, &x
->id
.daddr
, family
)) {
523 x1
= afinfo
->find_acq(
524 x
->props
.mode
, x
->props
.reqid
, x
->id
.proto
,
525 &x
->id
.daddr
, &x
->props
.saddr
, 0);
527 __xfrm_state_insert(x
);
531 spin_unlock_bh(&xfrm_state_lock
);
532 xfrm_state_put_afinfo(afinfo
);
535 xfrm_flush_all_bundles();
/* Retire the ACQ placeholder now that the real SA is installed. */
538 xfrm_state_delete(x1
);
544 EXPORT_SYMBOL(xfrm_state_add
);
/* xfrm_state_update - update an existing SA in place.
 * Looks up the SA by (daddr, spi, proto). Kernel-internal states may
 * not be replaced. If the found state is an ACQ placeholder, the new
 * state is inserted and the placeholder deleted. Otherwise, under
 * x1->lock, encap and lifetime data are copied onto the live state
 * and its lifetime timer is re-armed for an immediate (1s) recheck.
 * NOTE(review): fragmentary extraction — error-code assignments,
 * the out/error labels and the return are missing from this view. */
546 int xfrm_state_update(struct xfrm_state
*x
)
548 struct xfrm_state_afinfo
*afinfo
;
549 struct xfrm_state
*x1
;
552 afinfo
= xfrm_state_get_afinfo(x
->props
.family
);
553 if (unlikely(afinfo
== NULL
))
554 return -EAFNOSUPPORT
;
556 spin_lock_bh(&xfrm_state_lock
);
557 x1
= afinfo
->state_lookup(&x
->id
.daddr
, x
->id
.spi
, x
->id
.proto
);
/* Kernel-owned (e.g. tunnel-internal) states are not replaceable. */
563 if (xfrm_state_kern(x1
)) {
569 if (x1
->km
.state
== XFRM_STATE_ACQ
) {
570 __xfrm_state_insert(x
);
576 spin_unlock_bh(&xfrm_state_lock
);
577 xfrm_state_put_afinfo(afinfo
);
583 xfrm_state_delete(x1
);
589 spin_lock_bh(&x1
->lock
);
590 if (likely(x1
->km
.state
== XFRM_STATE_VALID
)) {
591 if (x
->encap
&& x1
->encap
)
592 memcpy(x1
->encap
, x
->encap
, sizeof(*x1
->encap
));
593 memcpy(&x1
->lft
, &x
->lft
, sizeof(x1
->lft
));
/* New lifetimes may already be exceeded — recheck promptly. */
596 if (!mod_timer(&x1
->timer
, jiffies
+ HZ
))
598 if (x1
->curlft
.use_time
)
599 xfrm_state_check_expire(x1
);
603 spin_unlock_bh(&x1
->lock
);
609 EXPORT_SYMBOL(xfrm_state_update
);
/* xfrm_state_check_expire - enforce byte/packet lifetime limits.
 * Stamps use_time on first use. Hard limit reached: mark EXPIRED and
 * fire the lifetime timer immediately. Soft limit reached: notify
 * key managers (km_state_expired, hard=0) so they can rekey.
 * Caller holds x->lock.
 * NOTE(review): fragmentary extraction — the dying-flag logic and
 * return statements are missing from this view. */
611 int xfrm_state_check_expire(struct xfrm_state
*x
)
613 if (!x
->curlft
.use_time
)
614 x
->curlft
.use_time
= (unsigned long)xtime
.tv_sec
;
616 if (x
->km
.state
!= XFRM_STATE_VALID
)
619 if (x
->curlft
.bytes
>= x
->lft
.hard_byte_limit
||
620 x
->curlft
.packets
>= x
->lft
.hard_packet_limit
) {
621 x
->km
.state
= XFRM_STATE_EXPIRED
;
/* jiffies (now): let the timer handler do the actual teardown. */
622 if (!mod_timer(&x
->timer
, jiffies
))
628 (x
->curlft
.bytes
>= x
->lft
.soft_byte_limit
||
629 x
->curlft
.packets
>= x
->lft
.soft_packet_limit
)) {
631 km_state_expired(x
, 0, 0);
635 EXPORT_SYMBOL(xfrm_state_check_expire
);
/* xfrm_state_check_space - ensure the skb has enough headroom for
 * this transform's header plus the output device's link-layer needs;
 * expands the head (GFP_ATOMIC) when it does not.
 * NOTE(review): fragmentary extraction — the headroom comparison
 * that guards pskb_expand_head is missing from this view. */
637 static int xfrm_state_check_space(struct xfrm_state
*x
, struct sk_buff
*skb
)
639 int nhead
= x
->props
.header_len
+ LL_RESERVED_SPACE(skb
->dst
->dev
)
643 return pskb_expand_head(skb
, nhead
, 0, GFP_ATOMIC
);
645 /* Check tail too... */
/* xfrm_state_check - output-path pre-transform check: first lifetime
 * expiry, then skb headroom. Returns 0 on success, else the first
 * error encountered.
 * NOTE(review): fragmentary extraction — intermediate error checks
 * and the return are missing from this view. */
649 int xfrm_state_check(struct xfrm_state
*x
, struct sk_buff
*skb
)
651 int err
= xfrm_state_check_expire(x
);
654 err
= xfrm_state_check_space(x
, skb
);
658 EXPORT_SYMBOL(xfrm_state_check
);
/* xfrm_state_lookup - find an SA by (daddr, spi, proto) via the
 * family-specific lookup hook, under xfrm_state_lock.
 * NOTE(review): fragmentary extraction — return-type line, afinfo
 * NULL check and return are missing from this view. */
661 xfrm_state_lookup(xfrm_address_t
*daddr
, u32 spi
, u8 proto
,
662 unsigned short family
)
664 struct xfrm_state
*x
;
665 struct xfrm_state_afinfo
*afinfo
= xfrm_state_get_afinfo(family
);
669 spin_lock_bh(&xfrm_state_lock
);
670 x
= afinfo
->state_lookup(daddr
, spi
, proto
);
671 spin_unlock_bh(&xfrm_state_lock
);
672 xfrm_state_put_afinfo(afinfo
);
675 EXPORT_SYMBOL(xfrm_state_lookup
);
/* xfrm_find_acq - find (or, when 'create' is set, create) an ACQ
 * placeholder state matching mode/reqid/proto/addresses, via the
 * family-specific find_acq hook, under xfrm_state_lock.
 * NOTE(review): fragmentary extraction — return-type line, afinfo
 * NULL check and return are missing from this view. */
678 xfrm_find_acq(u8 mode
, u32 reqid
, u8 proto
,
679 xfrm_address_t
*daddr
, xfrm_address_t
*saddr
,
680 int create
, unsigned short family
)
682 struct xfrm_state
*x
;
683 struct xfrm_state_afinfo
*afinfo
= xfrm_state_get_afinfo(family
);
687 spin_lock_bh(&xfrm_state_lock
);
688 x
= afinfo
->find_acq(mode
, reqid
, proto
, daddr
, saddr
, create
);
689 spin_unlock_bh(&xfrm_state_lock
);
690 xfrm_state_put_afinfo(afinfo
);
693 EXPORT_SYMBOL(xfrm_find_acq
);
695 /* Silly enough, but I'm lazy to build resolution list */
/* __xfrm_find_acq_byseq - linear scan of all bydst chains for an ACQ
 * state whose key-manager sequence number matches 'seq'.
 * Caller holds xfrm_state_lock. O(total states) by design (see the
 * author's comment above).
 * NOTE(review): fragmentary extraction — refcount take and returns
 * are missing from this view. */
697 static struct xfrm_state
*__xfrm_find_acq_byseq(u32 seq
)
700 struct xfrm_state
*x
;
702 for (i
= 0; i
< XFRM_DST_HSIZE
; i
++) {
703 list_for_each_entry(x
, xfrm_state_bydst
+i
, bydst
) {
704 if (x
->km
.seq
== seq
&& x
->km
.state
== XFRM_STATE_ACQ
) {
/* xfrm_find_acq_byseq - locked wrapper around __xfrm_find_acq_byseq.
 * NOTE(review): fragmentary extraction — braces and return are
 * missing from this view. */
713 struct xfrm_state
*xfrm_find_acq_byseq(u32 seq
)
715 struct xfrm_state
*x
;
717 spin_lock_bh(&xfrm_state_lock
);
718 x
= __xfrm_find_acq_byseq(seq
);
719 spin_unlock_bh(&xfrm_state_lock
);
722 EXPORT_SYMBOL(xfrm_find_acq_byseq
);
/* xfrm_get_acqseq - return the next nonzero acquire sequence number.
 * The "?:" re-increments so 0 is never handed out after the counter
 * wraps; serialized by a local static spinlock.
 * NOTE(review): fragmentary extraction — the static counter
 * declaration and return are missing from this view. */
724 u32
xfrm_get_acqseq(void)
728 static DEFINE_SPINLOCK(acqseq_lock
);
730 spin_lock_bh(&acqseq_lock
);
731 res
= (++acqseq
? : ++acqseq
);
732 spin_unlock_bh(&acqseq_lock
);
735 EXPORT_SYMBOL(xfrm_get_acqseq
);
/* xfrm_alloc_spi - pick an SPI for state x within [minspi, maxspi].
 * If the range is a single value, just check it is free; otherwise
 * sample random values in the (host-order) range until a free one is
 * found. On success the SPI is stored (network order) and the state
 * is hashed into the byspi table.
 * NOTE(review): fragmentary extraction — loop-exit conditions,
 * x0 collision handling/put, and the return are missing here. */
738 xfrm_alloc_spi(struct xfrm_state
*x
, u32 minspi
, u32 maxspi
)
741 struct xfrm_state
*x0
;
746 if (minspi
== maxspi
) {
747 x0
= xfrm_state_lookup(&x
->id
.daddr
, minspi
, x
->id
.proto
, x
->props
.family
);
/* Work in host byte order for the random range arithmetic. */
755 minspi
= ntohl(minspi
);
756 maxspi
= ntohl(maxspi
);
757 for (h
=0; h
<maxspi
-minspi
+1; h
++) {
758 spi
= minspi
+ net_random()%(maxspi
-minspi
+1);
759 x0
= xfrm_state_lookup(&x
->id
.daddr
, htonl(spi
), x
->id
.proto
, x
->props
.family
);
761 x
->id
.spi
= htonl(spi
);
768 spin_lock_bh(&xfrm_state_lock
);
769 h
= xfrm_spi_hash(&x
->id
.daddr
, x
->id
.spi
, x
->id
.proto
, x
->props
.family
);
770 list_add(&x
->byspi
, xfrm_state_byspi
+h
);
772 spin_unlock_bh(&xfrm_state_lock
);
776 EXPORT_SYMBOL(xfrm_alloc_spi
);
/* xfrm_state_walk - iterate over all states matching 'proto', calling
 * func(x, index, data) for each. Two passes over the bydst tables:
 * the first (visible) pass counts matches, the second invokes the
 * callback with a decrementing count (so the last call sees 0).
 * Runs entirely under xfrm_state_lock — func must not sleep.
 * NOTE(review): fragmentary extraction — count accumulation, the
 * err checks/break, and the return are missing from this view. */
778 int xfrm_state_walk(u8 proto
, int (*func
)(struct xfrm_state
*, int, void*),
782 struct xfrm_state
*x
;
786 spin_lock_bh(&xfrm_state_lock
);
787 for (i
= 0; i
< XFRM_DST_HSIZE
; i
++) {
788 list_for_each_entry(x
, xfrm_state_bydst
+i
, bydst
) {
789 if (xfrm_id_proto_match(x
->id
.proto
, proto
))
798 for (i
= 0; i
< XFRM_DST_HSIZE
; i
++) {
799 list_for_each_entry(x
, xfrm_state_bydst
+i
, bydst
) {
800 if (!xfrm_id_proto_match(x
->id
.proto
, proto
))
802 err
= func(x
, --count
, data
);
808 spin_unlock_bh(&xfrm_state_lock
);
811 EXPORT_SYMBOL(xfrm_state_walk
);
/* xfrm_replay_notify - send an XFRM_MSG_NEWAE async event to key
 * managers when replay counters have moved enough (UPDATE) or the
 * aging timer fires with pending changes (TIMEOUT). Small deltas are
 * deferred via the XFRM_TIME_DEFER flag; an unchanged TIMEOUT just
 * re-arms deferral. After notifying, preplay is snapshotted from
 * replay and the rtimer is re-armed.
 * NOTE(review): fragmentary extraction — the switch statement, some
 * break/return lines and the km_event declaration are missing. */
814 void xfrm_replay_notify(struct xfrm_state
*x
, int event
)
817 /* we send notify messages in case
818 * 1. we updated on of the sequence numbers, and the seqno difference
819 * is at least x->replay_maxdiff, in this case we also update the
820 * timeout of our timer function
821 * 2. if x->replay_maxage has elapsed since last update,
822 * and there were changes
824 * The state structure must be locked!
828 case XFRM_REPLAY_UPDATE
:
/* Delta below threshold in both directions: defer unless a timer
 * deferral is already pending, in which case flush as TIMEOUT. */
829 if (x
->replay_maxdiff
&&
830 (x
->replay
.seq
- x
->preplay
.seq
< x
->replay_maxdiff
) &&
831 (x
->replay
.oseq
- x
->preplay
.oseq
< x
->replay_maxdiff
)) {
832 if (x
->xflags
& XFRM_TIME_DEFER
)
833 event
= XFRM_REPLAY_TIMEOUT
;
840 case XFRM_REPLAY_TIMEOUT
:
/* Nothing changed since last snapshot: just keep deferring. */
841 if ((x
->replay
.seq
== x
->preplay
.seq
) &&
842 (x
->replay
.bitmap
== x
->preplay
.bitmap
) &&
843 (x
->replay
.oseq
== x
->preplay
.oseq
)) {
844 x
->xflags
|= XFRM_TIME_DEFER
;
/* Snapshot state and emit the aevent. */
851 memcpy(&x
->preplay
, &x
->replay
, sizeof(struct xfrm_replay_state
));
852 c
.event
= XFRM_MSG_NEWAE
;
853 c
.data
.aevent
= event
;
854 km_state_notify(x
, &c
);
856 if (x
->replay_maxage
&&
857 !mod_timer(&x
->rtimer
, jiffies
+ x
->replay_maxage
)) {
859 x
->xflags
&= ~XFRM_TIME_DEFER
;
862 EXPORT_SYMBOL(xfrm_replay_notify
);
/* xfrm_replay_timer_handler - rtimer callback: if the state is still
 * VALID, emit a deferred replay TIMEOUT notification when aevents
 * are enabled, otherwise flag deferral for the next update.
 * Runs in timer context under x->lock.
 * NOTE(review): fragmentary extraction — the spin_lock and braces
 * are missing from this view. */
864 static void xfrm_replay_timer_handler(unsigned long data
)
866 struct xfrm_state
*x
= (struct xfrm_state
*)data
;
870 if (x
->km
.state
== XFRM_STATE_VALID
) {
871 if (xfrm_aevent_is_on())
872 xfrm_replay_notify(x
, XFRM_REPLAY_TIMEOUT
);
874 x
->xflags
|= XFRM_TIME_DEFER
;
877 spin_unlock(&x
->lock
);
/* xfrm_replay_check - anti-replay validation for inbound sequence
 * number 'seq' (host order). Accepts any seq beyond the current
 * window head; rejects seq 0, seqs older than the window (bumping
 * the replay_window stat), and seqs already marked in the bitmap.
 * NOTE(review): fragmentary extraction — return statements and the
 * duplicate-hit error path are missing from this view. */
881 int xfrm_replay_check(struct xfrm_state
*x
, u32 seq
)
887 if (unlikely(seq
== 0))
890 if (likely(seq
> x
->replay
.seq
))
893 diff
= x
->replay
.seq
- seq
;
894 if (diff
>= x
->props
.replay_window
) {
895 x
->stats
.replay_window
++;
/* Bit set means this sequence number was already seen: replay. */
899 if (x
->replay
.bitmap
& (1U << diff
)) {
905 EXPORT_SYMBOL(xfrm_replay_check
);
/* xfrm_replay_advance - record an accepted inbound sequence number.
 * Seq ahead of the window: shift the bitmap (or reset it if the jump
 * exceeds the window) and mark the new head. Seq inside the window:
 * just set its bit. Emits an UPDATE aevent if enabled.
 * NOTE(review): fragmentary extraction — the x->replay.seq update
 * and else-branch braces are missing from this view. */
907 void xfrm_replay_advance(struct xfrm_state
*x
, u32 seq
)
913 if (seq
> x
->replay
.seq
) {
914 diff
= seq
- x
->replay
.seq
;
915 if (diff
< x
->props
.replay_window
)
916 x
->replay
.bitmap
= ((x
->replay
.bitmap
) << diff
) | 1;
/* Jumped past the whole window: only the new head is "seen". */
918 x
->replay
.bitmap
= 1;
921 diff
= x
->replay
.seq
- seq
;
922 x
->replay
.bitmap
|= (1U << diff
);
925 if (xfrm_aevent_is_on())
926 xfrm_replay_notify(x
, XFRM_REPLAY_UPDATE
);
928 EXPORT_SYMBOL(xfrm_replay_advance
);
/* Registered key managers (struct xfrm_mgr), protected by xfrm_km_lock:
 * read-locked for notification fan-out, write-locked for (un)register. */
930 static struct list_head xfrm_km_list
= LIST_HEAD_INIT(xfrm_km_list
);
931 static DEFINE_RWLOCK(xfrm_km_lock
);
/* km_policy_notify - broadcast a policy event to every registered key
 * manager that implements the optional notify_policy hook.
 * NOTE(review): fragmentary extraction — the struct xfrm_mgr *km
 * declaration and braces are missing from this view. */
933 void km_policy_notify(struct xfrm_policy
*xp
, int dir
, struct km_event
*c
)
937 read_lock(&xfrm_km_lock
);
938 list_for_each_entry(km
, &xfrm_km_list
, list
)
939 if (km
->notify_policy
)
940 km
->notify_policy(xp
, dir
, c
);
941 read_unlock(&xfrm_km_lock
);
/* km_state_notify - broadcast a state event to every registered key
 * manager (the per-manager hook invocation is among the lines missing
 * from this fragmentary view). */
944 void km_state_notify(struct xfrm_state
*x
, struct km_event
*c
)
947 read_lock(&xfrm_km_lock
);
948 list_for_each_entry(km
, &xfrm_km_list
, list
)
951 read_unlock(&xfrm_km_lock
);
954 EXPORT_SYMBOL(km_policy_notify
);
955 EXPORT_SYMBOL(km_state_notify
);
/* km_state_expired - tell key managers a state hit a lifetime limit.
 * hard=0 is a soft (rekey-advisory) expiry, hard=1 a fatal one; pid
 * addresses the requesting netlink listener. Packs a km_event with
 * XFRM_MSG_EXPIRE and fans out via km_state_notify.
 * NOTE(review): fragmentary extraction — km_event declaration, the
 * dying-flag update and hard/pid packing are missing from this view. */
957 void km_state_expired(struct xfrm_state
*x
, int hard
, u32 pid
)
963 c
.event
= XFRM_MSG_EXPIRE
;
964 km_state_notify(x
, &c
);
970 EXPORT_SYMBOL(km_state_expired
);
972 * We send to all registered managers regardless of failure
973 * We are happy with one success
/* km_query - ask every registered key manager to acquire an SA for
 * template t / policy pol (outbound). Per the comment above, all
 * managers are tried and one success suffices; the per-manager
 * success bookkeeping is among the missing lines.
 * NOTE(review): fragmentary extraction — the acqret check and the
 * return are missing from this view. */
975 int km_query(struct xfrm_state
*x
, struct xfrm_tmpl
*t
, struct xfrm_policy
*pol
)
977 int err
= -EINVAL
, acqret
;
980 read_lock(&xfrm_km_lock
);
981 list_for_each_entry(km
, &xfrm_km_list
, list
) {
982 acqret
= km
->acquire(x
, t
, pol
, XFRM_POLICY_OUT
);
986 read_unlock(&xfrm_km_lock
);
989 EXPORT_SYMBOL(km_query
);
/* km_new_mapping - report a NAT-T address/port change for state x to
 * key managers implementing the new_mapping hook; first success wins
 * (the hook-presence check and loop-exit lines are missing from this
 * fragmentary view). */
991 int km_new_mapping(struct xfrm_state
*x
, xfrm_address_t
*ipaddr
, u16 sport
)
996 read_lock(&xfrm_km_lock
);
997 list_for_each_entry(km
, &xfrm_km_list
, list
) {
999 err
= km
->new_mapping(x
, ipaddr
, sport
);
1003 read_unlock(&xfrm_km_lock
);
1006 EXPORT_SYMBOL(km_new_mapping
);
/* km_policy_expired - notify key managers that a policy hit a
 * lifetime limit (hard/soft), mirroring km_state_expired for
 * policies. Packs XFRM_MSG_POLEXPIRE into a km_event.
 * NOTE(review): fragmentary extraction — km_event declaration and
 * the hard/pid packing are missing from this view. */
1008 void km_policy_expired(struct xfrm_policy
*pol
, int dir
, int hard
, u32 pid
)
1014 c
.event
= XFRM_MSG_POLEXPIRE
;
1015 km_policy_notify(pol
, dir
, &c
);
1020 EXPORT_SYMBOL(km_policy_expired
);
/* xfrm_user_policy - setsockopt path for per-socket IPsec policy.
 * Validates optlen (bounded by PAGE_SIZE), copies the user buffer
 * in, lets each key manager try to compile it into an xfrm_policy
 * (first non-NULL result wins), then installs it on the socket.
 * NOTE(review): fragmentary extraction — err/data declarations,
 * allocation failure check, kfree and return are missing here. */
1022 int xfrm_user_policy(struct sock
*sk
, int optname
, u8 __user
*optval
, int optlen
)
1026 struct xfrm_mgr
*km
;
1027 struct xfrm_policy
*pol
= NULL
;
/* Reject empty or oversized option buffers before allocating. */
1029 if (optlen
<= 0 || optlen
> PAGE_SIZE
)
1032 data
= kmalloc(optlen
, GFP_KERNEL
);
1037 if (copy_from_user(data
, optval
, optlen
))
1041 read_lock(&xfrm_km_lock
);
1042 list_for_each_entry(km
, &xfrm_km_list
, list
) {
1043 pol
= km
->compile_policy(sk
, optname
, data
,
1048 read_unlock(&xfrm_km_lock
);
1051 xfrm_sk_policy_insert(sk
, err
, pol
);
1060 EXPORT_SYMBOL(xfrm_user_policy
);
/* xfrm_register_km - add a key manager to the notification list,
 * under the write side of xfrm_km_lock. */
1062 int xfrm_register_km(struct xfrm_mgr
*km
)
1064 write_lock_bh(&xfrm_km_lock
);
1065 list_add_tail(&km
->list
, &xfrm_km_list
);
1066 write_unlock_bh(&xfrm_km_lock
);
1069 EXPORT_SYMBOL(xfrm_register_km
);
/* xfrm_unregister_km - remove a key manager from the notification
 * list, under the write side of xfrm_km_lock. */
1071 int xfrm_unregister_km(struct xfrm_mgr
*km
)
1073 write_lock_bh(&xfrm_km_lock
);
1074 list_del(&km
->list
);
1075 write_unlock_bh(&xfrm_km_lock
);
1078 EXPORT_SYMBOL(xfrm_unregister_km
);
/* xfrm_state_register_afinfo - register per-address-family state ops.
 * Rejects NULL, out-of-range families, and duplicate registration;
 * hands the family the shared hash tables and publishes the afinfo
 * pointer under the write lock.
 * NOTE(review): fragmentary extraction — err declaration/returns
 * are missing from this view. */
1080 int xfrm_state_register_afinfo(struct xfrm_state_afinfo
*afinfo
)
1083 if (unlikely(afinfo
== NULL
))
1085 if (unlikely(afinfo
->family
>= NPROTO
))
1086 return -EAFNOSUPPORT
;
1087 write_lock_bh(&xfrm_state_afinfo_lock
);
1088 if (unlikely(xfrm_state_afinfo
[afinfo
->family
] != NULL
))
/* Families share the global hash tables owned by this file. */
1091 afinfo
->state_bydst
= xfrm_state_bydst
;
1092 afinfo
->state_bysrc
= xfrm_state_bysrc
;
1093 afinfo
->state_byspi
= xfrm_state_byspi
;
1094 xfrm_state_afinfo
[afinfo
->family
] = afinfo
;
1096 write_unlock_bh(&xfrm_state_afinfo_lock
);
1099 EXPORT_SYMBOL(xfrm_state_register_afinfo
);
/* xfrm_state_unregister_afinfo - inverse of registration: verifies
 * the slot holds this afinfo, then clears the slot and the afinfo's
 * table pointers under the write lock.
 * NOTE(review): fragmentary extraction — err declaration/returns
 * are missing from this view. */
1101 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo
*afinfo
)
1104 if (unlikely(afinfo
== NULL
))
1106 if (unlikely(afinfo
->family
>= NPROTO
))
1107 return -EAFNOSUPPORT
;
1108 write_lock_bh(&xfrm_state_afinfo_lock
);
1109 if (likely(xfrm_state_afinfo
[afinfo
->family
] != NULL
)) {
/* Only the registered owner may unregister its slot. */
1110 if (unlikely(xfrm_state_afinfo
[afinfo
->family
] != afinfo
))
1113 xfrm_state_afinfo
[afinfo
->family
] = NULL
;
1114 afinfo
->state_byspi
= NULL
;
1115 afinfo
->state_bysrc
= NULL
;
1116 afinfo
->state_bydst
= NULL
;
1119 write_unlock_bh(&xfrm_state_afinfo_lock
);
1122 EXPORT_SYMBOL(xfrm_state_unregister_afinfo
);
/* xfrm_state_get_afinfo - look up the afinfo for 'family' and return
 * it with the afinfo read lock HELD (released by the matching
 * xfrm_state_put_afinfo); returns with the lock dropped only when
 * the family is unknown or unregistered.
 * NOTE(review): fragmentary extraction — the NULL returns are
 * missing from this view. */
1124 static struct xfrm_state_afinfo
*xfrm_state_get_afinfo(unsigned short family
)
1126 struct xfrm_state_afinfo
*afinfo
;
1127 if (unlikely(family
>= NPROTO
))
1129 read_lock(&xfrm_state_afinfo_lock
);
1130 afinfo
= xfrm_state_afinfo
[family
];
1131 if (unlikely(!afinfo
))
1132 read_unlock(&xfrm_state_afinfo_lock
);
/* xfrm_state_put_afinfo - release the read lock taken by a successful
 * xfrm_state_get_afinfo (the afinfo argument is not otherwise used in
 * the visible lines). */
1136 static void xfrm_state_put_afinfo(struct xfrm_state_afinfo
*afinfo
)
1138 read_unlock(&xfrm_state_afinfo_lock
);
1141 /* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
/* xfrm_state_delete_tunnel - drop x's reference on its inner tunnel
 * state t; when only the tunnel bookkeeping references remain
 * (tunnel_users == 2), delete t as well, then decrement the user
 * count and clear the link (clearing line missing from this view). */
1142 void xfrm_state_delete_tunnel(struct xfrm_state
*x
)
1145 struct xfrm_state
*t
= x
->tunnel
;
1147 if (atomic_read(&t
->tunnel_users
) == 2)
1148 xfrm_state_delete(t
);
1149 atomic_dec(&t
->tunnel_users
);
1154 EXPORT_SYMBOL(xfrm_state_delete_tunnel
);
1157 * This function is NOT optimal. For example, with ESP it will give an
1158 * MTU that's usually two bytes short of being optimal. However, it will
1159 * usually give an answer that's a multiple of 4 provided the input is
1160 * also a multiple of 4.
/* xfrm_state_mtu - compute the usable inner MTU for traffic carried
 * by state x given outer MTU 'mtu': subtract the transform header,
 * then iterate with the type's get_max_size (under x->lock) to
 * account for padding/trailer expansion. See accuracy caveat above.
 * NOTE(review): fragmentary extraction — the res/m iteration loop
 * and return are missing from this view. */
1162 int xfrm_state_mtu(struct xfrm_state
*x
, int mtu
)
1166 res
-= x
->props
.header_len
;
1174 spin_lock_bh(&x
->lock
);
1175 if (x
->km
.state
== XFRM_STATE_VALID
&&
1176 x
->type
&& x
->type
->get_max_size
)
1177 m
= x
->type
->get_max_size(x
, m
);
1179 m
+= x
->props
.header_len
;
1180 spin_unlock_bh(&x
->lock
);
/* xfrm_init_state - finish constructing a state before use:
 * run the family's init_flags hook, bind and initialize the
 * protocol type (xfrm_get_type + init_state), bind the mode,
 * and finally mark the state VALID.
 * NOTE(review): fragmentary extraction — err declaration, goto
 * error labels and the return are missing from this view. */
1190 int xfrm_init_state(struct xfrm_state
*x
)
1192 struct xfrm_state_afinfo
*afinfo
;
1193 int family
= x
->props
.family
;
1196 err
= -EAFNOSUPPORT
;
1197 afinfo
= xfrm_state_get_afinfo(family
);
1202 if (afinfo
->init_flags
)
1203 err
= afinfo
->init_flags(x
);
1205 xfrm_state_put_afinfo(afinfo
);
1210 err
= -EPROTONOSUPPORT
;
/* Takes a module reference on the type; released by GC destroy. */
1211 x
->type
= xfrm_get_type(x
->id
.proto
, family
);
1212 if (x
->type
== NULL
)
1215 err
= x
->type
->init_state(x
);
1219 x
->mode
= xfrm_get_mode(x
->props
.mode
, family
);
1220 if (x
->mode
== NULL
)
1223 x
->km
.state
= XFRM_STATE_VALID
;
1229 EXPORT_SYMBOL(xfrm_init_state
);
/* xfrm_state_init - boot-time initialization: set up every bucket of
 * the three hash tables and wire the GC work item to its handler
 * (old 3-argument INIT_WORK with a NULL data pointer). */
1231 void __init
xfrm_state_init(void)
1235 for (i
=0; i
<XFRM_DST_HSIZE
; i
++) {
1236 INIT_LIST_HEAD(&xfrm_state_bydst
[i
]);
1237 INIT_LIST_HEAD(&xfrm_state_bysrc
[i
]);
1238 INIT_LIST_HEAD(&xfrm_state_byspi
[i
]);
1240 INIT_WORK(&xfrm_state_gc_work
, xfrm_state_gc_task
, NULL
);