]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - net/xfrm/xfrm_state.c
[IPSEC] Fix xfrm to pfkey SA state conversion
[mirror_ubuntu-artful-kernel.git] / net / xfrm / xfrm_state.c
CommitLineData
1da177e4
LT
1/*
2 * xfrm_state.c
3 *
4 * Changes:
5 * Mitsuru KANDA @USAGI
6 * Kazunori MIYAZAWA @USAGI
7 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
8 * IPv6 support
9 * YOSHIFUJI Hideaki @USAGI
10 * Split up af-specific functions
11 * Derek Atkins <derek@ihtfp.com>
12 * Add UDP Encapsulation
13 *
14 */
15
16#include <linux/workqueue.h>
17#include <net/xfrm.h>
18#include <linux/pfkeyv2.h>
19#include <linux/ipsec.h>
20#include <linux/module.h>
21#include <asm/uaccess.h>
22
23/* Each xfrm_state may be linked to two tables:
24
25 1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
26 2. Hash table by daddr to find what SAs exist for given
27 destination/tunnel endpoint. (output)
28 */
29
30static DEFINE_SPINLOCK(xfrm_state_lock);
31
32/* Hash table to find appropriate SA towards given target (endpoint
33 * of tunnel or destination of transport mode) allowed by selector.
34 *
35 * Main use is finding SA after policy selected tunnel or transport mode.
36 * Also, it can be used by ah/esp icmp error handler to find offending SA.
37 */
38static struct list_head xfrm_state_bydst[XFRM_DST_HSIZE];
39static struct list_head xfrm_state_byspi[XFRM_DST_HSIZE];
40
41DECLARE_WAIT_QUEUE_HEAD(km_waitq);
42EXPORT_SYMBOL(km_waitq);
43
44static DEFINE_RWLOCK(xfrm_state_afinfo_lock);
45static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO];
46
47static struct work_struct xfrm_state_gc_work;
48static struct list_head xfrm_state_gc_list = LIST_HEAD_INIT(xfrm_state_gc_list);
49static DEFINE_SPINLOCK(xfrm_state_gc_lock);
50
51static int xfrm_state_gc_flush_bundles;
52
26b15dad 53static int __xfrm_state_delete(struct xfrm_state *x);
1da177e4
LT
54
55static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family);
56static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
57
58static int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
59static void km_state_expired(struct xfrm_state *x, int hard);
60
61static void xfrm_state_gc_destroy(struct xfrm_state *x)
62{
63 if (del_timer(&x->timer))
64 BUG();
65 if (x->aalg)
66 kfree(x->aalg);
67 if (x->ealg)
68 kfree(x->ealg);
69 if (x->calg)
70 kfree(x->calg);
71 if (x->encap)
72 kfree(x->encap);
73 if (x->type) {
74 x->type->destructor(x);
75 xfrm_put_type(x->type);
76 }
77 kfree(x);
78}
79
80static void xfrm_state_gc_task(void *data)
81{
82 struct xfrm_state *x;
83 struct list_head *entry, *tmp;
84 struct list_head gc_list = LIST_HEAD_INIT(gc_list);
85
86 if (xfrm_state_gc_flush_bundles) {
87 xfrm_state_gc_flush_bundles = 0;
88 xfrm_flush_bundles();
89 }
90
91 spin_lock_bh(&xfrm_state_gc_lock);
92 list_splice_init(&xfrm_state_gc_list, &gc_list);
93 spin_unlock_bh(&xfrm_state_gc_lock);
94
95 list_for_each_safe(entry, tmp, &gc_list) {
96 x = list_entry(entry, struct xfrm_state, bydst);
97 xfrm_state_gc_destroy(x);
98 }
99 wake_up(&km_waitq);
100}
101
102static inline unsigned long make_jiffies(long secs)
103{
104 if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
105 return MAX_SCHEDULE_TIMEOUT-1;
106 else
107 return secs*HZ;
108}
109
110static void xfrm_timer_handler(unsigned long data)
111{
112 struct xfrm_state *x = (struct xfrm_state*)data;
113 unsigned long now = (unsigned long)xtime.tv_sec;
114 long next = LONG_MAX;
115 int warn = 0;
116
117 spin_lock(&x->lock);
118 if (x->km.state == XFRM_STATE_DEAD)
119 goto out;
120 if (x->km.state == XFRM_STATE_EXPIRED)
121 goto expired;
122 if (x->lft.hard_add_expires_seconds) {
123 long tmo = x->lft.hard_add_expires_seconds +
124 x->curlft.add_time - now;
125 if (tmo <= 0)
126 goto expired;
127 if (tmo < next)
128 next = tmo;
129 }
130 if (x->lft.hard_use_expires_seconds) {
131 long tmo = x->lft.hard_use_expires_seconds +
132 (x->curlft.use_time ? : now) - now;
133 if (tmo <= 0)
134 goto expired;
135 if (tmo < next)
136 next = tmo;
137 }
138 if (x->km.dying)
139 goto resched;
140 if (x->lft.soft_add_expires_seconds) {
141 long tmo = x->lft.soft_add_expires_seconds +
142 x->curlft.add_time - now;
143 if (tmo <= 0)
144 warn = 1;
145 else if (tmo < next)
146 next = tmo;
147 }
148 if (x->lft.soft_use_expires_seconds) {
149 long tmo = x->lft.soft_use_expires_seconds +
150 (x->curlft.use_time ? : now) - now;
151 if (tmo <= 0)
152 warn = 1;
153 else if (tmo < next)
154 next = tmo;
155 }
156
4666faab 157 x->km.dying = warn;
1da177e4
LT
158 if (warn)
159 km_state_expired(x, 0);
160resched:
161 if (next != LONG_MAX &&
162 !mod_timer(&x->timer, jiffies + make_jiffies(next)))
163 xfrm_state_hold(x);
164 goto out;
165
166expired:
167 if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0) {
168 x->km.state = XFRM_STATE_EXPIRED;
169 wake_up(&km_waitq);
170 next = 2;
171 goto resched;
172 }
4666faab 173 if (!__xfrm_state_delete(x) && x->id.spi)
1da177e4 174 km_state_expired(x, 1);
1da177e4
LT
175
176out:
177 spin_unlock(&x->lock);
178 xfrm_state_put(x);
179}
180
181struct xfrm_state *xfrm_state_alloc(void)
182{
183 struct xfrm_state *x;
184
185 x = kmalloc(sizeof(struct xfrm_state), GFP_ATOMIC);
186
187 if (x) {
188 memset(x, 0, sizeof(struct xfrm_state));
189 atomic_set(&x->refcnt, 1);
190 atomic_set(&x->tunnel_users, 0);
191 INIT_LIST_HEAD(&x->bydst);
192 INIT_LIST_HEAD(&x->byspi);
193 init_timer(&x->timer);
194 x->timer.function = xfrm_timer_handler;
195 x->timer.data = (unsigned long)x;
196 x->curlft.add_time = (unsigned long)xtime.tv_sec;
197 x->lft.soft_byte_limit = XFRM_INF;
198 x->lft.soft_packet_limit = XFRM_INF;
199 x->lft.hard_byte_limit = XFRM_INF;
200 x->lft.hard_packet_limit = XFRM_INF;
201 spin_lock_init(&x->lock);
202 }
203 return x;
204}
205EXPORT_SYMBOL(xfrm_state_alloc);
206
207void __xfrm_state_destroy(struct xfrm_state *x)
208{
209 BUG_TRAP(x->km.state == XFRM_STATE_DEAD);
210
211 spin_lock_bh(&xfrm_state_gc_lock);
212 list_add(&x->bydst, &xfrm_state_gc_list);
213 spin_unlock_bh(&xfrm_state_gc_lock);
214 schedule_work(&xfrm_state_gc_work);
215}
216EXPORT_SYMBOL(__xfrm_state_destroy);
217
26b15dad 218static int __xfrm_state_delete(struct xfrm_state *x)
1da177e4 219{
26b15dad
JHS
220 int err = -ESRCH;
221
1da177e4
LT
222 if (x->km.state != XFRM_STATE_DEAD) {
223 x->km.state = XFRM_STATE_DEAD;
224 spin_lock(&xfrm_state_lock);
225 list_del(&x->bydst);
226 atomic_dec(&x->refcnt);
227 if (x->id.spi) {
228 list_del(&x->byspi);
229 atomic_dec(&x->refcnt);
230 }
231 spin_unlock(&xfrm_state_lock);
232 if (del_timer(&x->timer))
233 atomic_dec(&x->refcnt);
234
235 /* The number two in this test is the reference
236 * mentioned in the comment below plus the reference
237 * our caller holds. A larger value means that
238 * there are DSTs attached to this xfrm_state.
239 */
240 if (atomic_read(&x->refcnt) > 2) {
241 xfrm_state_gc_flush_bundles = 1;
242 schedule_work(&xfrm_state_gc_work);
243 }
244
245 /* All xfrm_state objects are created by xfrm_state_alloc.
246 * The xfrm_state_alloc call gives a reference, and that
247 * is what we are dropping here.
248 */
249 atomic_dec(&x->refcnt);
26b15dad 250 err = 0;
1da177e4 251 }
26b15dad
JHS
252
253 return err;
1da177e4
LT
254}
255
26b15dad 256int xfrm_state_delete(struct xfrm_state *x)
1da177e4 257{
26b15dad
JHS
258 int err;
259
1da177e4 260 spin_lock_bh(&x->lock);
26b15dad 261 err = __xfrm_state_delete(x);
1da177e4 262 spin_unlock_bh(&x->lock);
26b15dad
JHS
263
264 return err;
1da177e4
LT
265}
266EXPORT_SYMBOL(xfrm_state_delete);
267
268void xfrm_state_flush(u8 proto)
269{
270 int i;
271 struct xfrm_state *x;
272
273 spin_lock_bh(&xfrm_state_lock);
274 for (i = 0; i < XFRM_DST_HSIZE; i++) {
275restart:
276 list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
277 if (!xfrm_state_kern(x) &&
278 (proto == IPSEC_PROTO_ANY || x->id.proto == proto)) {
279 xfrm_state_hold(x);
280 spin_unlock_bh(&xfrm_state_lock);
281
282 xfrm_state_delete(x);
283 xfrm_state_put(x);
284
285 spin_lock_bh(&xfrm_state_lock);
286 goto restart;
287 }
288 }
289 }
290 spin_unlock_bh(&xfrm_state_lock);
291 wake_up(&km_waitq);
292}
293EXPORT_SYMBOL(xfrm_state_flush);
294
295static int
296xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
297 struct xfrm_tmpl *tmpl,
298 xfrm_address_t *daddr, xfrm_address_t *saddr,
299 unsigned short family)
300{
301 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
302 if (!afinfo)
303 return -1;
304 afinfo->init_tempsel(x, fl, tmpl, daddr, saddr);
305 xfrm_state_put_afinfo(afinfo);
306 return 0;
307}
308
309struct xfrm_state *
310xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
311 struct flowi *fl, struct xfrm_tmpl *tmpl,
312 struct xfrm_policy *pol, int *err,
313 unsigned short family)
314{
315 unsigned h = xfrm_dst_hash(daddr, family);
316 struct xfrm_state *x, *x0;
317 int acquire_in_progress = 0;
318 int error = 0;
319 struct xfrm_state *best = NULL;
320 struct xfrm_state_afinfo *afinfo;
321
322 afinfo = xfrm_state_get_afinfo(family);
323 if (afinfo == NULL) {
324 *err = -EAFNOSUPPORT;
325 return NULL;
326 }
327
328 spin_lock_bh(&xfrm_state_lock);
329 list_for_each_entry(x, xfrm_state_bydst+h, bydst) {
330 if (x->props.family == family &&
331 x->props.reqid == tmpl->reqid &&
332 xfrm_state_addr_check(x, daddr, saddr, family) &&
333 tmpl->mode == x->props.mode &&
334 tmpl->id.proto == x->id.proto &&
335 (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) {
336 /* Resolution logic:
337 1. There is a valid state with matching selector.
338 Done.
339 2. Valid state with inappropriate selector. Skip.
340
341 Entering area of "sysdeps".
342
343 3. If state is not valid, selector is temporary,
344 it selects only session which triggered
345 previous resolution. Key manager will do
346 something to install a state with proper
347 selector.
348 */
349 if (x->km.state == XFRM_STATE_VALID) {
350 if (!xfrm_selector_match(&x->sel, fl, family))
351 continue;
352 if (!best ||
353 best->km.dying > x->km.dying ||
354 (best->km.dying == x->km.dying &&
355 best->curlft.add_time < x->curlft.add_time))
356 best = x;
357 } else if (x->km.state == XFRM_STATE_ACQ) {
358 acquire_in_progress = 1;
359 } else if (x->km.state == XFRM_STATE_ERROR ||
360 x->km.state == XFRM_STATE_EXPIRED) {
361 if (xfrm_selector_match(&x->sel, fl, family))
362 error = -ESRCH;
363 }
364 }
365 }
366
367 x = best;
368 if (!x && !error && !acquire_in_progress) {
5c5d281a
PM
369 if (tmpl->id.spi &&
370 (x0 = afinfo->state_lookup(daddr, tmpl->id.spi,
371 tmpl->id.proto)) != NULL) {
1da177e4
LT
372 xfrm_state_put(x0);
373 error = -EEXIST;
374 goto out;
375 }
376 x = xfrm_state_alloc();
377 if (x == NULL) {
378 error = -ENOMEM;
379 goto out;
380 }
381 /* Initialize temporary selector matching only
382 * to current session. */
383 xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);
384
385 if (km_query(x, tmpl, pol) == 0) {
386 x->km.state = XFRM_STATE_ACQ;
387 list_add_tail(&x->bydst, xfrm_state_bydst+h);
388 xfrm_state_hold(x);
389 if (x->id.spi) {
390 h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, family);
391 list_add(&x->byspi, xfrm_state_byspi+h);
392 xfrm_state_hold(x);
393 }
394 x->lft.hard_add_expires_seconds = XFRM_ACQ_EXPIRES;
395 xfrm_state_hold(x);
396 x->timer.expires = jiffies + XFRM_ACQ_EXPIRES*HZ;
397 add_timer(&x->timer);
398 } else {
399 x->km.state = XFRM_STATE_DEAD;
400 xfrm_state_put(x);
401 x = NULL;
402 error = -ESRCH;
403 }
404 }
405out:
406 if (x)
407 xfrm_state_hold(x);
408 else
409 *err = acquire_in_progress ? -EAGAIN : error;
410 spin_unlock_bh(&xfrm_state_lock);
411 xfrm_state_put_afinfo(afinfo);
412 return x;
413}
414
415static void __xfrm_state_insert(struct xfrm_state *x)
416{
417 unsigned h = xfrm_dst_hash(&x->id.daddr, x->props.family);
418
419 list_add(&x->bydst, xfrm_state_bydst+h);
420 xfrm_state_hold(x);
421
422 h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
423
424 list_add(&x->byspi, xfrm_state_byspi+h);
425 xfrm_state_hold(x);
426
427 if (!mod_timer(&x->timer, jiffies + HZ))
428 xfrm_state_hold(x);
429
430 wake_up(&km_waitq);
431}
432
433void xfrm_state_insert(struct xfrm_state *x)
434{
435 spin_lock_bh(&xfrm_state_lock);
436 __xfrm_state_insert(x);
437 spin_unlock_bh(&xfrm_state_lock);
438}
439EXPORT_SYMBOL(xfrm_state_insert);
440
441static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq);
442
443int xfrm_state_add(struct xfrm_state *x)
444{
445 struct xfrm_state_afinfo *afinfo;
446 struct xfrm_state *x1;
447 int family;
448 int err;
449
450 family = x->props.family;
451 afinfo = xfrm_state_get_afinfo(family);
452 if (unlikely(afinfo == NULL))
453 return -EAFNOSUPPORT;
454
455 spin_lock_bh(&xfrm_state_lock);
456
457 x1 = afinfo->state_lookup(&x->id.daddr, x->id.spi, x->id.proto);
458 if (x1) {
459 xfrm_state_put(x1);
460 x1 = NULL;
461 err = -EEXIST;
462 goto out;
463 }
464
465 if (x->km.seq) {
466 x1 = __xfrm_find_acq_byseq(x->km.seq);
467 if (x1 && xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family)) {
468 xfrm_state_put(x1);
469 x1 = NULL;
470 }
471 }
472
473 if (!x1)
474 x1 = afinfo->find_acq(
475 x->props.mode, x->props.reqid, x->id.proto,
476 &x->id.daddr, &x->props.saddr, 0);
477
478 __xfrm_state_insert(x);
479 err = 0;
480
481out:
482 spin_unlock_bh(&xfrm_state_lock);
483 xfrm_state_put_afinfo(afinfo);
484
485 if (x1) {
486 xfrm_state_delete(x1);
487 xfrm_state_put(x1);
488 }
489
490 return err;
491}
492EXPORT_SYMBOL(xfrm_state_add);
493
494int xfrm_state_update(struct xfrm_state *x)
495{
496 struct xfrm_state_afinfo *afinfo;
497 struct xfrm_state *x1;
498 int err;
499
500 afinfo = xfrm_state_get_afinfo(x->props.family);
501 if (unlikely(afinfo == NULL))
502 return -EAFNOSUPPORT;
503
504 spin_lock_bh(&xfrm_state_lock);
505 x1 = afinfo->state_lookup(&x->id.daddr, x->id.spi, x->id.proto);
506
507 err = -ESRCH;
508 if (!x1)
509 goto out;
510
511 if (xfrm_state_kern(x1)) {
512 xfrm_state_put(x1);
513 err = -EEXIST;
514 goto out;
515 }
516
517 if (x1->km.state == XFRM_STATE_ACQ) {
518 __xfrm_state_insert(x);
519 x = NULL;
520 }
521 err = 0;
522
523out:
524 spin_unlock_bh(&xfrm_state_lock);
525 xfrm_state_put_afinfo(afinfo);
526
527 if (err)
528 return err;
529
530 if (!x) {
531 xfrm_state_delete(x1);
532 xfrm_state_put(x1);
533 return 0;
534 }
535
536 err = -EINVAL;
537 spin_lock_bh(&x1->lock);
538 if (likely(x1->km.state == XFRM_STATE_VALID)) {
539 if (x->encap && x1->encap)
540 memcpy(x1->encap, x->encap, sizeof(*x1->encap));
541 memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
542 x1->km.dying = 0;
543
544 if (!mod_timer(&x1->timer, jiffies + HZ))
545 xfrm_state_hold(x1);
546 if (x1->curlft.use_time)
547 xfrm_state_check_expire(x1);
548
549 err = 0;
550 }
551 spin_unlock_bh(&x1->lock);
552
553 xfrm_state_put(x1);
554
555 return err;
556}
557EXPORT_SYMBOL(xfrm_state_update);
558
559int xfrm_state_check_expire(struct xfrm_state *x)
560{
561 if (!x->curlft.use_time)
562 x->curlft.use_time = (unsigned long)xtime.tv_sec;
563
564 if (x->km.state != XFRM_STATE_VALID)
565 return -EINVAL;
566
567 if (x->curlft.bytes >= x->lft.hard_byte_limit ||
568 x->curlft.packets >= x->lft.hard_packet_limit) {
4666faab
HX
569 x->km.state = XFRM_STATE_EXPIRED;
570 if (!mod_timer(&x->timer, jiffies))
1da177e4
LT
571 xfrm_state_hold(x);
572 return -EINVAL;
573 }
574
575 if (!x->km.dying &&
576 (x->curlft.bytes >= x->lft.soft_byte_limit ||
4666faab
HX
577 x->curlft.packets >= x->lft.soft_packet_limit)) {
578 x->km.dying = 1;
1da177e4 579 km_state_expired(x, 0);
4666faab 580 }
1da177e4
LT
581 return 0;
582}
583EXPORT_SYMBOL(xfrm_state_check_expire);
584
585static int xfrm_state_check_space(struct xfrm_state *x, struct sk_buff *skb)
586{
587 int nhead = x->props.header_len + LL_RESERVED_SPACE(skb->dst->dev)
588 - skb_headroom(skb);
589
590 if (nhead > 0)
591 return pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);
592
593 /* Check tail too... */
594 return 0;
595}
596
/* Combined pre-output check: lifetime expiry first, then skb headroom. */
int xfrm_state_check(struct xfrm_state *x, struct sk_buff *skb)
{
	int err = xfrm_state_check_expire(x);

	if (err < 0)
		goto err;
	err = xfrm_state_check_space(x, skb);
err:
	return err;
}
EXPORT_SYMBOL(xfrm_state_check);
607
608struct xfrm_state *
609xfrm_state_lookup(xfrm_address_t *daddr, u32 spi, u8 proto,
610 unsigned short family)
611{
612 struct xfrm_state *x;
613 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
614 if (!afinfo)
615 return NULL;
616
617 spin_lock_bh(&xfrm_state_lock);
618 x = afinfo->state_lookup(daddr, spi, proto);
619 spin_unlock_bh(&xfrm_state_lock);
620 xfrm_state_put_afinfo(afinfo);
621 return x;
622}
623EXPORT_SYMBOL(xfrm_state_lookup);
624
625struct xfrm_state *
626xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
627 xfrm_address_t *daddr, xfrm_address_t *saddr,
628 int create, unsigned short family)
629{
630 struct xfrm_state *x;
631 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
632 if (!afinfo)
633 return NULL;
634
635 spin_lock_bh(&xfrm_state_lock);
636 x = afinfo->find_acq(mode, reqid, proto, daddr, saddr, create);
637 spin_unlock_bh(&xfrm_state_lock);
638 xfrm_state_put_afinfo(afinfo);
639 return x;
640}
641EXPORT_SYMBOL(xfrm_find_acq);
642
643/* Silly enough, but I'm lazy to build resolution list */
644
645static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq)
646{
647 int i;
648 struct xfrm_state *x;
649
650 for (i = 0; i < XFRM_DST_HSIZE; i++) {
651 list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
652 if (x->km.seq == seq && x->km.state == XFRM_STATE_ACQ) {
653 xfrm_state_hold(x);
654 return x;
655 }
656 }
657 }
658 return NULL;
659}
660
661struct xfrm_state *xfrm_find_acq_byseq(u32 seq)
662{
663 struct xfrm_state *x;
664
665 spin_lock_bh(&xfrm_state_lock);
666 x = __xfrm_find_acq_byseq(seq);
667 spin_unlock_bh(&xfrm_state_lock);
668 return x;
669}
670EXPORT_SYMBOL(xfrm_find_acq_byseq);
671
672u32 xfrm_get_acqseq(void)
673{
674 u32 res;
675 static u32 acqseq;
676 static DEFINE_SPINLOCK(acqseq_lock);
677
678 spin_lock_bh(&acqseq_lock);
679 res = (++acqseq ? : ++acqseq);
680 spin_unlock_bh(&acqseq_lock);
681 return res;
682}
683EXPORT_SYMBOL(xfrm_get_acqseq);
684
685void
686xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi)
687{
688 u32 h;
689 struct xfrm_state *x0;
690
691 if (x->id.spi)
692 return;
693
694 if (minspi == maxspi) {
695 x0 = xfrm_state_lookup(&x->id.daddr, minspi, x->id.proto, x->props.family);
696 if (x0) {
697 xfrm_state_put(x0);
698 return;
699 }
700 x->id.spi = minspi;
701 } else {
702 u32 spi = 0;
703 minspi = ntohl(minspi);
704 maxspi = ntohl(maxspi);
705 for (h=0; h<maxspi-minspi+1; h++) {
706 spi = minspi + net_random()%(maxspi-minspi+1);
707 x0 = xfrm_state_lookup(&x->id.daddr, htonl(spi), x->id.proto, x->props.family);
708 if (x0 == NULL) {
709 x->id.spi = htonl(spi);
710 break;
711 }
712 xfrm_state_put(x0);
713 }
714 }
715 if (x->id.spi) {
716 spin_lock_bh(&xfrm_state_lock);
717 h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
718 list_add(&x->byspi, xfrm_state_byspi+h);
719 xfrm_state_hold(x);
720 spin_unlock_bh(&xfrm_state_lock);
721 wake_up(&km_waitq);
722 }
723}
724EXPORT_SYMBOL(xfrm_alloc_spi);
725
726int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*),
727 void *data)
728{
729 int i;
730 struct xfrm_state *x;
731 int count = 0;
732 int err = 0;
733
734 spin_lock_bh(&xfrm_state_lock);
735 for (i = 0; i < XFRM_DST_HSIZE; i++) {
736 list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
737 if (proto == IPSEC_PROTO_ANY || x->id.proto == proto)
738 count++;
739 }
740 }
741 if (count == 0) {
742 err = -ENOENT;
743 goto out;
744 }
745
746 for (i = 0; i < XFRM_DST_HSIZE; i++) {
747 list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
748 if (proto != IPSEC_PROTO_ANY && x->id.proto != proto)
749 continue;
750 err = func(x, --count, data);
751 if (err)
752 goto out;
753 }
754 }
755out:
756 spin_unlock_bh(&xfrm_state_lock);
757 return err;
758}
759EXPORT_SYMBOL(xfrm_state_walk);
760
761int xfrm_replay_check(struct xfrm_state *x, u32 seq)
762{
763 u32 diff;
764
765 seq = ntohl(seq);
766
767 if (unlikely(seq == 0))
768 return -EINVAL;
769
770 if (likely(seq > x->replay.seq))
771 return 0;
772
773 diff = x->replay.seq - seq;
774 if (diff >= x->props.replay_window) {
775 x->stats.replay_window++;
776 return -EINVAL;
777 }
778
779 if (x->replay.bitmap & (1U << diff)) {
780 x->stats.replay++;
781 return -EINVAL;
782 }
783 return 0;
784}
785EXPORT_SYMBOL(xfrm_replay_check);
786
787void xfrm_replay_advance(struct xfrm_state *x, u32 seq)
788{
789 u32 diff;
790
791 seq = ntohl(seq);
792
793 if (seq > x->replay.seq) {
794 diff = seq - x->replay.seq;
795 if (diff < x->props.replay_window)
796 x->replay.bitmap = ((x->replay.bitmap) << diff) | 1;
797 else
798 x->replay.bitmap = 1;
799 x->replay.seq = seq;
800 } else {
801 diff = x->replay.seq - seq;
802 x->replay.bitmap |= (1U << diff);
803 }
804}
805EXPORT_SYMBOL(xfrm_replay_advance);
806
807static struct list_head xfrm_km_list = LIST_HEAD_INIT(xfrm_km_list);
808static DEFINE_RWLOCK(xfrm_km_lock);
809
26b15dad 810void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
1da177e4
LT
811{
812 struct xfrm_mgr *km;
813
26b15dad
JHS
814 read_lock(&xfrm_km_lock);
815 list_for_each_entry(km, &xfrm_km_list, list)
816 if (km->notify_policy)
817 km->notify_policy(xp, dir, c);
818 read_unlock(&xfrm_km_lock);
819}
1da177e4 820
26b15dad
JHS
821void km_state_notify(struct xfrm_state *x, struct km_event *c)
822{
823 struct xfrm_mgr *km;
1da177e4
LT
824 read_lock(&xfrm_km_lock);
825 list_for_each_entry(km, &xfrm_km_list, list)
26b15dad
JHS
826 if (km->notify)
827 km->notify(x, c);
1da177e4 828 read_unlock(&xfrm_km_lock);
26b15dad
JHS
829}
830
831EXPORT_SYMBOL(km_policy_notify);
832EXPORT_SYMBOL(km_state_notify);
833
834static void km_state_expired(struct xfrm_state *x, int hard)
835{
836 struct km_event c;
837
26b15dad
JHS
838 c.data = hard;
839 c.event = XFRM_SAP_EXPIRED;
840 km_state_notify(x, &c);
1da177e4
LT
841
842 if (hard)
843 wake_up(&km_waitq);
844}
845
26b15dad
JHS
846/*
847 * We send to all registered managers regardless of failure
848 * We are happy with one success
849*/
1da177e4
LT
850static int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
851{
26b15dad 852 int err = -EINVAL, acqret;
1da177e4
LT
853 struct xfrm_mgr *km;
854
855 read_lock(&xfrm_km_lock);
856 list_for_each_entry(km, &xfrm_km_list, list) {
26b15dad
JHS
857 acqret = km->acquire(x, t, pol, XFRM_POLICY_OUT);
858 if (!acqret)
859 err = acqret;
1da177e4
LT
860 }
861 read_unlock(&xfrm_km_lock);
862 return err;
863}
864
865int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, u16 sport)
866{
867 int err = -EINVAL;
868 struct xfrm_mgr *km;
869
870 read_lock(&xfrm_km_lock);
871 list_for_each_entry(km, &xfrm_km_list, list) {
872 if (km->new_mapping)
873 err = km->new_mapping(x, ipaddr, sport);
874 if (!err)
875 break;
876 }
877 read_unlock(&xfrm_km_lock);
878 return err;
879}
880EXPORT_SYMBOL(km_new_mapping);
881
882void km_policy_expired(struct xfrm_policy *pol, int dir, int hard)
883{
26b15dad 884 struct km_event c;
1da177e4 885
26b15dad
JHS
886 c.data = hard;
887 c.data = hard;
888 c.event = XFRM_SAP_EXPIRED;
889 km_policy_notify(pol, dir, &c);
1da177e4
LT
890
891 if (hard)
892 wake_up(&km_waitq);
893}
894
895int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
896{
897 int err;
898 u8 *data;
899 struct xfrm_mgr *km;
900 struct xfrm_policy *pol = NULL;
901
902 if (optlen <= 0 || optlen > PAGE_SIZE)
903 return -EMSGSIZE;
904
905 data = kmalloc(optlen, GFP_KERNEL);
906 if (!data)
907 return -ENOMEM;
908
909 err = -EFAULT;
910 if (copy_from_user(data, optval, optlen))
911 goto out;
912
913 err = -EINVAL;
914 read_lock(&xfrm_km_lock);
915 list_for_each_entry(km, &xfrm_km_list, list) {
916 pol = km->compile_policy(sk->sk_family, optname, data,
917 optlen, &err);
918 if (err >= 0)
919 break;
920 }
921 read_unlock(&xfrm_km_lock);
922
923 if (err >= 0) {
924 xfrm_sk_policy_insert(sk, err, pol);
925 xfrm_pol_put(pol);
926 err = 0;
927 }
928
929out:
930 kfree(data);
931 return err;
932}
933EXPORT_SYMBOL(xfrm_user_policy);
934
935int xfrm_register_km(struct xfrm_mgr *km)
936{
937 write_lock_bh(&xfrm_km_lock);
938 list_add_tail(&km->list, &xfrm_km_list);
939 write_unlock_bh(&xfrm_km_lock);
940 return 0;
941}
942EXPORT_SYMBOL(xfrm_register_km);
943
944int xfrm_unregister_km(struct xfrm_mgr *km)
945{
946 write_lock_bh(&xfrm_km_lock);
947 list_del(&km->list);
948 write_unlock_bh(&xfrm_km_lock);
949 return 0;
950}
951EXPORT_SYMBOL(xfrm_unregister_km);
952
953int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
954{
955 int err = 0;
956 if (unlikely(afinfo == NULL))
957 return -EINVAL;
958 if (unlikely(afinfo->family >= NPROTO))
959 return -EAFNOSUPPORT;
960 write_lock(&xfrm_state_afinfo_lock);
961 if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
962 err = -ENOBUFS;
963 else {
964 afinfo->state_bydst = xfrm_state_bydst;
965 afinfo->state_byspi = xfrm_state_byspi;
966 xfrm_state_afinfo[afinfo->family] = afinfo;
967 }
968 write_unlock(&xfrm_state_afinfo_lock);
969 return err;
970}
971EXPORT_SYMBOL(xfrm_state_register_afinfo);
972
973int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
974{
975 int err = 0;
976 if (unlikely(afinfo == NULL))
977 return -EINVAL;
978 if (unlikely(afinfo->family >= NPROTO))
979 return -EAFNOSUPPORT;
980 write_lock(&xfrm_state_afinfo_lock);
981 if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
982 if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo))
983 err = -EINVAL;
984 else {
985 xfrm_state_afinfo[afinfo->family] = NULL;
986 afinfo->state_byspi = NULL;
987 afinfo->state_bydst = NULL;
988 }
989 }
990 write_unlock(&xfrm_state_afinfo_lock);
991 return err;
992}
993EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
994
995static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family)
996{
997 struct xfrm_state_afinfo *afinfo;
998 if (unlikely(family >= NPROTO))
999 return NULL;
1000 read_lock(&xfrm_state_afinfo_lock);
1001 afinfo = xfrm_state_afinfo[family];
1002 if (likely(afinfo != NULL))
1003 read_lock(&afinfo->lock);
1004 read_unlock(&xfrm_state_afinfo_lock);
1005 return afinfo;
1006}
1007
1008static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
1009{
1010 if (unlikely(afinfo == NULL))
1011 return;
1012 read_unlock(&afinfo->lock);
1013}
1014
1015/* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
1016void xfrm_state_delete_tunnel(struct xfrm_state *x)
1017{
1018 if (x->tunnel) {
1019 struct xfrm_state *t = x->tunnel;
1020
1021 if (atomic_read(&t->tunnel_users) == 2)
1022 xfrm_state_delete(t);
1023 atomic_dec(&t->tunnel_users);
1024 xfrm_state_put(t);
1025 x->tunnel = NULL;
1026 }
1027}
1028EXPORT_SYMBOL(xfrm_state_delete_tunnel);
1029
1030int xfrm_state_mtu(struct xfrm_state *x, int mtu)
1031{
1032 int res = mtu;
1033
1034 res -= x->props.header_len;
1035
1036 for (;;) {
1037 int m = res;
1038
1039 if (m < 68)
1040 return 68;
1041
1042 spin_lock_bh(&x->lock);
1043 if (x->km.state == XFRM_STATE_VALID &&
1044 x->type && x->type->get_max_size)
1045 m = x->type->get_max_size(x, m);
1046 else
1047 m += x->props.header_len;
1048 spin_unlock_bh(&x->lock);
1049
1050 if (m <= mtu)
1051 break;
1052 res -= (m - mtu);
1053 }
1054
1055 return res;
1056}
1057
1058EXPORT_SYMBOL(xfrm_state_mtu);
1059
1060void __init xfrm_state_init(void)
1061{
1062 int i;
1063
1064 for (i=0; i<XFRM_DST_HSIZE; i++) {
1065 INIT_LIST_HEAD(&xfrm_state_bydst[i]);
1066 INIT_LIST_HEAD(&xfrm_state_byspi[i]);
1067 }
1068 INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task, NULL);
1069}
1070