/*
 * xfrm_state.c
 *
 * Changes:
 *	Mitsuru KANDA @USAGI
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		IPv6 support
 *	YOSHIFUJI Hideaki @USAGI
 *		Split up af-specific functions
 *	Derek Atkins <derek@ihtfp.com>
 *		Add UDP Encapsulation
 *
 */

#include <linux/workqueue.h>
#include <net/xfrm.h>
#include <linux/pfkeyv2.h>
#include <linux/ipsec.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <asm/uaccess.h>
#include <linux/audit.h>

#include "xfrm_hash.h"

struct sock *xfrm_nl;
EXPORT_SYMBOL(xfrm_nl);

u32 sysctl_xfrm_aevent_etime = XFRM_AE_ETIME;
EXPORT_SYMBOL(sysctl_xfrm_aevent_etime);

u32 sysctl_xfrm_aevent_rseqth = XFRM_AE_SEQT_SIZE;
EXPORT_SYMBOL(sysctl_xfrm_aevent_rseqth);

/* Each xfrm_state may be linked to three tables:

   1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
   2. Hash table by (daddr,family,reqid) to find what SAs exist for given
      destination/tunnel endpoint. (output)
   3. Hash table by (daddr,saddr,family) to find an SA by its addresses
      alone, when no SPI is known.
 */
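
/* For example: an inbound ESP packet to 192.0.2.1 carrying SPI 0x1234 is
 * resolved through table 1 with the key (0x1234, 192.0.2.1, IPPROTO_ESP),
 * while an outbound flow that a policy mapped to reqid 5 walks table 2
 * with (192.0.2.1, AF_INET, 5) to find a usable SA.
 */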

static DEFINE_SPINLOCK(xfrm_state_lock);

/* Hash table to find appropriate SA towards given target (endpoint
 * of tunnel or destination of transport mode) allowed by selector.
 *
 * Main use is finding SA after policy selected tunnel or transport mode.
 * Also, it can be used by ah/esp icmp error handler to find offending SA.
 */
static struct hlist_head *xfrm_state_bydst __read_mostly;
static struct hlist_head *xfrm_state_bysrc __read_mostly;
static struct hlist_head *xfrm_state_byspi __read_mostly;
static unsigned int xfrm_state_hmask __read_mostly;
static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
static unsigned int xfrm_state_num;
static unsigned int xfrm_state_genid;

static inline unsigned int xfrm_dst_hash(xfrm_address_t *daddr,
					 xfrm_address_t *saddr,
					 u32 reqid,
					 unsigned short family)
{
	return __xfrm_dst_hash(daddr, saddr, reqid, family, xfrm_state_hmask);
}

static inline unsigned int xfrm_src_hash(xfrm_address_t *daddr,
					 xfrm_address_t *saddr,
					 unsigned short family)
{
	return __xfrm_src_hash(daddr, saddr, family, xfrm_state_hmask);
}

static inline unsigned int
xfrm_spi_hash(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
{
	return __xfrm_spi_hash(daddr, spi, proto, family, xfrm_state_hmask);
}

static void xfrm_hash_transfer(struct hlist_head *list,
			       struct hlist_head *ndsttable,
			       struct hlist_head *nsrctable,
			       struct hlist_head *nspitable,
			       unsigned int nhashmask)
{
	struct hlist_node *entry, *tmp;
	struct xfrm_state *x;

	hlist_for_each_entry_safe(x, entry, tmp, list, bydst) {
		unsigned int h;

		h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
				    x->props.reqid, x->props.family,
				    nhashmask);
		hlist_add_head(&x->bydst, ndsttable+h);

		h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr,
				    x->props.family,
				    nhashmask);
		hlist_add_head(&x->bysrc, nsrctable+h);

		if (x->id.spi) {
			h = __xfrm_spi_hash(&x->id.daddr, x->id.spi,
					    x->id.proto, x->props.family,
					    nhashmask);
			hlist_add_head(&x->byspi, nspitable+h);
		}
	}
}

static unsigned long xfrm_hash_new_size(void)
{
	return ((xfrm_state_hmask + 1) << 1) *
		sizeof(struct hlist_head);
}
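
/* Growth is by doubling: starting from the initial 8 buckets allocated in
 * xfrm_state_init() (hmask == 7), xfrm_hash_new_size() yields room for
 * 16, then 32, 64, ... buckets, bounded by xfrm_state_hashmax.
 */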

static DEFINE_MUTEX(hash_resize_mutex);

static void xfrm_hash_resize(struct work_struct *__unused)
{
	struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
	unsigned long nsize, osize;
	unsigned int nhashmask, ohashmask;
	int i;

	mutex_lock(&hash_resize_mutex);

	nsize = xfrm_hash_new_size();
	ndst = xfrm_hash_alloc(nsize);
	if (!ndst)
		goto out_unlock;
	nsrc = xfrm_hash_alloc(nsize);
	if (!nsrc) {
		xfrm_hash_free(ndst, nsize);
		goto out_unlock;
	}
	nspi = xfrm_hash_alloc(nsize);
	if (!nspi) {
		xfrm_hash_free(ndst, nsize);
		xfrm_hash_free(nsrc, nsize);
		goto out_unlock;
	}

	spin_lock_bh(&xfrm_state_lock);

	nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
	for (i = xfrm_state_hmask; i >= 0; i--)
		xfrm_hash_transfer(xfrm_state_bydst+i, ndst, nsrc, nspi,
				   nhashmask);

	odst = xfrm_state_bydst;
	osrc = xfrm_state_bysrc;
	ospi = xfrm_state_byspi;
	ohashmask = xfrm_state_hmask;

	xfrm_state_bydst = ndst;
	xfrm_state_bysrc = nsrc;
	xfrm_state_byspi = nspi;
	xfrm_state_hmask = nhashmask;

	spin_unlock_bh(&xfrm_state_lock);

	osize = (ohashmask + 1) * sizeof(struct hlist_head);
	xfrm_hash_free(odst, osize);
	xfrm_hash_free(osrc, osize);
	xfrm_hash_free(ospi, osize);

out_unlock:
	mutex_unlock(&hash_resize_mutex);
}

static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);

DECLARE_WAIT_QUEUE_HEAD(km_waitq);
EXPORT_SYMBOL(km_waitq);

static DEFINE_RWLOCK(xfrm_state_afinfo_lock);
static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO];

static struct work_struct xfrm_state_gc_work;
static HLIST_HEAD(xfrm_state_gc_list);
static DEFINE_SPINLOCK(xfrm_state_gc_lock);

int __xfrm_state_delete(struct xfrm_state *x);

int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
void km_state_expired(struct xfrm_state *x, int hard, u32 pid);

static void xfrm_state_gc_destroy(struct xfrm_state *x)
{
	del_timer_sync(&x->timer);
	del_timer_sync(&x->rtimer);
	kfree(x->aalg);
	kfree(x->ealg);
	kfree(x->calg);
	kfree(x->encap);
	kfree(x->coaddr);
	if (x->mode)
		xfrm_put_mode(x->mode);
	if (x->type) {
		x->type->destructor(x);
		xfrm_put_type(x->type);
	}
	security_xfrm_state_free(x);
	kfree(x);
}

static void xfrm_state_gc_task(struct work_struct *data)
{
	struct xfrm_state *x;
	struct hlist_node *entry, *tmp;
	struct hlist_head gc_list;

	spin_lock_bh(&xfrm_state_gc_lock);
	gc_list.first = xfrm_state_gc_list.first;
	INIT_HLIST_HEAD(&xfrm_state_gc_list);
	spin_unlock_bh(&xfrm_state_gc_lock);

	hlist_for_each_entry_safe(x, entry, tmp, &gc_list, bydst)
		xfrm_state_gc_destroy(x);

	wake_up(&km_waitq);
}

static inline unsigned long make_jiffies(long secs)
{
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
	else
		return secs*HZ;
}
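
/* E.g. with HZ == 1000, make_jiffies(5) == 5000 jiffies. Values at or
 * beyond (MAX_SCHEDULE_TIMEOUT-1)/HZ seconds are clamped so that the
 * multiplication above cannot overflow the timer range.
 */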

static void xfrm_timer_handler(unsigned long data)
{
	struct xfrm_state *x = (struct xfrm_state*)data;
	unsigned long now = (unsigned long)xtime.tv_sec;
	long next = LONG_MAX;
	int warn = 0;
	int err = 0;

	spin_lock(&x->lock);
	if (x->km.state == XFRM_STATE_DEAD)
		goto out;
	if (x->km.state == XFRM_STATE_EXPIRED)
		goto expired;
	if (x->lft.hard_add_expires_seconds) {
		long tmo = x->lft.hard_add_expires_seconds +
			x->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (x->lft.hard_use_expires_seconds) {
		long tmo = x->lft.hard_use_expires_seconds +
			(x->curlft.use_time ? : now) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (x->km.dying)
		goto resched;
	if (x->lft.soft_add_expires_seconds) {
		long tmo = x->lft.soft_add_expires_seconds +
			x->curlft.add_time - now;
		if (tmo <= 0)
			warn = 1;
		else if (tmo < next)
			next = tmo;
	}
	if (x->lft.soft_use_expires_seconds) {
		long tmo = x->lft.soft_use_expires_seconds +
			(x->curlft.use_time ? : now) - now;
		if (tmo <= 0)
			warn = 1;
		else if (tmo < next)
			next = tmo;
	}

	x->km.dying = warn;
	if (warn)
		km_state_expired(x, 0, 0);
resched:
	if (next != LONG_MAX)
		mod_timer(&x->timer, jiffies + make_jiffies(next));

	goto out;

expired:
	if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0) {
		x->km.state = XFRM_STATE_EXPIRED;
		wake_up(&km_waitq);
		next = 2;
		goto resched;
	}

	err = __xfrm_state_delete(x);
	if (!err && x->id.spi)
		km_state_expired(x, 1, 0);

	xfrm_audit_log(audit_get_loginuid(current->audit_context), 0,
		       AUDIT_MAC_IPSEC_DELSA, err ? 0 : 1, NULL, x);

out:
	spin_unlock(&x->lock);
}
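
/* Summary of the timer semantics above: crossing a soft limit only marks
 * the state dying and sends km_state_expired(x, 0, 0); crossing a hard
 * limit deletes the state and, if it has an SPI, sends a hard expire.
 * A larval ACQ state without an SPI is merely marked EXPIRED and
 * re-examined two seconds later.
 */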

static void xfrm_replay_timer_handler(unsigned long data);

struct xfrm_state *xfrm_state_alloc(void)
{
	struct xfrm_state *x;

	x = kzalloc(sizeof(struct xfrm_state), GFP_ATOMIC);

	if (x) {
		atomic_set(&x->refcnt, 1);
		atomic_set(&x->tunnel_users, 0);
		INIT_HLIST_NODE(&x->bydst);
		INIT_HLIST_NODE(&x->bysrc);
		INIT_HLIST_NODE(&x->byspi);
		init_timer(&x->timer);
		x->timer.function = xfrm_timer_handler;
		x->timer.data = (unsigned long)x;
		init_timer(&x->rtimer);
		x->rtimer.function = xfrm_replay_timer_handler;
		x->rtimer.data = (unsigned long)x;
		x->curlft.add_time = (unsigned long)xtime.tv_sec;
		x->lft.soft_byte_limit = XFRM_INF;
		x->lft.soft_packet_limit = XFRM_INF;
		x->lft.hard_byte_limit = XFRM_INF;
		x->lft.hard_packet_limit = XFRM_INF;
		x->replay_maxage = 0;
		x->replay_maxdiff = 0;
		spin_lock_init(&x->lock);
	}
	return x;
}
EXPORT_SYMBOL(xfrm_state_alloc);
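
/* Typical usage (illustrative sketch, not a quote from any caller):
 *
 *	struct xfrm_state *x = xfrm_state_alloc();
 *	if (!x)
 *		return -ENOMEM;
 *	... fill in x->id, x->props and x->lft ...
 *	err = xfrm_state_add(x);
 *
 * The object starts with a refcount of one; that is the reference
 * __xfrm_state_delete() eventually drops.
 */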

void __xfrm_state_destroy(struct xfrm_state *x)
{
	BUG_TRAP(x->km.state == XFRM_STATE_DEAD);

	spin_lock_bh(&xfrm_state_gc_lock);
	hlist_add_head(&x->bydst, &xfrm_state_gc_list);
	spin_unlock_bh(&xfrm_state_gc_lock);
	schedule_work(&xfrm_state_gc_work);
}
EXPORT_SYMBOL(__xfrm_state_destroy);

int __xfrm_state_delete(struct xfrm_state *x)
{
	int err = -ESRCH;

	if (x->km.state != XFRM_STATE_DEAD) {
		x->km.state = XFRM_STATE_DEAD;
		spin_lock(&xfrm_state_lock);
		hlist_del(&x->bydst);
		hlist_del(&x->bysrc);
		if (x->id.spi)
			hlist_del(&x->byspi);
		xfrm_state_num--;
		spin_unlock(&xfrm_state_lock);

		/* All xfrm_state objects are created by xfrm_state_alloc.
		 * The xfrm_state_alloc call gives a reference, and that
		 * is what we are dropping here.
		 */
		__xfrm_state_put(x);
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL(__xfrm_state_delete);

int xfrm_state_delete(struct xfrm_state *x)
{
	int err;

	spin_lock_bh(&x->lock);
	err = __xfrm_state_delete(x);
	spin_unlock_bh(&x->lock);

	return err;
}
EXPORT_SYMBOL(xfrm_state_delete);

void xfrm_state_flush(u8 proto, struct xfrm_audit *audit_info)
{
	int i;
	int err = 0;

	spin_lock_bh(&xfrm_state_lock);
	for (i = 0; i <= xfrm_state_hmask; i++) {
		struct hlist_node *entry;
		struct xfrm_state *x;
restart:
		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
			if (!xfrm_state_kern(x) &&
			    xfrm_id_proto_match(x->id.proto, proto)) {
				xfrm_state_hold(x);
				spin_unlock_bh(&xfrm_state_lock);

				err = xfrm_state_delete(x);
				xfrm_audit_log(audit_info->loginuid,
					       audit_info->secid,
					       AUDIT_MAC_IPSEC_DELSA,
					       err ? 0 : 1, NULL, x);
				xfrm_state_put(x);

				spin_lock_bh(&xfrm_state_lock);
				goto restart;
			}
		}
	}
	spin_unlock_bh(&xfrm_state_lock);
	wake_up(&km_waitq);
}
EXPORT_SYMBOL(xfrm_state_flush);

static int
xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
		  struct xfrm_tmpl *tmpl,
		  xfrm_address_t *daddr, xfrm_address_t *saddr,
		  unsigned short family)
{
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		return -1;
	afinfo->init_tempsel(x, fl, tmpl, daddr, saddr);
	xfrm_state_put_afinfo(afinfo);
	return 0;
}

static struct xfrm_state *__xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
{
	unsigned int h = xfrm_spi_hash(daddr, spi, proto, family);
	struct xfrm_state *x;
	struct hlist_node *entry;

	hlist_for_each_entry(x, entry, xfrm_state_byspi+h, byspi) {
		if (x->props.family != family ||
		    x->id.spi != spi ||
		    x->id.proto != proto)
			continue;

		switch (family) {
		case AF_INET:
			if (x->id.daddr.a4 != daddr->a4)
				continue;
			break;
		case AF_INET6:
			if (!ipv6_addr_equal((struct in6_addr *)daddr,
					     (struct in6_addr *)
					     x->id.daddr.a6))
				continue;
			break;
		};

		xfrm_state_hold(x);
		return x;
	}

	return NULL;
}

static struct xfrm_state *__xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto, unsigned short family)
{
	unsigned int h = xfrm_src_hash(daddr, saddr, family);
	struct xfrm_state *x;
	struct hlist_node *entry;

	hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) {
		if (x->props.family != family ||
		    x->id.proto != proto)
			continue;

		switch (family) {
		case AF_INET:
			if (x->id.daddr.a4 != daddr->a4 ||
			    x->props.saddr.a4 != saddr->a4)
				continue;
			break;
		case AF_INET6:
			if (!ipv6_addr_equal((struct in6_addr *)daddr,
					     (struct in6_addr *)
					     x->id.daddr.a6) ||
			    !ipv6_addr_equal((struct in6_addr *)saddr,
					     (struct in6_addr *)
					     x->props.saddr.a6))
				continue;
			break;
		};

		xfrm_state_hold(x);
		return x;
	}

	return NULL;
}

static inline struct xfrm_state *
__xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
{
	if (use_spi)
		return __xfrm_state_lookup(&x->id.daddr, x->id.spi,
					   x->id.proto, family);
	else
		return __xfrm_state_lookup_byaddr(&x->id.daddr,
						  &x->props.saddr,
						  x->id.proto, family);
}

static void xfrm_hash_grow_check(int have_hash_collision)
{
	if (have_hash_collision &&
	    (xfrm_state_hmask + 1) < xfrm_state_hashmax &&
	    xfrm_state_num > xfrm_state_hmask)
		schedule_work(&xfrm_hash_work);
}
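
/* Example: with the initial 8 buckets (hmask == 7), inserting a state that
 * chains behind an existing entry once xfrm_state_num exceeds 7 schedules
 * xfrm_hash_work, and the tables are doubled to 16 buckets.
 */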

struct xfrm_state *
xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
		struct flowi *fl, struct xfrm_tmpl *tmpl,
		struct xfrm_policy *pol, int *err,
		unsigned short family)
{
	unsigned int h = xfrm_dst_hash(daddr, saddr, tmpl->reqid, family);
	struct hlist_node *entry;
	struct xfrm_state *x, *x0;
	int acquire_in_progress = 0;
	int error = 0;
	struct xfrm_state *best = NULL;

	spin_lock_bh(&xfrm_state_lock);
	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
		if (x->props.family == family &&
		    x->props.reqid == tmpl->reqid &&
		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
		    xfrm_state_addr_check(x, daddr, saddr, family) &&
		    tmpl->mode == x->props.mode &&
		    tmpl->id.proto == x->id.proto &&
		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) {
			/* Resolution logic:
			   1. There is a valid state with a matching selector.
			      Done.
			   2. Valid state with an inappropriate selector. Skip.

			   Entering area of "sysdeps".

			   3. If the state is not valid, its selector is
			      temporary: it matches only the session which
			      triggered the previous resolution. The key
			      manager will do something to install a state
			      with a proper selector.
			 */
			if (x->km.state == XFRM_STATE_VALID) {
				if (!xfrm_selector_match(&x->sel, fl, family) ||
				    !security_xfrm_state_pol_flow_match(x, pol, fl))
					continue;
				if (!best ||
				    best->km.dying > x->km.dying ||
				    (best->km.dying == x->km.dying &&
				     best->curlft.add_time < x->curlft.add_time))
					best = x;
			} else if (x->km.state == XFRM_STATE_ACQ) {
				acquire_in_progress = 1;
			} else if (x->km.state == XFRM_STATE_ERROR ||
				   x->km.state == XFRM_STATE_EXPIRED) {
				if (xfrm_selector_match(&x->sel, fl, family) &&
				    security_xfrm_state_pol_flow_match(x, pol, fl))
					error = -ESRCH;
			}
		}
	}

	x = best;
	if (!x && !error && !acquire_in_progress) {
		if (tmpl->id.spi &&
		    (x0 = __xfrm_state_lookup(daddr, tmpl->id.spi,
					      tmpl->id.proto, family)) != NULL) {
			xfrm_state_put(x0);
			error = -EEXIST;
			goto out;
		}
		x = xfrm_state_alloc();
		if (x == NULL) {
			error = -ENOMEM;
			goto out;
		}
		/* Initialize the temporary selector, matching only
		 * the current session. */
		xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);

		error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid);
		if (error) {
			x->km.state = XFRM_STATE_DEAD;
			xfrm_state_put(x);
			x = NULL;
			goto out;
		}

		if (km_query(x, tmpl, pol) == 0) {
			x->km.state = XFRM_STATE_ACQ;
			hlist_add_head(&x->bydst, xfrm_state_bydst+h);
			h = xfrm_src_hash(daddr, saddr, family);
			hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
			if (x->id.spi) {
				h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, family);
				hlist_add_head(&x->byspi, xfrm_state_byspi+h);
			}
			x->lft.hard_add_expires_seconds = XFRM_ACQ_EXPIRES;
			x->timer.expires = jiffies + XFRM_ACQ_EXPIRES*HZ;
			add_timer(&x->timer);
			xfrm_state_num++;
			xfrm_hash_grow_check(x->bydst.next != NULL);
		} else {
			x->km.state = XFRM_STATE_DEAD;
			xfrm_state_put(x);
			x = NULL;
			error = -ESRCH;
		}
	}
out:
	if (x)
		xfrm_state_hold(x);
	else
		*err = acquire_in_progress ? -EAGAIN : error;
	spin_unlock_bh(&xfrm_state_lock);
	return x;
}
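
/* Possible outcomes of xfrm_state_find(), as coded above: a matching VALID
 * state is returned with a reference held; a matching ACQ state makes the
 * caller back off with *err == -EAGAIN; otherwise a larval ACQ state is
 * created, the key managers are asked via km_query(), and the larval state
 * expires after XFRM_ACQ_EXPIRES seconds if nobody resolves it.
 */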

static void __xfrm_state_insert(struct xfrm_state *x)
{
	unsigned int h;

	x->genid = ++xfrm_state_genid;

	h = xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
			  x->props.reqid, x->props.family);
	hlist_add_head(&x->bydst, xfrm_state_bydst+h);

	h = xfrm_src_hash(&x->id.daddr, &x->props.saddr, x->props.family);
	hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);

	if (x->id.spi) {
		h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto,
				  x->props.family);

		hlist_add_head(&x->byspi, xfrm_state_byspi+h);
	}

	mod_timer(&x->timer, jiffies + HZ);
	if (x->replay_maxage)
		mod_timer(&x->rtimer, jiffies + x->replay_maxage);

	wake_up(&km_waitq);

	xfrm_state_num++;

	xfrm_hash_grow_check(x->bydst.next != NULL);
}

/* xfrm_state_lock is held */
static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
{
	unsigned short family = xnew->props.family;
	u32 reqid = xnew->props.reqid;
	struct xfrm_state *x;
	struct hlist_node *entry;
	unsigned int h;

	h = xfrm_dst_hash(&xnew->id.daddr, &xnew->props.saddr, reqid, family);
	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
		if (x->props.family == family &&
		    x->props.reqid == reqid &&
		    !xfrm_addr_cmp(&x->id.daddr, &xnew->id.daddr, family) &&
		    !xfrm_addr_cmp(&x->props.saddr, &xnew->props.saddr, family))
			x->genid = xfrm_state_genid;
	}
}
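
/* Bumping the genid of every state with the same (daddr, saddr, reqid,
 * family) lets previously built bundles be detected as stale, so they are
 * rebuilt against the newly inserted state.
 */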

void xfrm_state_insert(struct xfrm_state *x)
{
	spin_lock_bh(&xfrm_state_lock);
	__xfrm_state_bump_genids(x);
	__xfrm_state_insert(x);
	spin_unlock_bh(&xfrm_state_lock);
}
EXPORT_SYMBOL(xfrm_state_insert);

/* xfrm_state_lock is held */
static struct xfrm_state *__find_acq_core(unsigned short family, u8 mode, u32 reqid, u8 proto, xfrm_address_t *daddr, xfrm_address_t *saddr, int create)
{
	unsigned int h = xfrm_dst_hash(daddr, saddr, reqid, family);
	struct hlist_node *entry;
	struct xfrm_state *x;

	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
		if (x->props.reqid != reqid ||
		    x->props.mode != mode ||
		    x->props.family != family ||
		    x->km.state != XFRM_STATE_ACQ ||
		    x->id.spi != 0)
			continue;

		switch (family) {
		case AF_INET:
			if (x->id.daddr.a4 != daddr->a4 ||
			    x->props.saddr.a4 != saddr->a4)
				continue;
			break;
		case AF_INET6:
			if (!ipv6_addr_equal((struct in6_addr *)x->id.daddr.a6,
					     (struct in6_addr *)daddr) ||
			    !ipv6_addr_equal((struct in6_addr *)
					     x->props.saddr.a6,
					     (struct in6_addr *)saddr))
				continue;
			break;
		};

		xfrm_state_hold(x);
		return x;
	}

	if (!create)
		return NULL;

	x = xfrm_state_alloc();
	if (likely(x)) {
		switch (family) {
		case AF_INET:
			x->sel.daddr.a4 = daddr->a4;
			x->sel.saddr.a4 = saddr->a4;
			x->sel.prefixlen_d = 32;
			x->sel.prefixlen_s = 32;
			x->props.saddr.a4 = saddr->a4;
			x->id.daddr.a4 = daddr->a4;
			break;

		case AF_INET6:
			ipv6_addr_copy((struct in6_addr *)x->sel.daddr.a6,
				       (struct in6_addr *)daddr);
			ipv6_addr_copy((struct in6_addr *)x->sel.saddr.a6,
				       (struct in6_addr *)saddr);
			x->sel.prefixlen_d = 128;
			x->sel.prefixlen_s = 128;
			ipv6_addr_copy((struct in6_addr *)x->props.saddr.a6,
				       (struct in6_addr *)saddr);
			ipv6_addr_copy((struct in6_addr *)x->id.daddr.a6,
				       (struct in6_addr *)daddr);
			break;
		};

		x->km.state = XFRM_STATE_ACQ;
		x->id.proto = proto;
		x->props.family = family;
		x->props.mode = mode;
		x->props.reqid = reqid;
		x->lft.hard_add_expires_seconds = XFRM_ACQ_EXPIRES;
		xfrm_state_hold(x);
		x->timer.expires = jiffies + XFRM_ACQ_EXPIRES*HZ;
		add_timer(&x->timer);
		hlist_add_head(&x->bydst, xfrm_state_bydst+h);
		h = xfrm_src_hash(daddr, saddr, family);
		hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
		wake_up(&km_waitq);

		xfrm_state_num++;

		xfrm_hash_grow_check(x->bydst.next != NULL);
	}

	return x;
}

static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq);

int xfrm_state_add(struct xfrm_state *x)
{
	struct xfrm_state *x1;
	int family;
	int err;
	int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);

	family = x->props.family;

	spin_lock_bh(&xfrm_state_lock);

	x1 = __xfrm_state_locate(x, use_spi, family);
	if (x1) {
		xfrm_state_put(x1);
		x1 = NULL;
		err = -EEXIST;
		goto out;
	}

	if (use_spi && x->km.seq) {
		x1 = __xfrm_find_acq_byseq(x->km.seq);
		if (x1 && xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family)) {
			xfrm_state_put(x1);
			x1 = NULL;
		}
	}

	if (use_spi && !x1)
		x1 = __find_acq_core(family, x->props.mode, x->props.reqid,
				     x->id.proto,
				     &x->id.daddr, &x->props.saddr, 0);

	__xfrm_state_bump_genids(x);
	__xfrm_state_insert(x);
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);

	if (x1) {
		xfrm_state_delete(x1);
		xfrm_state_put(x1);
	}

	return err;
}
EXPORT_SYMBOL(xfrm_state_add);

#ifdef CONFIG_XFRM_MIGRATE
struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
{
	int err = -ENOMEM;
	struct xfrm_state *x = xfrm_state_alloc();
	if (!x)
		goto error;

	memcpy(&x->id, &orig->id, sizeof(x->id));
	memcpy(&x->sel, &orig->sel, sizeof(x->sel));
	memcpy(&x->lft, &orig->lft, sizeof(x->lft));
	x->props.mode = orig->props.mode;
	x->props.replay_window = orig->props.replay_window;
	x->props.reqid = orig->props.reqid;
	x->props.family = orig->props.family;
	x->props.saddr = orig->props.saddr;

	if (orig->aalg) {
		x->aalg = xfrm_algo_clone(orig->aalg);
		if (!x->aalg)
			goto error;
	}
	x->props.aalgo = orig->props.aalgo;

	if (orig->ealg) {
		x->ealg = xfrm_algo_clone(orig->ealg);
		if (!x->ealg)
			goto error;
	}
	x->props.ealgo = orig->props.ealgo;

	if (orig->calg) {
		x->calg = xfrm_algo_clone(orig->calg);
		if (!x->calg)
			goto error;
	}
	x->props.calgo = orig->props.calgo;

	if (orig->encap) {
		x->encap = kmemdup(orig->encap, sizeof(*x->encap), GFP_KERNEL);
		if (!x->encap)
			goto error;
	}

	if (orig->coaddr) {
		x->coaddr = kmemdup(orig->coaddr, sizeof(*x->coaddr),
				    GFP_KERNEL);
		if (!x->coaddr)
			goto error;
	}

	err = xfrm_init_state(x);
	if (err)
		goto error;

	x->props.flags = orig->props.flags;

	x->curlft.add_time = orig->curlft.add_time;
	x->km.state = orig->km.state;
	x->km.seq = orig->km.seq;

	return x;

error:
	if (errp)
		*errp = err;
	if (x) {
		kfree(x->aalg);
		kfree(x->ealg);
		kfree(x->calg);
		kfree(x->encap);
		kfree(x->coaddr);
	}
	kfree(x);
	return NULL;
}
EXPORT_SYMBOL(xfrm_state_clone);

/* xfrm_state_lock is held */
struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m)
{
	unsigned int h;
	struct xfrm_state *x;
	struct hlist_node *entry;

	if (m->reqid) {
		h = xfrm_dst_hash(&m->old_daddr, &m->old_saddr,
				  m->reqid, m->old_family);
		hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
			if (x->props.mode != m->mode ||
			    x->id.proto != m->proto)
				continue;
			if (m->reqid && x->props.reqid != m->reqid)
				continue;
			if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
					  m->old_family) ||
			    xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
					  m->old_family))
				continue;
			xfrm_state_hold(x);
			return x;
		}
	} else {
		h = xfrm_src_hash(&m->old_daddr, &m->old_saddr,
				  m->old_family);
		hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) {
			if (x->props.mode != m->mode ||
			    x->id.proto != m->proto)
				continue;
			if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
					  m->old_family) ||
			    xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
					  m->old_family))
				continue;
			xfrm_state_hold(x);
			return x;
		}
	}

	return NULL;
}
EXPORT_SYMBOL(xfrm_migrate_state_find);

struct xfrm_state * xfrm_state_migrate(struct xfrm_state *x,
				       struct xfrm_migrate *m)
{
	struct xfrm_state *xc;
	int err;

	xc = xfrm_state_clone(x, &err);
	if (!xc)
		return NULL;

	memcpy(&xc->id.daddr, &m->new_daddr, sizeof(xc->id.daddr));
	memcpy(&xc->props.saddr, &m->new_saddr, sizeof(xc->props.saddr));

	/* add state */
	if (!xfrm_addr_cmp(&x->id.daddr, &m->new_daddr, m->new_family)) {
		/* Care is needed when the destination address of the
		   state is to be updated, as it is part of the SA
		   lookup triplet. */
		xfrm_state_insert(xc);
	} else {
		if ((err = xfrm_state_add(xc)) < 0)
			goto error;
	}

	return xc;
error:
	kfree(xc);
	return NULL;
}
EXPORT_SYMBOL(xfrm_state_migrate);
#endif

int xfrm_state_update(struct xfrm_state *x)
{
	struct xfrm_state *x1;
	int err;
	int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);

	spin_lock_bh(&xfrm_state_lock);
	x1 = __xfrm_state_locate(x, use_spi, x->props.family);

	err = -ESRCH;
	if (!x1)
		goto out;

	if (xfrm_state_kern(x1)) {
		xfrm_state_put(x1);
		err = -EEXIST;
		goto out;
	}

	if (x1->km.state == XFRM_STATE_ACQ) {
		__xfrm_state_insert(x);
		x = NULL;
	}
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);

	if (err)
		return err;

	if (!x) {
		xfrm_state_delete(x1);
		xfrm_state_put(x1);
		return 0;
	}

	err = -EINVAL;
	spin_lock_bh(&x1->lock);
	if (likely(x1->km.state == XFRM_STATE_VALID)) {
		if (x->encap && x1->encap)
			memcpy(x1->encap, x->encap, sizeof(*x1->encap));
		if (x->coaddr && x1->coaddr) {
			memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr));
		}
		if (!use_spi && memcmp(&x1->sel, &x->sel, sizeof(x1->sel)))
			memcpy(&x1->sel, &x->sel, sizeof(x1->sel));
		memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
		x1->km.dying = 0;

		mod_timer(&x1->timer, jiffies + HZ);
		if (x1->curlft.use_time)
			xfrm_state_check_expire(x1);

		err = 0;
	}
	spin_unlock_bh(&x1->lock);

	xfrm_state_put(x1);

	return err;
}
EXPORT_SYMBOL(xfrm_state_update);

int xfrm_state_check_expire(struct xfrm_state *x)
{
	if (!x->curlft.use_time)
		x->curlft.use_time = (unsigned long)xtime.tv_sec;

	if (x->km.state != XFRM_STATE_VALID)
		return -EINVAL;

	if (x->curlft.bytes >= x->lft.hard_byte_limit ||
	    x->curlft.packets >= x->lft.hard_packet_limit) {
		x->km.state = XFRM_STATE_EXPIRED;
		mod_timer(&x->timer, jiffies);
		return -EINVAL;
	}

	if (!x->km.dying &&
	    (x->curlft.bytes >= x->lft.soft_byte_limit ||
	     x->curlft.packets >= x->lft.soft_packet_limit)) {
		x->km.dying = 1;
		km_state_expired(x, 0, 0);
	}
	return 0;
}
EXPORT_SYMBOL(xfrm_state_check_expire);
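
/* Worked example: with lft.hard_byte_limit == 1 << 20, a state whose
 * curlft.bytes reaches 1 MB flips to XFRM_STATE_EXPIRED and its timer is
 * made to fire immediately; crossing only the soft limit instead sets
 * km.dying and emits a soft (hard == 0) expire notification.
 */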

static int xfrm_state_check_space(struct xfrm_state *x, struct sk_buff *skb)
{
	int nhead = x->props.header_len + LL_RESERVED_SPACE(skb->dst->dev)
		- skb_headroom(skb);

	if (nhead > 0)
		return pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);

	/* Check tail too... */
	return 0;
}

int xfrm_state_check(struct xfrm_state *x, struct sk_buff *skb)
{
	int err = xfrm_state_check_expire(x);
	if (err < 0)
		goto err;
	err = xfrm_state_check_space(x, skb);
err:
	return err;
}
EXPORT_SYMBOL(xfrm_state_check);

struct xfrm_state *
xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto,
		  unsigned short family)
{
	struct xfrm_state *x;

	spin_lock_bh(&xfrm_state_lock);
	x = __xfrm_state_lookup(daddr, spi, proto, family);
	spin_unlock_bh(&xfrm_state_lock);
	return x;
}
EXPORT_SYMBOL(xfrm_state_lookup);

struct xfrm_state *
xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr,
			 u8 proto, unsigned short family)
{
	struct xfrm_state *x;

	spin_lock_bh(&xfrm_state_lock);
	x = __xfrm_state_lookup_byaddr(daddr, saddr, proto, family);
	spin_unlock_bh(&xfrm_state_lock);
	return x;
}
EXPORT_SYMBOL(xfrm_state_lookup_byaddr);

struct xfrm_state *
xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
	      xfrm_address_t *daddr, xfrm_address_t *saddr,
	      int create, unsigned short family)
{
	struct xfrm_state *x;

	spin_lock_bh(&xfrm_state_lock);
	x = __find_acq_core(family, mode, reqid, proto, daddr, saddr, create);
	spin_unlock_bh(&xfrm_state_lock);

	return x;
}
EXPORT_SYMBOL(xfrm_find_acq);

#ifdef CONFIG_XFRM_SUB_POLICY
int
xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
	       unsigned short family)
{
	int err = 0;
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		return -EAFNOSUPPORT;

	spin_lock_bh(&xfrm_state_lock);
	if (afinfo->tmpl_sort)
		err = afinfo->tmpl_sort(dst, src, n);
	spin_unlock_bh(&xfrm_state_lock);
	xfrm_state_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_tmpl_sort);

int
xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
		unsigned short family)
{
	int err = 0;
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		return -EAFNOSUPPORT;

	spin_lock_bh(&xfrm_state_lock);
	if (afinfo->state_sort)
		err = afinfo->state_sort(dst, src, n);
	spin_unlock_bh(&xfrm_state_lock);
	xfrm_state_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_state_sort);
#endif

/* Silly enough, but I'm too lazy to build a resolution list */

static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq)
{
	int i;

	for (i = 0; i <= xfrm_state_hmask; i++) {
		struct hlist_node *entry;
		struct xfrm_state *x;

		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
			if (x->km.seq == seq &&
			    x->km.state == XFRM_STATE_ACQ) {
				xfrm_state_hold(x);
				return x;
			}
		}
	}
	return NULL;
}

struct xfrm_state *xfrm_find_acq_byseq(u32 seq)
{
	struct xfrm_state *x;

	spin_lock_bh(&xfrm_state_lock);
	x = __xfrm_find_acq_byseq(seq);
	spin_unlock_bh(&xfrm_state_lock);
	return x;
}
EXPORT_SYMBOL(xfrm_find_acq_byseq);

u32 xfrm_get_acqseq(void)
{
	u32 res;
	static u32 acqseq;
	static DEFINE_SPINLOCK(acqseq_lock);

	spin_lock_bh(&acqseq_lock);
	res = (++acqseq ? : ++acqseq);
	spin_unlock_bh(&acqseq_lock);
	return res;
}
EXPORT_SYMBOL(xfrm_get_acqseq);
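
/* The "(++acqseq ? : ++acqseq)" idiom guarantees a non-zero sequence
 * number: if the first increment wraps to 0, the second one yields 1.
 */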

void
xfrm_alloc_spi(struct xfrm_state *x, __be32 minspi, __be32 maxspi)
{
	unsigned int h;
	struct xfrm_state *x0;

	if (x->id.spi)
		return;

	if (minspi == maxspi) {
		x0 = xfrm_state_lookup(&x->id.daddr, minspi, x->id.proto, x->props.family);
		if (x0) {
			xfrm_state_put(x0);
			return;
		}
		x->id.spi = minspi;
	} else {
		u32 spi = 0;
		u32 low = ntohl(minspi);
		u32 high = ntohl(maxspi);
		for (h = 0; h < high-low+1; h++) {
			spi = low + net_random()%(high-low+1);
			x0 = xfrm_state_lookup(&x->id.daddr, htonl(spi), x->id.proto, x->props.family);
			if (x0 == NULL) {
				x->id.spi = htonl(spi);
				break;
			}
			xfrm_state_put(x0);
		}
	}
	if (x->id.spi) {
		spin_lock_bh(&xfrm_state_lock);
		h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
		hlist_add_head(&x->byspi, xfrm_state_byspi+h);
		spin_unlock_bh(&xfrm_state_lock);
		wake_up(&km_waitq);
	}
}
EXPORT_SYMBOL(xfrm_alloc_spi);
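
/* SPI selection above is probabilistic for a range: at most
 * (high - low + 1) random probes are made, each rejected only if
 * xfrm_state_lookup() finds a state already using the candidate SPI, so
 * x->id.spi may legitimately still be zero afterwards if every probe
 * collided.
 */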

int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*),
		    void *data)
{
	int i;
	struct xfrm_state *x, *last = NULL;
	struct hlist_node *entry;
	int count = 0;
	int err = 0;

	spin_lock_bh(&xfrm_state_lock);
	for (i = 0; i <= xfrm_state_hmask; i++) {
		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
			if (!xfrm_id_proto_match(x->id.proto, proto))
				continue;
			if (last) {
				err = func(last, count, data);
				if (err)
					goto out;
			}
			last = x;
			count++;
		}
	}
	if (count == 0) {
		err = -ENOENT;
		goto out;
	}
	err = func(last, 0, data);
out:
	spin_unlock_bh(&xfrm_state_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_state_walk);


void xfrm_replay_notify(struct xfrm_state *x, int event)
{
	struct km_event c;
	/* we send notify messages in case
	 *  1. we updated one of the sequence numbers, and the seqno difference
	 *     is at least x->replay_maxdiff, in this case we also update the
	 *     timeout of our timer function
	 *  2. if x->replay_maxage has elapsed since last update,
	 *     and there were changes
	 *
	 *  The state structure must be locked!
	 */

	switch (event) {
	case XFRM_REPLAY_UPDATE:
		if (x->replay_maxdiff &&
		    (x->replay.seq - x->preplay.seq < x->replay_maxdiff) &&
		    (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff)) {
			if (x->xflags & XFRM_TIME_DEFER)
				event = XFRM_REPLAY_TIMEOUT;
			else
				return;
		}

		break;

	case XFRM_REPLAY_TIMEOUT:
		if ((x->replay.seq == x->preplay.seq) &&
		    (x->replay.bitmap == x->preplay.bitmap) &&
		    (x->replay.oseq == x->preplay.oseq)) {
			x->xflags |= XFRM_TIME_DEFER;
			return;
		}

		break;
	}

	memcpy(&x->preplay, &x->replay, sizeof(struct xfrm_replay_state));
	c.event = XFRM_MSG_NEWAE;
	c.data.aevent = event;
	km_state_notify(x, &c);

	if (x->replay_maxage &&
	    !mod_timer(&x->rtimer, jiffies + x->replay_maxage))
		x->xflags &= ~XFRM_TIME_DEFER;
}
EXPORT_SYMBOL(xfrm_replay_notify);

static void xfrm_replay_timer_handler(unsigned long data)
{
	struct xfrm_state *x = (struct xfrm_state*)data;

	spin_lock(&x->lock);

	if (x->km.state == XFRM_STATE_VALID) {
		if (xfrm_aevent_is_on())
			xfrm_replay_notify(x, XFRM_REPLAY_TIMEOUT);
		else
			x->xflags |= XFRM_TIME_DEFER;
	}

	spin_unlock(&x->lock);
}

int xfrm_replay_check(struct xfrm_state *x, __be32 net_seq)
{
	u32 diff;
	u32 seq = ntohl(net_seq);

	if (unlikely(seq == 0))
		return -EINVAL;

	if (likely(seq > x->replay.seq))
		return 0;

	diff = x->replay.seq - seq;
	if (diff >= x->props.replay_window) {
		x->stats.replay_window++;
		return -EINVAL;
	}

	if (x->replay.bitmap & (1U << diff)) {
		x->stats.replay++;
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL(xfrm_replay_check);
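
/* Example: with replay.seq == 100 and replay_window == 32, seq 150 is
 * accepted as new; seq 90 is accepted iff bit 10 (100 - 90) is clear in
 * the bitmap; seq 60 fails the window test (diff 40 >= 32) and bumps
 * stats.replay_window.
 */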

void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq)
{
	u32 diff;
	u32 seq = ntohl(net_seq);

	if (seq > x->replay.seq) {
		diff = seq - x->replay.seq;
		if (diff < x->props.replay_window)
			x->replay.bitmap = ((x->replay.bitmap) << diff) | 1;
		else
			x->replay.bitmap = 1;
		x->replay.seq = seq;
	} else {
		diff = x->replay.seq - seq;
		x->replay.bitmap |= (1U << diff);
	}

	if (xfrm_aevent_is_on())
		xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
}
EXPORT_SYMBOL(xfrm_replay_advance);
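
/* Continuing the example above: accepting seq 120 shifts the bitmap left
 * by 20 and sets bit 0; accepting seq 150 (diff 50 >= window) resets the
 * bitmap to 1; accepting the old-but-valid seq 90 just sets bit 10.
 */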

static struct list_head xfrm_km_list = LIST_HEAD_INIT(xfrm_km_list);
static DEFINE_RWLOCK(xfrm_km_lock);

void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
{
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list)
		if (km->notify_policy)
			km->notify_policy(xp, dir, c);
	read_unlock(&xfrm_km_lock);
}

void km_state_notify(struct xfrm_state *x, struct km_event *c)
{
	struct xfrm_mgr *km;
	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list)
		if (km->notify)
			km->notify(x, c);
	read_unlock(&xfrm_km_lock);
}

EXPORT_SYMBOL(km_policy_notify);
EXPORT_SYMBOL(km_state_notify);

void km_state_expired(struct xfrm_state *x, int hard, u32 pid)
{
	struct km_event c;

	c.data.hard = hard;
	c.pid = pid;
	c.event = XFRM_MSG_EXPIRE;
	km_state_notify(x, &c);

	if (hard)
		wake_up(&km_waitq);
}

EXPORT_SYMBOL(km_state_expired);
/*
 * We send to all registered managers regardless of failure;
 * we are happy with one success.
 */
int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
{
	int err = -EINVAL, acqret;
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		acqret = km->acquire(x, t, pol, XFRM_POLICY_OUT);
		if (!acqret)
			err = acqret;
	}
	read_unlock(&xfrm_km_lock);
	return err;
}
EXPORT_SYMBOL(km_query);

int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
{
	int err = -EINVAL;
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		if (km->new_mapping)
			err = km->new_mapping(x, ipaddr, sport);
		if (!err)
			break;
	}
	read_unlock(&xfrm_km_lock);
	return err;
}
EXPORT_SYMBOL(km_new_mapping);

void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid)
{
	struct km_event c;

	c.data.hard = hard;
	c.pid = pid;
	c.event = XFRM_MSG_POLEXPIRE;
	km_policy_notify(pol, dir, &c);

	if (hard)
		wake_up(&km_waitq);
}
EXPORT_SYMBOL(km_policy_expired);

int km_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
	       struct xfrm_migrate *m, int num_migrate)
{
	int err = -EINVAL;
	int ret;
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		if (km->migrate) {
			ret = km->migrate(sel, dir, type, m, num_migrate);
			if (!ret)
				err = ret;
		}
	}
	read_unlock(&xfrm_km_lock);
	return err;
}
EXPORT_SYMBOL(km_migrate);

int km_report(u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr)
{
	int err = -EINVAL;
	int ret;
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		if (km->report) {
			ret = km->report(proto, sel, addr);
			if (!ret)
				err = ret;
		}
	}
	read_unlock(&xfrm_km_lock);
	return err;
}
EXPORT_SYMBOL(km_report);

int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
{
	int err;
	u8 *data;
	struct xfrm_mgr *km;
	struct xfrm_policy *pol = NULL;

	if (optlen <= 0 || optlen > PAGE_SIZE)
		return -EMSGSIZE;

	data = kmalloc(optlen, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	err = -EFAULT;
	if (copy_from_user(data, optval, optlen))
		goto out;

	err = -EINVAL;
	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		pol = km->compile_policy(sk, optname, data,
					 optlen, &err);
		if (err >= 0)
			break;
	}
	read_unlock(&xfrm_km_lock);

	if (err >= 0) {
		xfrm_sk_policy_insert(sk, err, pol);
		xfrm_pol_put(pol);
		err = 0;
	}

out:
	kfree(data);
	return err;
}
EXPORT_SYMBOL(xfrm_user_policy);

int xfrm_register_km(struct xfrm_mgr *km)
{
	write_lock_bh(&xfrm_km_lock);
	list_add_tail(&km->list, &xfrm_km_list);
	write_unlock_bh(&xfrm_km_lock);
	return 0;
}
EXPORT_SYMBOL(xfrm_register_km);

int xfrm_unregister_km(struct xfrm_mgr *km)
{
	write_lock_bh(&xfrm_km_lock);
	list_del(&km->list);
	write_unlock_bh(&xfrm_km_lock);
	return 0;
}
EXPORT_SYMBOL(xfrm_unregister_km);

int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
{
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock_bh(&xfrm_state_afinfo_lock);
	if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
		err = -ENOBUFS;
	else
		xfrm_state_afinfo[afinfo->family] = afinfo;
	write_unlock_bh(&xfrm_state_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_state_register_afinfo);

int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
{
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock_bh(&xfrm_state_afinfo_lock);
	if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
		if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo))
			err = -EINVAL;
		else
			xfrm_state_afinfo[afinfo->family] = NULL;
	}
	write_unlock_bh(&xfrm_state_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_state_unregister_afinfo);

struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family)
{
	struct xfrm_state_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	read_lock(&xfrm_state_afinfo_lock);
	afinfo = xfrm_state_afinfo[family];
	if (unlikely(!afinfo))
		read_unlock(&xfrm_state_afinfo_lock);
	return afinfo;
}

void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
{
	read_unlock(&xfrm_state_afinfo_lock);
}

EXPORT_SYMBOL(xfrm_state_get_afinfo);
EXPORT_SYMBOL(xfrm_state_put_afinfo);

/* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
void xfrm_state_delete_tunnel(struct xfrm_state *x)
{
	if (x->tunnel) {
		struct xfrm_state *t = x->tunnel;

		if (atomic_read(&t->tunnel_users) == 2)
			xfrm_state_delete(t);
		atomic_dec(&t->tunnel_users);
		xfrm_state_put(t);
		x->tunnel = NULL;
	}
}
EXPORT_SYMBOL(xfrm_state_delete_tunnel);

/*
 * This function is NOT optimal. For example, with ESP it will give an
 * MTU that's usually two bytes short of being optimal. However, it will
 * usually give an answer that's a multiple of 4 provided the input is
 * also a multiple of 4.
 */
int xfrm_state_mtu(struct xfrm_state *x, int mtu)
{
	int res = mtu;

	res -= x->props.header_len;

	for (;;) {
		int m = res;

		if (m < 68)
			return 68;

		spin_lock_bh(&x->lock);
		if (x->km.state == XFRM_STATE_VALID &&
		    x->type && x->type->get_max_size)
			m = x->type->get_max_size(x, m);
		else
			m += x->props.header_len;
		spin_unlock_bh(&x->lock);

		if (m <= mtu)
			break;
		res -= (m - mtu);
	}

	return res;
}
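
/* Worked example: for a state with props.header_len == 32 and mtu == 1500,
 * the first trial payload is 1468 bytes; get_max_size() reports the padded
 * on-the-wire size, and res is reduced by the overshoot until the result
 * fits in the original mtu, never dropping below the IPv4 minimum of 68.
 */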

int xfrm_init_state(struct xfrm_state *x)
{
	struct xfrm_state_afinfo *afinfo;
	int family = x->props.family;
	int err;

	err = -EAFNOSUPPORT;
	afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		goto error;

	err = 0;
	if (afinfo->init_flags)
		err = afinfo->init_flags(x);

	xfrm_state_put_afinfo(afinfo);

	if (err)
		goto error;

	err = -EPROTONOSUPPORT;
	x->type = xfrm_get_type(x->id.proto, family);
	if (x->type == NULL)
		goto error;

	err = x->type->init_state(x);
	if (err)
		goto error;

	/* Reset err: a mode lookup failure must not return the zero
	 * left over from a successful init_state() above. */
	err = -EPROTONOSUPPORT;
	x->mode = xfrm_get_mode(x->props.mode, family);
	if (x->mode == NULL)
		goto error;

	err = 0;
	x->km.state = XFRM_STATE_VALID;

error:
	return err;
}

EXPORT_SYMBOL(xfrm_init_state);

void __init xfrm_state_init(void)
{
	unsigned int sz;

	sz = sizeof(struct hlist_head) * 8;

	xfrm_state_bydst = xfrm_hash_alloc(sz);
	xfrm_state_bysrc = xfrm_hash_alloc(sz);
	xfrm_state_byspi = xfrm_hash_alloc(sz);
	if (!xfrm_state_bydst || !xfrm_state_bysrc || !xfrm_state_byspi)
		panic("XFRM: Cannot allocate bydst/bysrc/byspi hashes.");
	xfrm_state_hmask = ((sz / sizeof(struct hlist_head)) - 1);

	INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task);
}