]> git.proxmox.com Git - mirror_ubuntu-focal-kernel.git/blob - net/xfrm/xfrm_policy.c
Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost
[mirror_ubuntu-focal-kernel.git] / net / xfrm / xfrm_policy.c
1 /*
2 * xfrm_policy.c
3 *
4 * Changes:
5 * Mitsuru KANDA @USAGI
6 * Kazunori MIYAZAWA @USAGI
7 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
8 * IPv6 support
9 * Kazunori MIYAZAWA @USAGI
10 * YOSHIFUJI Hideaki
11 * Split up af-specific portion
12 * Derek Atkins <derek@ihtfp.com> Add the post_input processor
13 *
14 */
15
16 #include <linux/err.h>
17 #include <linux/slab.h>
18 #include <linux/kmod.h>
19 #include <linux/list.h>
20 #include <linux/spinlock.h>
21 #include <linux/workqueue.h>
22 #include <linux/notifier.h>
23 #include <linux/netdevice.h>
24 #include <linux/netfilter.h>
25 #include <linux/module.h>
26 #include <linux/cache.h>
27 #include <linux/cpu.h>
28 #include <linux/audit.h>
29 #include <linux/rhashtable.h>
30 #include <linux/if_tunnel.h>
31 #include <net/dst.h>
32 #include <net/flow.h>
33 #include <net/xfrm.h>
34 #include <net/ip.h>
35 #if IS_ENABLED(CONFIG_IPV6_MIP6)
36 #include <net/mip6.h>
37 #endif
38 #ifdef CONFIG_XFRM_STATISTICS
39 #include <net/snmp.h>
40 #endif
41
42 #include "xfrm_hash.h"
43
44 #define XFRM_QUEUE_TMO_MIN ((unsigned)(HZ/10))
45 #define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
46 #define XFRM_MAX_QUEUE_LEN 100
47
48 struct xfrm_flo {
49 struct dst_entry *dst_orig;
50 u8 flags;
51 };
52
53 /* prefixes smaller than this are stored in lists, not trees. */
54 #define INEXACT_PREFIXLEN_IPV4 16
55 #define INEXACT_PREFIXLEN_IPV6 48
56
57 struct xfrm_pol_inexact_node {
58 struct rb_node node;
59 union {
60 xfrm_address_t addr;
61 struct rcu_head rcu;
62 };
63 u8 prefixlen;
64
65 struct rb_root root;
66
67 /* the policies matching this node, can be empty list */
68 struct hlist_head hhead;
69 };
70
71 /* xfrm inexact policy search tree:
72 * xfrm_pol_inexact_bin = hash(dir,type,family,if_id);
73 * |
74 * +---- root_d: sorted by daddr:prefix
75 * | |
76 * | xfrm_pol_inexact_node
77 * | |
78 * | +- root: sorted by saddr/prefix
79 * | | |
80 * | | xfrm_pol_inexact_node
81 * | | |
82 * | | + root: unused
83 * | | |
84 * | | + hhead: saddr:daddr policies
85 * | |
86 * | +- coarse policies and all any:daddr policies
87 * |
88 * +---- root_s: sorted by saddr:prefix
89 * | |
90 * | xfrm_pol_inexact_node
91 * | |
92 * | + root: unused
93 * | |
94 * | + hhead: saddr:any policies
95 * |
96 * +---- coarse policies and all any:any policies
97 *
98 * Lookups return four candidate lists:
99 * 1. any:any list from top-level xfrm_pol_inexact_bin
100 * 2. any:daddr list from daddr tree
101 * 3. saddr:daddr list from 2nd level daddr tree
102 * 4. saddr:any list from saddr tree
103 *
104 * This result set then needs to be searched for the policy with
105 * the lowest priority. If two results have same prio, youngest one wins.
106 */
107
108 struct xfrm_pol_inexact_key {
109 possible_net_t net;
110 u32 if_id;
111 u16 family;
112 u8 dir, type;
113 };
114
115 struct xfrm_pol_inexact_bin {
116 struct xfrm_pol_inexact_key k;
117 struct rhash_head head;
118 /* list containing '*:*' policies */
119 struct hlist_head hhead;
120
121 seqcount_t count;
122 /* tree sorted by daddr/prefix */
123 struct rb_root root_d;
124
125 /* tree sorted by saddr/prefix */
126 struct rb_root root_s;
127
128 /* slow path below */
129 struct list_head inexact_bins;
130 struct rcu_head rcu;
131 };
132
133 enum xfrm_pol_inexact_candidate_type {
134 XFRM_POL_CAND_BOTH,
135 XFRM_POL_CAND_SADDR,
136 XFRM_POL_CAND_DADDR,
137 XFRM_POL_CAND_ANY,
138
139 XFRM_POL_CAND_MAX,
140 };
141
142 struct xfrm_pol_inexact_candidates {
143 struct hlist_head *res[XFRM_POL_CAND_MAX];
144 };
145
146 static DEFINE_SPINLOCK(xfrm_if_cb_lock);
147 static struct xfrm_if_cb const __rcu *xfrm_if_cb __read_mostly;
148
149 static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
150 static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1]
151 __read_mostly;
152
153 static struct kmem_cache *xfrm_dst_cache __ro_after_init;
154 static __read_mostly seqcount_t xfrm_policy_hash_generation;
155
156 static struct rhashtable xfrm_policy_inexact_table;
157 static const struct rhashtable_params xfrm_pol_inexact_params;
158
159 static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr);
160 static int stale_bundle(struct dst_entry *dst);
161 static int xfrm_bundle_ok(struct xfrm_dst *xdst);
162 static void xfrm_policy_queue_process(struct timer_list *t);
163
164 static void __xfrm_policy_link(struct xfrm_policy *pol, int dir);
165 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
166 int dir);
167
168 static struct xfrm_pol_inexact_bin *
169 xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family, u8 dir,
170 u32 if_id);
171
172 static struct xfrm_pol_inexact_bin *
173 xfrm_policy_inexact_lookup_rcu(struct net *net,
174 u8 type, u16 family, u8 dir, u32 if_id);
175 static struct xfrm_policy *
176 xfrm_policy_insert_list(struct hlist_head *chain, struct xfrm_policy *policy,
177 bool excl);
178 static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
179 struct xfrm_policy *policy);
180
181 static bool
182 xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
183 struct xfrm_pol_inexact_bin *b,
184 const xfrm_address_t *saddr,
185 const xfrm_address_t *daddr);
186
187 static inline bool xfrm_pol_hold_rcu(struct xfrm_policy *policy)
188 {
189 return refcount_inc_not_zero(&policy->refcnt);
190 }
191
192 static inline bool
193 __xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
194 {
195 const struct flowi4 *fl4 = &fl->u.ip4;
196
197 return addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
198 addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
199 !((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
200 !((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
201 (fl4->flowi4_proto == sel->proto || !sel->proto) &&
202 (fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
203 }
204
205 static inline bool
206 __xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
207 {
208 const struct flowi6 *fl6 = &fl->u.ip6;
209
210 return addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
211 addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
212 !((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
213 !((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
214 (fl6->flowi6_proto == sel->proto || !sel->proto) &&
215 (fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
216 }
217
218 bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
219 unsigned short family)
220 {
221 switch (family) {
222 case AF_INET:
223 return __xfrm4_selector_match(sel, fl);
224 case AF_INET6:
225 return __xfrm6_selector_match(sel, fl);
226 }
227 return false;
228 }
229
230 static const struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
231 {
232 const struct xfrm_policy_afinfo *afinfo;
233
234 if (unlikely(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
235 return NULL;
236 rcu_read_lock();
237 afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
238 if (unlikely(!afinfo))
239 rcu_read_unlock();
240 return afinfo;
241 }
242
243 /* Called with rcu_read_lock(). */
244 static const struct xfrm_if_cb *xfrm_if_get_cb(void)
245 {
246 return rcu_dereference(xfrm_if_cb);
247 }
248
249 struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif,
250 const xfrm_address_t *saddr,
251 const xfrm_address_t *daddr,
252 int family, u32 mark)
253 {
254 const struct xfrm_policy_afinfo *afinfo;
255 struct dst_entry *dst;
256
257 afinfo = xfrm_policy_get_afinfo(family);
258 if (unlikely(afinfo == NULL))
259 return ERR_PTR(-EAFNOSUPPORT);
260
261 dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr, mark);
262
263 rcu_read_unlock();
264
265 return dst;
266 }
267 EXPORT_SYMBOL(__xfrm_dst_lookup);
268
269 static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
270 int tos, int oif,
271 xfrm_address_t *prev_saddr,
272 xfrm_address_t *prev_daddr,
273 int family, u32 mark)
274 {
275 struct net *net = xs_net(x);
276 xfrm_address_t *saddr = &x->props.saddr;
277 xfrm_address_t *daddr = &x->id.daddr;
278 struct dst_entry *dst;
279
280 if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
281 saddr = x->coaddr;
282 daddr = prev_daddr;
283 }
284 if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
285 saddr = prev_saddr;
286 daddr = x->coaddr;
287 }
288
289 dst = __xfrm_dst_lookup(net, tos, oif, saddr, daddr, family, mark);
290
291 if (!IS_ERR(dst)) {
292 if (prev_saddr != saddr)
293 memcpy(prev_saddr, saddr, sizeof(*prev_saddr));
294 if (prev_daddr != daddr)
295 memcpy(prev_daddr, daddr, sizeof(*prev_daddr));
296 }
297
298 return dst;
299 }
300
301 static inline unsigned long make_jiffies(long secs)
302 {
303 if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
304 return MAX_SCHEDULE_TIMEOUT-1;
305 else
306 return secs*HZ;
307 }
308
309 static void xfrm_policy_timer(struct timer_list *t)
310 {
311 struct xfrm_policy *xp = from_timer(xp, t, timer);
312 time64_t now = ktime_get_real_seconds();
313 time64_t next = TIME64_MAX;
314 int warn = 0;
315 int dir;
316
317 read_lock(&xp->lock);
318
319 if (unlikely(xp->walk.dead))
320 goto out;
321
322 dir = xfrm_policy_id2dir(xp->index);
323
324 if (xp->lft.hard_add_expires_seconds) {
325 time64_t tmo = xp->lft.hard_add_expires_seconds +
326 xp->curlft.add_time - now;
327 if (tmo <= 0)
328 goto expired;
329 if (tmo < next)
330 next = tmo;
331 }
332 if (xp->lft.hard_use_expires_seconds) {
333 time64_t tmo = xp->lft.hard_use_expires_seconds +
334 (xp->curlft.use_time ? : xp->curlft.add_time) - now;
335 if (tmo <= 0)
336 goto expired;
337 if (tmo < next)
338 next = tmo;
339 }
340 if (xp->lft.soft_add_expires_seconds) {
341 time64_t tmo = xp->lft.soft_add_expires_seconds +
342 xp->curlft.add_time - now;
343 if (tmo <= 0) {
344 warn = 1;
345 tmo = XFRM_KM_TIMEOUT;
346 }
347 if (tmo < next)
348 next = tmo;
349 }
350 if (xp->lft.soft_use_expires_seconds) {
351 time64_t tmo = xp->lft.soft_use_expires_seconds +
352 (xp->curlft.use_time ? : xp->curlft.add_time) - now;
353 if (tmo <= 0) {
354 warn = 1;
355 tmo = XFRM_KM_TIMEOUT;
356 }
357 if (tmo < next)
358 next = tmo;
359 }
360
361 if (warn)
362 km_policy_expired(xp, dir, 0, 0);
363 if (next != TIME64_MAX &&
364 !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
365 xfrm_pol_hold(xp);
366
367 out:
368 read_unlock(&xp->lock);
369 xfrm_pol_put(xp);
370 return;
371
372 expired:
373 read_unlock(&xp->lock);
374 if (!xfrm_policy_delete(xp, dir))
375 km_policy_expired(xp, dir, 1, 0);
376 xfrm_pol_put(xp);
377 }
378
379 /* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
380 * SPD calls.
381 */
382
383 struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
384 {
385 struct xfrm_policy *policy;
386
387 policy = kzalloc(sizeof(struct xfrm_policy), gfp);
388
389 if (policy) {
390 write_pnet(&policy->xp_net, net);
391 INIT_LIST_HEAD(&policy->walk.all);
392 INIT_HLIST_NODE(&policy->bydst_inexact_list);
393 INIT_HLIST_NODE(&policy->bydst);
394 INIT_HLIST_NODE(&policy->byidx);
395 rwlock_init(&policy->lock);
396 refcount_set(&policy->refcnt, 1);
397 skb_queue_head_init(&policy->polq.hold_queue);
398 timer_setup(&policy->timer, xfrm_policy_timer, 0);
399 timer_setup(&policy->polq.hold_timer,
400 xfrm_policy_queue_process, 0);
401 }
402 return policy;
403 }
404 EXPORT_SYMBOL(xfrm_policy_alloc);
405
406 static void xfrm_policy_destroy_rcu(struct rcu_head *head)
407 {
408 struct xfrm_policy *policy = container_of(head, struct xfrm_policy, rcu);
409
410 security_xfrm_policy_free(policy->security);
411 kfree(policy);
412 }
413
414 /* Destroy xfrm_policy: descendant resources must be released to this moment. */
415
416 void xfrm_policy_destroy(struct xfrm_policy *policy)
417 {
418 BUG_ON(!policy->walk.dead);
419
420 if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
421 BUG();
422
423 call_rcu(&policy->rcu, xfrm_policy_destroy_rcu);
424 }
425 EXPORT_SYMBOL(xfrm_policy_destroy);
426
427 /* Rule must be locked. Release descendant resources, announce
428 * entry dead. The rule must be unlinked from lists to the moment.
429 */
430
431 static void xfrm_policy_kill(struct xfrm_policy *policy)
432 {
433 policy->walk.dead = 1;
434
435 atomic_inc(&policy->genid);
436
437 if (del_timer(&policy->polq.hold_timer))
438 xfrm_pol_put(policy);
439 skb_queue_purge(&policy->polq.hold_queue);
440
441 if (del_timer(&policy->timer))
442 xfrm_pol_put(policy);
443
444 xfrm_pol_put(policy);
445 }
446
447 static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;
448
449 static inline unsigned int idx_hash(struct net *net, u32 index)
450 {
451 return __idx_hash(index, net->xfrm.policy_idx_hmask);
452 }
453
454 /* calculate policy hash thresholds */
455 static void __get_hash_thresh(struct net *net,
456 unsigned short family, int dir,
457 u8 *dbits, u8 *sbits)
458 {
459 switch (family) {
460 case AF_INET:
461 *dbits = net->xfrm.policy_bydst[dir].dbits4;
462 *sbits = net->xfrm.policy_bydst[dir].sbits4;
463 break;
464
465 case AF_INET6:
466 *dbits = net->xfrm.policy_bydst[dir].dbits6;
467 *sbits = net->xfrm.policy_bydst[dir].sbits6;
468 break;
469
470 default:
471 *dbits = 0;
472 *sbits = 0;
473 }
474 }
475
476 static struct hlist_head *policy_hash_bysel(struct net *net,
477 const struct xfrm_selector *sel,
478 unsigned short family, int dir)
479 {
480 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
481 unsigned int hash;
482 u8 dbits;
483 u8 sbits;
484
485 __get_hash_thresh(net, family, dir, &dbits, &sbits);
486 hash = __sel_hash(sel, family, hmask, dbits, sbits);
487
488 if (hash == hmask + 1)
489 return NULL;
490
491 return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
492 lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
493 }
494
495 static struct hlist_head *policy_hash_direct(struct net *net,
496 const xfrm_address_t *daddr,
497 const xfrm_address_t *saddr,
498 unsigned short family, int dir)
499 {
500 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
501 unsigned int hash;
502 u8 dbits;
503 u8 sbits;
504
505 __get_hash_thresh(net, family, dir, &dbits, &sbits);
506 hash = __addr_hash(daddr, saddr, family, hmask, dbits, sbits);
507
508 return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
509 lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
510 }
511
512 static void xfrm_dst_hash_transfer(struct net *net,
513 struct hlist_head *list,
514 struct hlist_head *ndsttable,
515 unsigned int nhashmask,
516 int dir)
517 {
518 struct hlist_node *tmp, *entry0 = NULL;
519 struct xfrm_policy *pol;
520 unsigned int h0 = 0;
521 u8 dbits;
522 u8 sbits;
523
524 redo:
525 hlist_for_each_entry_safe(pol, tmp, list, bydst) {
526 unsigned int h;
527
528 __get_hash_thresh(net, pol->family, dir, &dbits, &sbits);
529 h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
530 pol->family, nhashmask, dbits, sbits);
531 if (!entry0) {
532 hlist_del_rcu(&pol->bydst);
533 hlist_add_head_rcu(&pol->bydst, ndsttable + h);
534 h0 = h;
535 } else {
536 if (h != h0)
537 continue;
538 hlist_del_rcu(&pol->bydst);
539 hlist_add_behind_rcu(&pol->bydst, entry0);
540 }
541 entry0 = &pol->bydst;
542 }
543 if (!hlist_empty(list)) {
544 entry0 = NULL;
545 goto redo;
546 }
547 }
548
549 static void xfrm_idx_hash_transfer(struct hlist_head *list,
550 struct hlist_head *nidxtable,
551 unsigned int nhashmask)
552 {
553 struct hlist_node *tmp;
554 struct xfrm_policy *pol;
555
556 hlist_for_each_entry_safe(pol, tmp, list, byidx) {
557 unsigned int h;
558
559 h = __idx_hash(pol->index, nhashmask);
560 hlist_add_head(&pol->byidx, nidxtable+h);
561 }
562 }
563
564 static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
565 {
566 return ((old_hmask + 1) << 1) - 1;
567 }
568
569 static void xfrm_bydst_resize(struct net *net, int dir)
570 {
571 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
572 unsigned int nhashmask = xfrm_new_hash_mask(hmask);
573 unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
574 struct hlist_head *ndst = xfrm_hash_alloc(nsize);
575 struct hlist_head *odst;
576 int i;
577
578 if (!ndst)
579 return;
580
581 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
582 write_seqcount_begin(&xfrm_policy_hash_generation);
583
584 odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table,
585 lockdep_is_held(&net->xfrm.xfrm_policy_lock));
586
587 odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table,
588 lockdep_is_held(&net->xfrm.xfrm_policy_lock));
589
590 for (i = hmask; i >= 0; i--)
591 xfrm_dst_hash_transfer(net, odst + i, ndst, nhashmask, dir);
592
593 rcu_assign_pointer(net->xfrm.policy_bydst[dir].table, ndst);
594 net->xfrm.policy_bydst[dir].hmask = nhashmask;
595
596 write_seqcount_end(&xfrm_policy_hash_generation);
597 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
598
599 synchronize_rcu();
600
601 xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
602 }
603
604 static void xfrm_byidx_resize(struct net *net, int total)
605 {
606 unsigned int hmask = net->xfrm.policy_idx_hmask;
607 unsigned int nhashmask = xfrm_new_hash_mask(hmask);
608 unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
609 struct hlist_head *oidx = net->xfrm.policy_byidx;
610 struct hlist_head *nidx = xfrm_hash_alloc(nsize);
611 int i;
612
613 if (!nidx)
614 return;
615
616 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
617
618 for (i = hmask; i >= 0; i--)
619 xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);
620
621 net->xfrm.policy_byidx = nidx;
622 net->xfrm.policy_idx_hmask = nhashmask;
623
624 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
625
626 xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
627 }
628
629 static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
630 {
631 unsigned int cnt = net->xfrm.policy_count[dir];
632 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
633
634 if (total)
635 *total += cnt;
636
637 if ((hmask + 1) < xfrm_policy_hashmax &&
638 cnt > hmask)
639 return 1;
640
641 return 0;
642 }
643
644 static inline int xfrm_byidx_should_resize(struct net *net, int total)
645 {
646 unsigned int hmask = net->xfrm.policy_idx_hmask;
647
648 if ((hmask + 1) < xfrm_policy_hashmax &&
649 total > hmask)
650 return 1;
651
652 return 0;
653 }
654
655 void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
656 {
657 si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
658 si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
659 si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
660 si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
661 si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
662 si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
663 si->spdhcnt = net->xfrm.policy_idx_hmask;
664 si->spdhmcnt = xfrm_policy_hashmax;
665 }
666 EXPORT_SYMBOL(xfrm_spd_getinfo);
667
668 static DEFINE_MUTEX(hash_resize_mutex);
669 static void xfrm_hash_resize(struct work_struct *work)
670 {
671 struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
672 int dir, total;
673
674 mutex_lock(&hash_resize_mutex);
675
676 total = 0;
677 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
678 if (xfrm_bydst_should_resize(net, dir, &total))
679 xfrm_bydst_resize(net, dir);
680 }
681 if (xfrm_byidx_should_resize(net, total))
682 xfrm_byidx_resize(net, total);
683
684 mutex_unlock(&hash_resize_mutex);
685 }
686
687 /* Make sure *pol can be inserted into fastbin.
688 * Useful to check that later insert requests will be sucessful
689 * (provided xfrm_policy_lock is held throughout).
690 */
691 static struct xfrm_pol_inexact_bin *
692 xfrm_policy_inexact_alloc_bin(const struct xfrm_policy *pol, u8 dir)
693 {
694 struct xfrm_pol_inexact_bin *bin, *prev;
695 struct xfrm_pol_inexact_key k = {
696 .family = pol->family,
697 .type = pol->type,
698 .dir = dir,
699 .if_id = pol->if_id,
700 };
701 struct net *net = xp_net(pol);
702
703 lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
704
705 write_pnet(&k.net, net);
706 bin = rhashtable_lookup_fast(&xfrm_policy_inexact_table, &k,
707 xfrm_pol_inexact_params);
708 if (bin)
709 return bin;
710
711 bin = kzalloc(sizeof(*bin), GFP_ATOMIC);
712 if (!bin)
713 return NULL;
714
715 bin->k = k;
716 INIT_HLIST_HEAD(&bin->hhead);
717 bin->root_d = RB_ROOT;
718 bin->root_s = RB_ROOT;
719 seqcount_init(&bin->count);
720
721 prev = rhashtable_lookup_get_insert_key(&xfrm_policy_inexact_table,
722 &bin->k, &bin->head,
723 xfrm_pol_inexact_params);
724 if (!prev) {
725 list_add(&bin->inexact_bins, &net->xfrm.inexact_bins);
726 return bin;
727 }
728
729 kfree(bin);
730
731 return IS_ERR(prev) ? NULL : prev;
732 }
733
734 static bool xfrm_pol_inexact_addr_use_any_list(const xfrm_address_t *addr,
735 int family, u8 prefixlen)
736 {
737 if (xfrm_addr_any(addr, family))
738 return true;
739
740 if (family == AF_INET6 && prefixlen < INEXACT_PREFIXLEN_IPV6)
741 return true;
742
743 if (family == AF_INET && prefixlen < INEXACT_PREFIXLEN_IPV4)
744 return true;
745
746 return false;
747 }
748
749 static bool
750 xfrm_policy_inexact_insert_use_any_list(const struct xfrm_policy *policy)
751 {
752 const xfrm_address_t *addr;
753 bool saddr_any, daddr_any;
754 u8 prefixlen;
755
756 addr = &policy->selector.saddr;
757 prefixlen = policy->selector.prefixlen_s;
758
759 saddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
760 policy->family,
761 prefixlen);
762 addr = &policy->selector.daddr;
763 prefixlen = policy->selector.prefixlen_d;
764 daddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
765 policy->family,
766 prefixlen);
767 return saddr_any && daddr_any;
768 }
769
770 static void xfrm_pol_inexact_node_init(struct xfrm_pol_inexact_node *node,
771 const xfrm_address_t *addr, u8 prefixlen)
772 {
773 node->addr = *addr;
774 node->prefixlen = prefixlen;
775 }
776
777 static struct xfrm_pol_inexact_node *
778 xfrm_pol_inexact_node_alloc(const xfrm_address_t *addr, u8 prefixlen)
779 {
780 struct xfrm_pol_inexact_node *node;
781
782 node = kzalloc(sizeof(*node), GFP_ATOMIC);
783 if (node)
784 xfrm_pol_inexact_node_init(node, addr, prefixlen);
785
786 return node;
787 }
788
789 static int xfrm_policy_addr_delta(const xfrm_address_t *a,
790 const xfrm_address_t *b,
791 u8 prefixlen, u16 family)
792 {
793 unsigned int pdw, pbi;
794 int delta = 0;
795
796 switch (family) {
797 case AF_INET:
798 if (sizeof(long) == 4 && prefixlen == 0)
799 return ntohl(a->a4) - ntohl(b->a4);
800 return (ntohl(a->a4) & ((~0UL << (32 - prefixlen)))) -
801 (ntohl(b->a4) & ((~0UL << (32 - prefixlen))));
802 case AF_INET6:
803 pdw = prefixlen >> 5;
804 pbi = prefixlen & 0x1f;
805
806 if (pdw) {
807 delta = memcmp(a->a6, b->a6, pdw << 2);
808 if (delta)
809 return delta;
810 }
811 if (pbi) {
812 u32 mask = ~0u << (32 - pbi);
813
814 delta = (ntohl(a->a6[pdw]) & mask) -
815 (ntohl(b->a6[pdw]) & mask);
816 }
817 break;
818 default:
819 break;
820 }
821
822 return delta;
823 }
824
825 static void xfrm_policy_inexact_list_reinsert(struct net *net,
826 struct xfrm_pol_inexact_node *n,
827 u16 family)
828 {
829 unsigned int matched_s, matched_d;
830 struct xfrm_policy *policy, *p;
831
832 matched_s = 0;
833 matched_d = 0;
834
835 list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
836 struct hlist_node *newpos = NULL;
837 bool matches_s, matches_d;
838
839 if (!policy->bydst_reinsert)
840 continue;
841
842 WARN_ON_ONCE(policy->family != family);
843
844 policy->bydst_reinsert = false;
845 hlist_for_each_entry(p, &n->hhead, bydst) {
846 if (policy->priority > p->priority)
847 newpos = &p->bydst;
848 else if (policy->priority == p->priority &&
849 policy->pos > p->pos)
850 newpos = &p->bydst;
851 else
852 break;
853 }
854
855 if (newpos)
856 hlist_add_behind_rcu(&policy->bydst, newpos);
857 else
858 hlist_add_head_rcu(&policy->bydst, &n->hhead);
859
860 /* paranoia checks follow.
861 * Check that the reinserted policy matches at least
862 * saddr or daddr for current node prefix.
863 *
864 * Matching both is fine, matching saddr in one policy
865 * (but not daddr) and then matching only daddr in another
866 * is a bug.
867 */
868 matches_s = xfrm_policy_addr_delta(&policy->selector.saddr,
869 &n->addr,
870 n->prefixlen,
871 family) == 0;
872 matches_d = xfrm_policy_addr_delta(&policy->selector.daddr,
873 &n->addr,
874 n->prefixlen,
875 family) == 0;
876 if (matches_s && matches_d)
877 continue;
878
879 WARN_ON_ONCE(!matches_s && !matches_d);
880 if (matches_s)
881 matched_s++;
882 if (matches_d)
883 matched_d++;
884 WARN_ON_ONCE(matched_s && matched_d);
885 }
886 }
887
888 static void xfrm_policy_inexact_node_reinsert(struct net *net,
889 struct xfrm_pol_inexact_node *n,
890 struct rb_root *new,
891 u16 family)
892 {
893 struct xfrm_pol_inexact_node *node;
894 struct rb_node **p, *parent;
895
896 /* we should not have another subtree here */
897 WARN_ON_ONCE(!RB_EMPTY_ROOT(&n->root));
898 restart:
899 parent = NULL;
900 p = &new->rb_node;
901 while (*p) {
902 u8 prefixlen;
903 int delta;
904
905 parent = *p;
906 node = rb_entry(*p, struct xfrm_pol_inexact_node, node);
907
908 prefixlen = min(node->prefixlen, n->prefixlen);
909
910 delta = xfrm_policy_addr_delta(&n->addr, &node->addr,
911 prefixlen, family);
912 if (delta < 0) {
913 p = &parent->rb_left;
914 } else if (delta > 0) {
915 p = &parent->rb_right;
916 } else {
917 struct xfrm_policy *tmp;
918
919 hlist_for_each_entry(tmp, &n->hhead, bydst) {
920 tmp->bydst_reinsert = true;
921 hlist_del_rcu(&tmp->bydst);
922 }
923
924 xfrm_policy_inexact_list_reinsert(net, node, family);
925
926 if (node->prefixlen == n->prefixlen) {
927 kfree_rcu(n, rcu);
928 return;
929 }
930
931 rb_erase(*p, new);
932 kfree_rcu(n, rcu);
933 n = node;
934 n->prefixlen = prefixlen;
935 goto restart;
936 }
937 }
938
939 rb_link_node_rcu(&n->node, parent, p);
940 rb_insert_color(&n->node, new);
941 }
942
943 /* merge nodes v and n */
944 static void xfrm_policy_inexact_node_merge(struct net *net,
945 struct xfrm_pol_inexact_node *v,
946 struct xfrm_pol_inexact_node *n,
947 u16 family)
948 {
949 struct xfrm_pol_inexact_node *node;
950 struct xfrm_policy *tmp;
951 struct rb_node *rnode;
952
953 /* To-be-merged node v has a subtree.
954 *
955 * Dismantle it and insert its nodes to n->root.
956 */
957 while ((rnode = rb_first(&v->root)) != NULL) {
958 node = rb_entry(rnode, struct xfrm_pol_inexact_node, node);
959 rb_erase(&node->node, &v->root);
960 xfrm_policy_inexact_node_reinsert(net, node, &n->root,
961 family);
962 }
963
964 hlist_for_each_entry(tmp, &v->hhead, bydst) {
965 tmp->bydst_reinsert = true;
966 hlist_del_rcu(&tmp->bydst);
967 }
968
969 xfrm_policy_inexact_list_reinsert(net, n, family);
970 }
971
972 static struct xfrm_pol_inexact_node *
973 xfrm_policy_inexact_insert_node(struct net *net,
974 struct rb_root *root,
975 xfrm_address_t *addr,
976 u16 family, u8 prefixlen, u8 dir)
977 {
978 struct xfrm_pol_inexact_node *cached = NULL;
979 struct rb_node **p, *parent = NULL;
980 struct xfrm_pol_inexact_node *node;
981
982 p = &root->rb_node;
983 while (*p) {
984 int delta;
985
986 parent = *p;
987 node = rb_entry(*p, struct xfrm_pol_inexact_node, node);
988
989 delta = xfrm_policy_addr_delta(addr, &node->addr,
990 node->prefixlen,
991 family);
992 if (delta == 0 && prefixlen >= node->prefixlen) {
993 WARN_ON_ONCE(cached); /* ipsec policies got lost */
994 return node;
995 }
996
997 if (delta < 0)
998 p = &parent->rb_left;
999 else
1000 p = &parent->rb_right;
1001
1002 if (prefixlen < node->prefixlen) {
1003 delta = xfrm_policy_addr_delta(addr, &node->addr,
1004 prefixlen,
1005 family);
1006 if (delta)
1007 continue;
1008
1009 /* This node is a subnet of the new prefix. It needs
1010 * to be removed and re-inserted with the smaller
1011 * prefix and all nodes that are now also covered
1012 * by the reduced prefixlen.
1013 */
1014 rb_erase(&node->node, root);
1015
1016 if (!cached) {
1017 xfrm_pol_inexact_node_init(node, addr,
1018 prefixlen);
1019 cached = node;
1020 } else {
1021 /* This node also falls within the new
1022 * prefixlen. Merge the to-be-reinserted
1023 * node and this one.
1024 */
1025 xfrm_policy_inexact_node_merge(net, node,
1026 cached, family);
1027 kfree_rcu(node, rcu);
1028 }
1029
1030 /* restart */
1031 p = &root->rb_node;
1032 parent = NULL;
1033 }
1034 }
1035
1036 node = cached;
1037 if (!node) {
1038 node = xfrm_pol_inexact_node_alloc(addr, prefixlen);
1039 if (!node)
1040 return NULL;
1041 }
1042
1043 rb_link_node_rcu(&node->node, parent, p);
1044 rb_insert_color(&node->node, root);
1045
1046 return node;
1047 }
1048
1049 static void xfrm_policy_inexact_gc_tree(struct rb_root *r, bool rm)
1050 {
1051 struct xfrm_pol_inexact_node *node;
1052 struct rb_node *rn = rb_first(r);
1053
1054 while (rn) {
1055 node = rb_entry(rn, struct xfrm_pol_inexact_node, node);
1056
1057 xfrm_policy_inexact_gc_tree(&node->root, rm);
1058 rn = rb_next(rn);
1059
1060 if (!hlist_empty(&node->hhead) || !RB_EMPTY_ROOT(&node->root)) {
1061 WARN_ON_ONCE(rm);
1062 continue;
1063 }
1064
1065 rb_erase(&node->node, r);
1066 kfree_rcu(node, rcu);
1067 }
1068 }
1069
1070 static void __xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b, bool net_exit)
1071 {
1072 write_seqcount_begin(&b->count);
1073 xfrm_policy_inexact_gc_tree(&b->root_d, net_exit);
1074 xfrm_policy_inexact_gc_tree(&b->root_s, net_exit);
1075 write_seqcount_end(&b->count);
1076
1077 if (!RB_EMPTY_ROOT(&b->root_d) || !RB_EMPTY_ROOT(&b->root_s) ||
1078 !hlist_empty(&b->hhead)) {
1079 WARN_ON_ONCE(net_exit);
1080 return;
1081 }
1082
1083 if (rhashtable_remove_fast(&xfrm_policy_inexact_table, &b->head,
1084 xfrm_pol_inexact_params) == 0) {
1085 list_del(&b->inexact_bins);
1086 kfree_rcu(b, rcu);
1087 }
1088 }
1089
1090 static void xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b)
1091 {
1092 struct net *net = read_pnet(&b->k.net);
1093
1094 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1095 __xfrm_policy_inexact_prune_bin(b, false);
1096 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1097 }
1098
1099 static void __xfrm_policy_inexact_flush(struct net *net)
1100 {
1101 struct xfrm_pol_inexact_bin *bin, *t;
1102
1103 lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
1104
1105 list_for_each_entry_safe(bin, t, &net->xfrm.inexact_bins, inexact_bins)
1106 __xfrm_policy_inexact_prune_bin(bin, false);
1107 }
1108
1109 static struct hlist_head *
1110 xfrm_policy_inexact_alloc_chain(struct xfrm_pol_inexact_bin *bin,
1111 struct xfrm_policy *policy, u8 dir)
1112 {
1113 struct xfrm_pol_inexact_node *n;
1114 struct net *net;
1115
1116 net = xp_net(policy);
1117 lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
1118
1119 if (xfrm_policy_inexact_insert_use_any_list(policy))
1120 return &bin->hhead;
1121
1122 if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.daddr,
1123 policy->family,
1124 policy->selector.prefixlen_d)) {
1125 write_seqcount_begin(&bin->count);
1126 n = xfrm_policy_inexact_insert_node(net,
1127 &bin->root_s,
1128 &policy->selector.saddr,
1129 policy->family,
1130 policy->selector.prefixlen_s,
1131 dir);
1132 write_seqcount_end(&bin->count);
1133 if (!n)
1134 return NULL;
1135
1136 return &n->hhead;
1137 }
1138
1139 /* daddr is fixed */
1140 write_seqcount_begin(&bin->count);
1141 n = xfrm_policy_inexact_insert_node(net,
1142 &bin->root_d,
1143 &policy->selector.daddr,
1144 policy->family,
1145 policy->selector.prefixlen_d, dir);
1146 write_seqcount_end(&bin->count);
1147 if (!n)
1148 return NULL;
1149
1150 /* saddr is wildcard */
1151 if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.saddr,
1152 policy->family,
1153 policy->selector.prefixlen_s))
1154 return &n->hhead;
1155
1156 write_seqcount_begin(&bin->count);
1157 n = xfrm_policy_inexact_insert_node(net,
1158 &n->root,
1159 &policy->selector.saddr,
1160 policy->family,
1161 policy->selector.prefixlen_s, dir);
1162 write_seqcount_end(&bin->count);
1163 if (!n)
1164 return NULL;
1165
1166 return &n->hhead;
1167 }
1168
1169 static struct xfrm_policy *
1170 xfrm_policy_inexact_insert(struct xfrm_policy *policy, u8 dir, int excl)
1171 {
1172 struct xfrm_pol_inexact_bin *bin;
1173 struct xfrm_policy *delpol;
1174 struct hlist_head *chain;
1175 struct net *net;
1176
1177 bin = xfrm_policy_inexact_alloc_bin(policy, dir);
1178 if (!bin)
1179 return ERR_PTR(-ENOMEM);
1180
1181 net = xp_net(policy);
1182 lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
1183
1184 chain = xfrm_policy_inexact_alloc_chain(bin, policy, dir);
1185 if (!chain) {
1186 __xfrm_policy_inexact_prune_bin(bin, false);
1187 return ERR_PTR(-ENOMEM);
1188 }
1189
1190 delpol = xfrm_policy_insert_list(chain, policy, excl);
1191 if (delpol && excl) {
1192 __xfrm_policy_inexact_prune_bin(bin, false);
1193 return ERR_PTR(-EEXIST);
1194 }
1195
1196 chain = &net->xfrm.policy_inexact[dir];
1197 xfrm_policy_insert_inexact_list(chain, policy);
1198
1199 if (delpol)
1200 __xfrm_policy_inexact_prune_bin(bin, false);
1201
1202 return delpol;
1203 }
1204
1205 static void xfrm_hash_rebuild(struct work_struct *work)
1206 {
1207 struct net *net = container_of(work, struct net,
1208 xfrm.policy_hthresh.work);
1209 unsigned int hmask;
1210 struct xfrm_policy *pol;
1211 struct xfrm_policy *policy;
1212 struct hlist_head *chain;
1213 struct hlist_head *odst;
1214 struct hlist_node *newpos;
1215 int i;
1216 int dir;
1217 unsigned seq;
1218 u8 lbits4, rbits4, lbits6, rbits6;
1219
1220 mutex_lock(&hash_resize_mutex);
1221
1222 /* read selector prefixlen thresholds */
1223 do {
1224 seq = read_seqbegin(&net->xfrm.policy_hthresh.lock);
1225
1226 lbits4 = net->xfrm.policy_hthresh.lbits4;
1227 rbits4 = net->xfrm.policy_hthresh.rbits4;
1228 lbits6 = net->xfrm.policy_hthresh.lbits6;
1229 rbits6 = net->xfrm.policy_hthresh.rbits6;
1230 } while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq));
1231
1232 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1233 write_seqcount_begin(&xfrm_policy_hash_generation);
1234
1235 /* make sure that we can insert the indirect policies again before
1236 * we start with destructive action.
1237 */
1238 list_for_each_entry(policy, &net->xfrm.policy_all, walk.all) {
1239 struct xfrm_pol_inexact_bin *bin;
1240 u8 dbits, sbits;
1241
1242 dir = xfrm_policy_id2dir(policy->index);
1243 if (policy->walk.dead || dir >= XFRM_POLICY_MAX)
1244 continue;
1245
1246 if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
1247 if (policy->family == AF_INET) {
1248 dbits = rbits4;
1249 sbits = lbits4;
1250 } else {
1251 dbits = rbits6;
1252 sbits = lbits6;
1253 }
1254 } else {
1255 if (policy->family == AF_INET) {
1256 dbits = lbits4;
1257 sbits = rbits4;
1258 } else {
1259 dbits = lbits6;
1260 sbits = rbits6;
1261 }
1262 }
1263
1264 if (policy->selector.prefixlen_d < dbits ||
1265 policy->selector.prefixlen_s < sbits)
1266 continue;
1267
1268 bin = xfrm_policy_inexact_alloc_bin(policy, dir);
1269 if (!bin)
1270 goto out_unlock;
1271
1272 if (!xfrm_policy_inexact_alloc_chain(bin, policy, dir))
1273 goto out_unlock;
1274 }
1275
1276 /* reset the bydst and inexact table in all directions */
1277 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
1278 struct hlist_node *n;
1279
1280 hlist_for_each_entry_safe(policy, n,
1281 &net->xfrm.policy_inexact[dir],
1282 bydst_inexact_list)
1283 hlist_del_init(&policy->bydst_inexact_list);
1284
1285 hmask = net->xfrm.policy_bydst[dir].hmask;
1286 odst = net->xfrm.policy_bydst[dir].table;
1287 for (i = hmask; i >= 0; i--)
1288 INIT_HLIST_HEAD(odst + i);
1289 if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
1290 /* dir out => dst = remote, src = local */
1291 net->xfrm.policy_bydst[dir].dbits4 = rbits4;
1292 net->xfrm.policy_bydst[dir].sbits4 = lbits4;
1293 net->xfrm.policy_bydst[dir].dbits6 = rbits6;
1294 net->xfrm.policy_bydst[dir].sbits6 = lbits6;
1295 } else {
1296 /* dir in/fwd => dst = local, src = remote */
1297 net->xfrm.policy_bydst[dir].dbits4 = lbits4;
1298 net->xfrm.policy_bydst[dir].sbits4 = rbits4;
1299 net->xfrm.policy_bydst[dir].dbits6 = lbits6;
1300 net->xfrm.policy_bydst[dir].sbits6 = rbits6;
1301 }
1302 }
1303
1304 /* re-insert all policies by order of creation */
1305 list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
1306 if (policy->walk.dead)
1307 continue;
1308 dir = xfrm_policy_id2dir(policy->index);
1309 if (dir >= XFRM_POLICY_MAX) {
1310 /* skip socket policies */
1311 continue;
1312 }
1313 newpos = NULL;
1314 chain = policy_hash_bysel(net, &policy->selector,
1315 policy->family, dir);
1316
1317 hlist_del_rcu(&policy->bydst);
1318
1319 if (!chain) {
1320 void *p = xfrm_policy_inexact_insert(policy, dir, 0);
1321
1322 WARN_ONCE(IS_ERR(p), "reinsert: %ld\n", PTR_ERR(p));
1323 continue;
1324 }
1325
1326 hlist_for_each_entry(pol, chain, bydst) {
1327 if (policy->priority >= pol->priority)
1328 newpos = &pol->bydst;
1329 else
1330 break;
1331 }
1332 if (newpos)
1333 hlist_add_behind_rcu(&policy->bydst, newpos);
1334 else
1335 hlist_add_head_rcu(&policy->bydst, chain);
1336 }
1337
1338 out_unlock:
1339 __xfrm_policy_inexact_flush(net);
1340 write_seqcount_end(&xfrm_policy_hash_generation);
1341 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1342
1343 mutex_unlock(&hash_resize_mutex);
1344 }
1345
1346 void xfrm_policy_hash_rebuild(struct net *net)
1347 {
1348 schedule_work(&net->xfrm.policy_hthresh.work);
1349 }
1350 EXPORT_SYMBOL(xfrm_policy_hash_rebuild);
1351
1352 /* Generate new index... KAME seems to generate them ordered by cost
1353 * of an absolute inpredictability of ordering of rules. This will not pass. */
1354 static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
1355 {
1356 static u32 idx_generator;
1357
1358 for (;;) {
1359 struct hlist_head *list;
1360 struct xfrm_policy *p;
1361 u32 idx;
1362 int found;
1363
1364 if (!index) {
1365 idx = (idx_generator | dir);
1366 idx_generator += 8;
1367 } else {
1368 idx = index;
1369 index = 0;
1370 }
1371
1372 if (idx == 0)
1373 idx = 8;
1374 list = net->xfrm.policy_byidx + idx_hash(net, idx);
1375 found = 0;
1376 hlist_for_each_entry(p, list, byidx) {
1377 if (p->index == idx) {
1378 found = 1;
1379 break;
1380 }
1381 }
1382 if (!found)
1383 return idx;
1384 }
1385 }
1386
1387 static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
1388 {
1389 u32 *p1 = (u32 *) s1;
1390 u32 *p2 = (u32 *) s2;
1391 int len = sizeof(struct xfrm_selector) / sizeof(u32);
1392 int i;
1393
1394 for (i = 0; i < len; i++) {
1395 if (p1[i] != p2[i])
1396 return 1;
1397 }
1398
1399 return 0;
1400 }
1401
1402 static void xfrm_policy_requeue(struct xfrm_policy *old,
1403 struct xfrm_policy *new)
1404 {
1405 struct xfrm_policy_queue *pq = &old->polq;
1406 struct sk_buff_head list;
1407
1408 if (skb_queue_empty(&pq->hold_queue))
1409 return;
1410
1411 __skb_queue_head_init(&list);
1412
1413 spin_lock_bh(&pq->hold_queue.lock);
1414 skb_queue_splice_init(&pq->hold_queue, &list);
1415 if (del_timer(&pq->hold_timer))
1416 xfrm_pol_put(old);
1417 spin_unlock_bh(&pq->hold_queue.lock);
1418
1419 pq = &new->polq;
1420
1421 spin_lock_bh(&pq->hold_queue.lock);
1422 skb_queue_splice(&list, &pq->hold_queue);
1423 pq->timeout = XFRM_QUEUE_TMO_MIN;
1424 if (!mod_timer(&pq->hold_timer, jiffies))
1425 xfrm_pol_hold(new);
1426 spin_unlock_bh(&pq->hold_queue.lock);
1427 }
1428
1429 static bool xfrm_policy_mark_match(struct xfrm_policy *policy,
1430 struct xfrm_policy *pol)
1431 {
1432 u32 mark = policy->mark.v & policy->mark.m;
1433
1434 if (policy->mark.v == pol->mark.v && policy->mark.m == pol->mark.m)
1435 return true;
1436
1437 if ((mark & pol->mark.m) == pol->mark.v &&
1438 policy->priority == pol->priority)
1439 return true;
1440
1441 return false;
1442 }
1443
1444 static u32 xfrm_pol_bin_key(const void *data, u32 len, u32 seed)
1445 {
1446 const struct xfrm_pol_inexact_key *k = data;
1447 u32 a = k->type << 24 | k->dir << 16 | k->family;
1448
1449 return jhash_3words(a, k->if_id, net_hash_mix(read_pnet(&k->net)),
1450 seed);
1451 }
1452
1453 static u32 xfrm_pol_bin_obj(const void *data, u32 len, u32 seed)
1454 {
1455 const struct xfrm_pol_inexact_bin *b = data;
1456
1457 return xfrm_pol_bin_key(&b->k, 0, seed);
1458 }
1459
1460 static int xfrm_pol_bin_cmp(struct rhashtable_compare_arg *arg,
1461 const void *ptr)
1462 {
1463 const struct xfrm_pol_inexact_key *key = arg->key;
1464 const struct xfrm_pol_inexact_bin *b = ptr;
1465 int ret;
1466
1467 if (!net_eq(read_pnet(&b->k.net), read_pnet(&key->net)))
1468 return -1;
1469
1470 ret = b->k.dir ^ key->dir;
1471 if (ret)
1472 return ret;
1473
1474 ret = b->k.type ^ key->type;
1475 if (ret)
1476 return ret;
1477
1478 ret = b->k.family ^ key->family;
1479 if (ret)
1480 return ret;
1481
1482 return b->k.if_id ^ key->if_id;
1483 }
1484
1485 static const struct rhashtable_params xfrm_pol_inexact_params = {
1486 .head_offset = offsetof(struct xfrm_pol_inexact_bin, head),
1487 .hashfn = xfrm_pol_bin_key,
1488 .obj_hashfn = xfrm_pol_bin_obj,
1489 .obj_cmpfn = xfrm_pol_bin_cmp,
1490 .automatic_shrinking = true,
1491 };
1492
1493 static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
1494 struct xfrm_policy *policy)
1495 {
1496 struct xfrm_policy *pol, *delpol = NULL;
1497 struct hlist_node *newpos = NULL;
1498 int i = 0;
1499
1500 hlist_for_each_entry(pol, chain, bydst_inexact_list) {
1501 if (pol->type == policy->type &&
1502 pol->if_id == policy->if_id &&
1503 !selector_cmp(&pol->selector, &policy->selector) &&
1504 xfrm_policy_mark_match(policy, pol) &&
1505 xfrm_sec_ctx_match(pol->security, policy->security) &&
1506 !WARN_ON(delpol)) {
1507 delpol = pol;
1508 if (policy->priority > pol->priority)
1509 continue;
1510 } else if (policy->priority >= pol->priority) {
1511 newpos = &pol->bydst_inexact_list;
1512 continue;
1513 }
1514 if (delpol)
1515 break;
1516 }
1517
1518 if (newpos)
1519 hlist_add_behind_rcu(&policy->bydst_inexact_list, newpos);
1520 else
1521 hlist_add_head_rcu(&policy->bydst_inexact_list, chain);
1522
1523 hlist_for_each_entry(pol, chain, bydst_inexact_list) {
1524 pol->pos = i;
1525 i++;
1526 }
1527 }
1528
1529 static struct xfrm_policy *xfrm_policy_insert_list(struct hlist_head *chain,
1530 struct xfrm_policy *policy,
1531 bool excl)
1532 {
1533 struct xfrm_policy *pol, *newpos = NULL, *delpol = NULL;
1534
1535 hlist_for_each_entry(pol, chain, bydst) {
1536 if (pol->type == policy->type &&
1537 pol->if_id == policy->if_id &&
1538 !selector_cmp(&pol->selector, &policy->selector) &&
1539 xfrm_policy_mark_match(policy, pol) &&
1540 xfrm_sec_ctx_match(pol->security, policy->security) &&
1541 !WARN_ON(delpol)) {
1542 if (excl)
1543 return ERR_PTR(-EEXIST);
1544 delpol = pol;
1545 if (policy->priority > pol->priority)
1546 continue;
1547 } else if (policy->priority >= pol->priority) {
1548 newpos = pol;
1549 continue;
1550 }
1551 if (delpol)
1552 break;
1553 }
1554
1555 if (newpos)
1556 hlist_add_behind_rcu(&policy->bydst, &newpos->bydst);
1557 else
1558 hlist_add_head_rcu(&policy->bydst, chain);
1559
1560 return delpol;
1561 }
1562
1563 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
1564 {
1565 struct net *net = xp_net(policy);
1566 struct xfrm_policy *delpol;
1567 struct hlist_head *chain;
1568
1569 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1570 chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
1571 if (chain)
1572 delpol = xfrm_policy_insert_list(chain, policy, excl);
1573 else
1574 delpol = xfrm_policy_inexact_insert(policy, dir, excl);
1575
1576 if (IS_ERR(delpol)) {
1577 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1578 return PTR_ERR(delpol);
1579 }
1580
1581 __xfrm_policy_link(policy, dir);
1582
1583 /* After previous checking, family can either be AF_INET or AF_INET6 */
1584 if (policy->family == AF_INET)
1585 rt_genid_bump_ipv4(net);
1586 else
1587 rt_genid_bump_ipv6(net);
1588
1589 if (delpol) {
1590 xfrm_policy_requeue(delpol, policy);
1591 __xfrm_policy_unlink(delpol, dir);
1592 }
1593 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir, policy->index);
1594 hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
1595 policy->curlft.add_time = ktime_get_real_seconds();
1596 policy->curlft.use_time = 0;
1597 if (!mod_timer(&policy->timer, jiffies + HZ))
1598 xfrm_pol_hold(policy);
1599 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1600
1601 if (delpol)
1602 xfrm_policy_kill(delpol);
1603 else if (xfrm_bydst_should_resize(net, dir, NULL))
1604 schedule_work(&net->xfrm.policy_hash_work);
1605
1606 return 0;
1607 }
1608 EXPORT_SYMBOL(xfrm_policy_insert);
1609
1610 static struct xfrm_policy *
1611 __xfrm_policy_bysel_ctx(struct hlist_head *chain, u32 mark, u32 if_id,
1612 u8 type, int dir,
1613 struct xfrm_selector *sel,
1614 struct xfrm_sec_ctx *ctx)
1615 {
1616 struct xfrm_policy *pol;
1617
1618 if (!chain)
1619 return NULL;
1620
1621 hlist_for_each_entry(pol, chain, bydst) {
1622 if (pol->type == type &&
1623 pol->if_id == if_id &&
1624 (mark & pol->mark.m) == pol->mark.v &&
1625 !selector_cmp(sel, &pol->selector) &&
1626 xfrm_sec_ctx_match(ctx, pol->security))
1627 return pol;
1628 }
1629
1630 return NULL;
1631 }
1632
1633 struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u32 if_id,
1634 u8 type, int dir,
1635 struct xfrm_selector *sel,
1636 struct xfrm_sec_ctx *ctx, int delete,
1637 int *err)
1638 {
1639 struct xfrm_pol_inexact_bin *bin = NULL;
1640 struct xfrm_policy *pol, *ret = NULL;
1641 struct hlist_head *chain;
1642
1643 *err = 0;
1644 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1645 chain = policy_hash_bysel(net, sel, sel->family, dir);
1646 if (!chain) {
1647 struct xfrm_pol_inexact_candidates cand;
1648 int i;
1649
1650 bin = xfrm_policy_inexact_lookup(net, type,
1651 sel->family, dir, if_id);
1652 if (!bin) {
1653 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1654 return NULL;
1655 }
1656
1657 if (!xfrm_policy_find_inexact_candidates(&cand, bin,
1658 &sel->saddr,
1659 &sel->daddr)) {
1660 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1661 return NULL;
1662 }
1663
1664 pol = NULL;
1665 for (i = 0; i < ARRAY_SIZE(cand.res); i++) {
1666 struct xfrm_policy *tmp;
1667
1668 tmp = __xfrm_policy_bysel_ctx(cand.res[i], mark,
1669 if_id, type, dir,
1670 sel, ctx);
1671 if (!tmp)
1672 continue;
1673
1674 if (!pol || tmp->pos < pol->pos)
1675 pol = tmp;
1676 }
1677 } else {
1678 pol = __xfrm_policy_bysel_ctx(chain, mark, if_id, type, dir,
1679 sel, ctx);
1680 }
1681
1682 if (pol) {
1683 xfrm_pol_hold(pol);
1684 if (delete) {
1685 *err = security_xfrm_policy_delete(pol->security);
1686 if (*err) {
1687 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1688 return pol;
1689 }
1690 __xfrm_policy_unlink(pol, dir);
1691 }
1692 ret = pol;
1693 }
1694 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1695
1696 if (ret && delete)
1697 xfrm_policy_kill(ret);
1698 if (bin && delete)
1699 xfrm_policy_inexact_prune_bin(bin);
1700 return ret;
1701 }
1702 EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
1703
1704 struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u32 if_id,
1705 u8 type, int dir, u32 id, int delete,
1706 int *err)
1707 {
1708 struct xfrm_policy *pol, *ret;
1709 struct hlist_head *chain;
1710
1711 *err = -ENOENT;
1712 if (xfrm_policy_id2dir(id) != dir)
1713 return NULL;
1714
1715 *err = 0;
1716 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1717 chain = net->xfrm.policy_byidx + idx_hash(net, id);
1718 ret = NULL;
1719 hlist_for_each_entry(pol, chain, byidx) {
1720 if (pol->type == type && pol->index == id &&
1721 pol->if_id == if_id &&
1722 (mark & pol->mark.m) == pol->mark.v) {
1723 xfrm_pol_hold(pol);
1724 if (delete) {
1725 *err = security_xfrm_policy_delete(
1726 pol->security);
1727 if (*err) {
1728 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1729 return pol;
1730 }
1731 __xfrm_policy_unlink(pol, dir);
1732 }
1733 ret = pol;
1734 break;
1735 }
1736 }
1737 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1738
1739 if (ret && delete)
1740 xfrm_policy_kill(ret);
1741 return ret;
1742 }
1743 EXPORT_SYMBOL(xfrm_policy_byid);
1744
1745 #ifdef CONFIG_SECURITY_NETWORK_XFRM
1746 static inline int
1747 xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
1748 {
1749 struct xfrm_policy *pol;
1750 int err = 0;
1751
1752 list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
1753 if (pol->walk.dead ||
1754 xfrm_policy_id2dir(pol->index) >= XFRM_POLICY_MAX ||
1755 pol->type != type)
1756 continue;
1757
1758 err = security_xfrm_policy_delete(pol->security);
1759 if (err) {
1760 xfrm_audit_policy_delete(pol, 0, task_valid);
1761 return err;
1762 }
1763 }
1764 return err;
1765 }
1766 #else
1767 static inline int
1768 xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
1769 {
1770 return 0;
1771 }
1772 #endif
1773
1774 int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
1775 {
1776 int dir, err = 0, cnt = 0;
1777 struct xfrm_policy *pol;
1778
1779 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1780
1781 err = xfrm_policy_flush_secctx_check(net, type, task_valid);
1782 if (err)
1783 goto out;
1784
1785 again:
1786 list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
1787 dir = xfrm_policy_id2dir(pol->index);
1788 if (pol->walk.dead ||
1789 dir >= XFRM_POLICY_MAX ||
1790 pol->type != type)
1791 continue;
1792
1793 __xfrm_policy_unlink(pol, dir);
1794 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1795 cnt++;
1796 xfrm_audit_policy_delete(pol, 1, task_valid);
1797 xfrm_policy_kill(pol);
1798 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1799 goto again;
1800 }
1801 if (cnt)
1802 __xfrm_policy_inexact_flush(net);
1803 else
1804 err = -ESRCH;
1805 out:
1806 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1807 return err;
1808 }
1809 EXPORT_SYMBOL(xfrm_policy_flush);
1810
1811 int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
1812 int (*func)(struct xfrm_policy *, int, int, void*),
1813 void *data)
1814 {
1815 struct xfrm_policy *pol;
1816 struct xfrm_policy_walk_entry *x;
1817 int error = 0;
1818
1819 if (walk->type >= XFRM_POLICY_TYPE_MAX &&
1820 walk->type != XFRM_POLICY_TYPE_ANY)
1821 return -EINVAL;
1822
1823 if (list_empty(&walk->walk.all) && walk->seq != 0)
1824 return 0;
1825
1826 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1827 if (list_empty(&walk->walk.all))
1828 x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
1829 else
1830 x = list_first_entry(&walk->walk.all,
1831 struct xfrm_policy_walk_entry, all);
1832
1833 list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
1834 if (x->dead)
1835 continue;
1836 pol = container_of(x, struct xfrm_policy, walk);
1837 if (walk->type != XFRM_POLICY_TYPE_ANY &&
1838 walk->type != pol->type)
1839 continue;
1840 error = func(pol, xfrm_policy_id2dir(pol->index),
1841 walk->seq, data);
1842 if (error) {
1843 list_move_tail(&walk->walk.all, &x->all);
1844 goto out;
1845 }
1846 walk->seq++;
1847 }
1848 if (walk->seq == 0) {
1849 error = -ENOENT;
1850 goto out;
1851 }
1852 list_del_init(&walk->walk.all);
1853 out:
1854 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1855 return error;
1856 }
1857 EXPORT_SYMBOL(xfrm_policy_walk);
1858
1859 void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
1860 {
1861 INIT_LIST_HEAD(&walk->walk.all);
1862 walk->walk.dead = 1;
1863 walk->type = type;
1864 walk->seq = 0;
1865 }
1866 EXPORT_SYMBOL(xfrm_policy_walk_init);
1867
1868 void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net)
1869 {
1870 if (list_empty(&walk->walk.all))
1871 return;
1872
1873 spin_lock_bh(&net->xfrm.xfrm_policy_lock); /*FIXME where is net? */
1874 list_del(&walk->walk.all);
1875 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1876 }
1877 EXPORT_SYMBOL(xfrm_policy_walk_done);
1878
1879 /*
1880 * Find policy to apply to this flow.
1881 *
1882 * Returns 0 if policy found, else an -errno.
1883 */
1884 static int xfrm_policy_match(const struct xfrm_policy *pol,
1885 const struct flowi *fl,
1886 u8 type, u16 family, int dir, u32 if_id)
1887 {
1888 const struct xfrm_selector *sel = &pol->selector;
1889 int ret = -ESRCH;
1890 bool match;
1891
1892 if (pol->family != family ||
1893 pol->if_id != if_id ||
1894 (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
1895 pol->type != type)
1896 return ret;
1897
1898 match = xfrm_selector_match(sel, fl, family);
1899 if (match)
1900 ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid,
1901 dir);
1902 return ret;
1903 }
1904
1905 static struct xfrm_pol_inexact_node *
1906 xfrm_policy_lookup_inexact_addr(const struct rb_root *r,
1907 seqcount_t *count,
1908 const xfrm_address_t *addr, u16 family)
1909 {
1910 const struct rb_node *parent;
1911 int seq;
1912
1913 again:
1914 seq = read_seqcount_begin(count);
1915
1916 parent = rcu_dereference_raw(r->rb_node);
1917 while (parent) {
1918 struct xfrm_pol_inexact_node *node;
1919 int delta;
1920
1921 node = rb_entry(parent, struct xfrm_pol_inexact_node, node);
1922
1923 delta = xfrm_policy_addr_delta(addr, &node->addr,
1924 node->prefixlen, family);
1925 if (delta < 0) {
1926 parent = rcu_dereference_raw(parent->rb_left);
1927 continue;
1928 } else if (delta > 0) {
1929 parent = rcu_dereference_raw(parent->rb_right);
1930 continue;
1931 }
1932
1933 return node;
1934 }
1935
1936 if (read_seqcount_retry(count, seq))
1937 goto again;
1938
1939 return NULL;
1940 }
1941
1942 static bool
1943 xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
1944 struct xfrm_pol_inexact_bin *b,
1945 const xfrm_address_t *saddr,
1946 const xfrm_address_t *daddr)
1947 {
1948 struct xfrm_pol_inexact_node *n;
1949 u16 family;
1950
1951 if (!b)
1952 return false;
1953
1954 family = b->k.family;
1955 memset(cand, 0, sizeof(*cand));
1956 cand->res[XFRM_POL_CAND_ANY] = &b->hhead;
1957
1958 n = xfrm_policy_lookup_inexact_addr(&b->root_d, &b->count, daddr,
1959 family);
1960 if (n) {
1961 cand->res[XFRM_POL_CAND_DADDR] = &n->hhead;
1962 n = xfrm_policy_lookup_inexact_addr(&n->root, &b->count, saddr,
1963 family);
1964 if (n)
1965 cand->res[XFRM_POL_CAND_BOTH] = &n->hhead;
1966 }
1967
1968 n = xfrm_policy_lookup_inexact_addr(&b->root_s, &b->count, saddr,
1969 family);
1970 if (n)
1971 cand->res[XFRM_POL_CAND_SADDR] = &n->hhead;
1972
1973 return true;
1974 }
1975
1976 static struct xfrm_pol_inexact_bin *
1977 xfrm_policy_inexact_lookup_rcu(struct net *net, u8 type, u16 family,
1978 u8 dir, u32 if_id)
1979 {
1980 struct xfrm_pol_inexact_key k = {
1981 .family = family,
1982 .type = type,
1983 .dir = dir,
1984 .if_id = if_id,
1985 };
1986
1987 write_pnet(&k.net, net);
1988
1989 return rhashtable_lookup(&xfrm_policy_inexact_table, &k,
1990 xfrm_pol_inexact_params);
1991 }
1992
1993 static struct xfrm_pol_inexact_bin *
1994 xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family,
1995 u8 dir, u32 if_id)
1996 {
1997 struct xfrm_pol_inexact_bin *bin;
1998
1999 lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
2000
2001 rcu_read_lock();
2002 bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
2003 rcu_read_unlock();
2004
2005 return bin;
2006 }
2007
2008 static struct xfrm_policy *
2009 __xfrm_policy_eval_candidates(struct hlist_head *chain,
2010 struct xfrm_policy *prefer,
2011 const struct flowi *fl,
2012 u8 type, u16 family, int dir, u32 if_id)
2013 {
2014 u32 priority = prefer ? prefer->priority : ~0u;
2015 struct xfrm_policy *pol;
2016
2017 if (!chain)
2018 return NULL;
2019
2020 hlist_for_each_entry_rcu(pol, chain, bydst) {
2021 int err;
2022
2023 if (pol->priority > priority)
2024 break;
2025
2026 err = xfrm_policy_match(pol, fl, type, family, dir, if_id);
2027 if (err) {
2028 if (err != -ESRCH)
2029 return ERR_PTR(err);
2030
2031 continue;
2032 }
2033
2034 if (prefer) {
2035 /* matches. Is it older than *prefer? */
2036 if (pol->priority == priority &&
2037 prefer->pos < pol->pos)
2038 return prefer;
2039 }
2040
2041 return pol;
2042 }
2043
2044 return NULL;
2045 }
2046
2047 static struct xfrm_policy *
2048 xfrm_policy_eval_candidates(struct xfrm_pol_inexact_candidates *cand,
2049 struct xfrm_policy *prefer,
2050 const struct flowi *fl,
2051 u8 type, u16 family, int dir, u32 if_id)
2052 {
2053 struct xfrm_policy *tmp;
2054 int i;
2055
2056 for (i = 0; i < ARRAY_SIZE(cand->res); i++) {
2057 tmp = __xfrm_policy_eval_candidates(cand->res[i],
2058 prefer,
2059 fl, type, family, dir,
2060 if_id);
2061 if (!tmp)
2062 continue;
2063
2064 if (IS_ERR(tmp))
2065 return tmp;
2066 prefer = tmp;
2067 }
2068
2069 return prefer;
2070 }
2071
2072 static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
2073 const struct flowi *fl,
2074 u16 family, u8 dir,
2075 u32 if_id)
2076 {
2077 struct xfrm_pol_inexact_candidates cand;
2078 const xfrm_address_t *daddr, *saddr;
2079 struct xfrm_pol_inexact_bin *bin;
2080 struct xfrm_policy *pol, *ret;
2081 struct hlist_head *chain;
2082 unsigned int sequence;
2083 int err;
2084
2085 daddr = xfrm_flowi_daddr(fl, family);
2086 saddr = xfrm_flowi_saddr(fl, family);
2087 if (unlikely(!daddr || !saddr))
2088 return NULL;
2089
2090 rcu_read_lock();
2091 retry:
2092 do {
2093 sequence = read_seqcount_begin(&xfrm_policy_hash_generation);
2094 chain = policy_hash_direct(net, daddr, saddr, family, dir);
2095 } while (read_seqcount_retry(&xfrm_policy_hash_generation, sequence));
2096
2097 ret = NULL;
2098 hlist_for_each_entry_rcu(pol, chain, bydst) {
2099 err = xfrm_policy_match(pol, fl, type, family, dir, if_id);
2100 if (err) {
2101 if (err == -ESRCH)
2102 continue;
2103 else {
2104 ret = ERR_PTR(err);
2105 goto fail;
2106 }
2107 } else {
2108 ret = pol;
2109 break;
2110 }
2111 }
2112 bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
2113 if (!bin || !xfrm_policy_find_inexact_candidates(&cand, bin, saddr,
2114 daddr))
2115 goto skip_inexact;
2116
2117 pol = xfrm_policy_eval_candidates(&cand, ret, fl, type,
2118 family, dir, if_id);
2119 if (pol) {
2120 ret = pol;
2121 if (IS_ERR(pol))
2122 goto fail;
2123 }
2124
2125 skip_inexact:
2126 if (read_seqcount_retry(&xfrm_policy_hash_generation, sequence))
2127 goto retry;
2128
2129 if (ret && !xfrm_pol_hold_rcu(ret))
2130 goto retry;
2131 fail:
2132 rcu_read_unlock();
2133
2134 return ret;
2135 }
2136
2137 static struct xfrm_policy *xfrm_policy_lookup(struct net *net,
2138 const struct flowi *fl,
2139 u16 family, u8 dir, u32 if_id)
2140 {
2141 #ifdef CONFIG_XFRM_SUB_POLICY
2142 struct xfrm_policy *pol;
2143
2144 pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family,
2145 dir, if_id);
2146 if (pol != NULL)
2147 return pol;
2148 #endif
2149 return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family,
2150 dir, if_id);
2151 }
2152
2153 static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
2154 const struct flowi *fl,
2155 u16 family, u32 if_id)
2156 {
2157 struct xfrm_policy *pol;
2158
2159 rcu_read_lock();
2160 again:
2161 pol = rcu_dereference(sk->sk_policy[dir]);
2162 if (pol != NULL) {
2163 bool match;
2164 int err = 0;
2165
2166 if (pol->family != family) {
2167 pol = NULL;
2168 goto out;
2169 }
2170
2171 match = xfrm_selector_match(&pol->selector, fl, family);
2172 if (match) {
2173 if ((sk->sk_mark & pol->mark.m) != pol->mark.v ||
2174 pol->if_id != if_id) {
2175 pol = NULL;
2176 goto out;
2177 }
2178 err = security_xfrm_policy_lookup(pol->security,
2179 fl->flowi_secid,
2180 dir);
2181 if (!err) {
2182 if (!xfrm_pol_hold_rcu(pol))
2183 goto again;
2184 } else if (err == -ESRCH) {
2185 pol = NULL;
2186 } else {
2187 pol = ERR_PTR(err);
2188 }
2189 } else
2190 pol = NULL;
2191 }
2192 out:
2193 rcu_read_unlock();
2194 return pol;
2195 }
2196
2197 static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
2198 {
2199 struct net *net = xp_net(pol);
2200
2201 list_add(&pol->walk.all, &net->xfrm.policy_all);
2202 net->xfrm.policy_count[dir]++;
2203 xfrm_pol_hold(pol);
2204 }
2205
2206 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
2207 int dir)
2208 {
2209 struct net *net = xp_net(pol);
2210
2211 if (list_empty(&pol->walk.all))
2212 return NULL;
2213
2214 /* Socket policies are not hashed. */
2215 if (!hlist_unhashed(&pol->bydst)) {
2216 hlist_del_rcu(&pol->bydst);
2217 hlist_del_init(&pol->bydst_inexact_list);
2218 hlist_del(&pol->byidx);
2219 }
2220
2221 list_del_init(&pol->walk.all);
2222 net->xfrm.policy_count[dir]--;
2223
2224 return pol;
2225 }
2226
2227 static void xfrm_sk_policy_link(struct xfrm_policy *pol, int dir)
2228 {
2229 __xfrm_policy_link(pol, XFRM_POLICY_MAX + dir);
2230 }
2231
2232 static void xfrm_sk_policy_unlink(struct xfrm_policy *pol, int dir)
2233 {
2234 __xfrm_policy_unlink(pol, XFRM_POLICY_MAX + dir);
2235 }
2236
2237 int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
2238 {
2239 struct net *net = xp_net(pol);
2240
2241 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2242 pol = __xfrm_policy_unlink(pol, dir);
2243 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2244 if (pol) {
2245 xfrm_policy_kill(pol);
2246 return 0;
2247 }
2248 return -ENOENT;
2249 }
2250 EXPORT_SYMBOL(xfrm_policy_delete);
2251
2252 int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
2253 {
2254 struct net *net = sock_net(sk);
2255 struct xfrm_policy *old_pol;
2256
2257 #ifdef CONFIG_XFRM_SUB_POLICY
2258 if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
2259 return -EINVAL;
2260 #endif
2261
2262 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2263 old_pol = rcu_dereference_protected(sk->sk_policy[dir],
2264 lockdep_is_held(&net->xfrm.xfrm_policy_lock));
2265 if (pol) {
2266 pol->curlft.add_time = ktime_get_real_seconds();
2267 pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0);
2268 xfrm_sk_policy_link(pol, dir);
2269 }
2270 rcu_assign_pointer(sk->sk_policy[dir], pol);
2271 if (old_pol) {
2272 if (pol)
2273 xfrm_policy_requeue(old_pol, pol);
2274
2275 /* Unlinking succeeds always. This is the only function
2276 * allowed to delete or replace socket policy.
2277 */
2278 xfrm_sk_policy_unlink(old_pol, dir);
2279 }
2280 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2281
2282 if (old_pol) {
2283 xfrm_policy_kill(old_pol);
2284 }
2285 return 0;
2286 }
2287
2288 static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
2289 {
2290 struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);
2291 struct net *net = xp_net(old);
2292
2293 if (newp) {
2294 newp->selector = old->selector;
2295 if (security_xfrm_policy_clone(old->security,
2296 &newp->security)) {
2297 kfree(newp);
2298 return NULL; /* ENOMEM */
2299 }
2300 newp->lft = old->lft;
2301 newp->curlft = old->curlft;
2302 newp->mark = old->mark;
2303 newp->if_id = old->if_id;
2304 newp->action = old->action;
2305 newp->flags = old->flags;
2306 newp->xfrm_nr = old->xfrm_nr;
2307 newp->index = old->index;
2308 newp->type = old->type;
2309 newp->family = old->family;
2310 memcpy(newp->xfrm_vec, old->xfrm_vec,
2311 newp->xfrm_nr*sizeof(struct xfrm_tmpl));
2312 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2313 xfrm_sk_policy_link(newp, dir);
2314 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2315 xfrm_pol_put(newp);
2316 }
2317 return newp;
2318 }
2319
2320 int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
2321 {
2322 const struct xfrm_policy *p;
2323 struct xfrm_policy *np;
2324 int i, ret = 0;
2325
2326 rcu_read_lock();
2327 for (i = 0; i < 2; i++) {
2328 p = rcu_dereference(osk->sk_policy[i]);
2329 if (p) {
2330 np = clone_policy(p, i);
2331 if (unlikely(!np)) {
2332 ret = -ENOMEM;
2333 break;
2334 }
2335 rcu_assign_pointer(sk->sk_policy[i], np);
2336 }
2337 }
2338 rcu_read_unlock();
2339 return ret;
2340 }
2341
2342 static int
2343 xfrm_get_saddr(struct net *net, int oif, xfrm_address_t *local,
2344 xfrm_address_t *remote, unsigned short family, u32 mark)
2345 {
2346 int err;
2347 const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2348
2349 if (unlikely(afinfo == NULL))
2350 return -EINVAL;
2351 err = afinfo->get_saddr(net, oif, local, remote, mark);
2352 rcu_read_unlock();
2353 return err;
2354 }
2355
2356 /* Resolve list of templates for the flow, given policy. */
2357
2358 static int
2359 xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
2360 struct xfrm_state **xfrm, unsigned short family)
2361 {
2362 struct net *net = xp_net(policy);
2363 int nx;
2364 int i, error;
2365 xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
2366 xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
2367 xfrm_address_t tmp;
2368
2369 for (nx = 0, i = 0; i < policy->xfrm_nr; i++) {
2370 struct xfrm_state *x;
2371 xfrm_address_t *remote = daddr;
2372 xfrm_address_t *local = saddr;
2373 struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];
2374
2375 if (tmpl->mode == XFRM_MODE_TUNNEL ||
2376 tmpl->mode == XFRM_MODE_BEET) {
2377 remote = &tmpl->id.daddr;
2378 local = &tmpl->saddr;
2379 if (xfrm_addr_any(local, tmpl->encap_family)) {
2380 error = xfrm_get_saddr(net, fl->flowi_oif,
2381 &tmp, remote,
2382 tmpl->encap_family, 0);
2383 if (error)
2384 goto fail;
2385 local = &tmp;
2386 }
2387 }
2388
2389 x = xfrm_state_find(remote, local, fl, tmpl, policy, &error,
2390 family, policy->if_id);
2391
2392 if (x && x->km.state == XFRM_STATE_VALID) {
2393 xfrm[nx++] = x;
2394 daddr = remote;
2395 saddr = local;
2396 continue;
2397 }
2398 if (x) {
2399 error = (x->km.state == XFRM_STATE_ERROR ?
2400 -EINVAL : -EAGAIN);
2401 xfrm_state_put(x);
2402 } else if (error == -ESRCH) {
2403 error = -EAGAIN;
2404 }
2405
2406 if (!tmpl->optional)
2407 goto fail;
2408 }
2409 return nx;
2410
2411 fail:
2412 for (nx--; nx >= 0; nx--)
2413 xfrm_state_put(xfrm[nx]);
2414 return error;
2415 }
2416
2417 static int
2418 xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
2419 struct xfrm_state **xfrm, unsigned short family)
2420 {
2421 struct xfrm_state *tp[XFRM_MAX_DEPTH];
2422 struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
2423 int cnx = 0;
2424 int error;
2425 int ret;
2426 int i;
2427
2428 for (i = 0; i < npols; i++) {
2429 if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
2430 error = -ENOBUFS;
2431 goto fail;
2432 }
2433
2434 ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
2435 if (ret < 0) {
2436 error = ret;
2437 goto fail;
2438 } else
2439 cnx += ret;
2440 }
2441
2442 /* found states are sorted for outbound processing */
2443 if (npols > 1)
2444 xfrm_state_sort(xfrm, tpp, cnx, family);
2445
2446 return cnx;
2447
2448 fail:
2449 for (cnx--; cnx >= 0; cnx--)
2450 xfrm_state_put(tpp[cnx]);
2451 return error;
2452
2453 }
2454
2455 static int xfrm_get_tos(const struct flowi *fl, int family)
2456 {
2457 if (family == AF_INET)
2458 return IPTOS_RT_MASK & fl->u.ip4.flowi4_tos;
2459
2460 return 0;
2461 }
2462
2463 static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
2464 {
2465 const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2466 struct dst_ops *dst_ops;
2467 struct xfrm_dst *xdst;
2468
2469 if (!afinfo)
2470 return ERR_PTR(-EINVAL);
2471
2472 switch (family) {
2473 case AF_INET:
2474 dst_ops = &net->xfrm.xfrm4_dst_ops;
2475 break;
2476 #if IS_ENABLED(CONFIG_IPV6)
2477 case AF_INET6:
2478 dst_ops = &net->xfrm.xfrm6_dst_ops;
2479 break;
2480 #endif
2481 default:
2482 BUG();
2483 }
2484 xdst = dst_alloc(dst_ops, NULL, 1, DST_OBSOLETE_NONE, 0);
2485
2486 if (likely(xdst)) {
2487 struct dst_entry *dst = &xdst->u.dst;
2488
2489 memset(dst + 1, 0, sizeof(*xdst) - sizeof(*dst));
2490 } else
2491 xdst = ERR_PTR(-ENOBUFS);
2492
2493 rcu_read_unlock();
2494
2495 return xdst;
2496 }
2497
2498 static void xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
2499 int nfheader_len)
2500 {
2501 if (dst->ops->family == AF_INET6) {
2502 struct rt6_info *rt = (struct rt6_info *)dst;
2503 path->path_cookie = rt6_get_cookie(rt);
2504 path->u.rt6.rt6i_nfheader_len = nfheader_len;
2505 }
2506 }
2507
2508 static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
2509 const struct flowi *fl)
2510 {
2511 const struct xfrm_policy_afinfo *afinfo =
2512 xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
2513 int err;
2514
2515 if (!afinfo)
2516 return -EINVAL;
2517
2518 err = afinfo->fill_dst(xdst, dev, fl);
2519
2520 rcu_read_unlock();
2521
2522 return err;
2523 }
2524
2525
2526 /* Allocate chain of dst_entry's, attach known xfrm's, calculate
2527 * all the metrics... Shortly, bundle a bundle.
2528 */
2529
2530 static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
2531 struct xfrm_state **xfrm,
2532 struct xfrm_dst **bundle,
2533 int nx,
2534 const struct flowi *fl,
2535 struct dst_entry *dst)
2536 {
2537 const struct xfrm_state_afinfo *afinfo;
2538 const struct xfrm_mode *inner_mode;
2539 struct net *net = xp_net(policy);
2540 unsigned long now = jiffies;
2541 struct net_device *dev;
2542 struct xfrm_dst *xdst_prev = NULL;
2543 struct xfrm_dst *xdst0 = NULL;
2544 int i = 0;
2545 int err;
2546 int header_len = 0;
2547 int nfheader_len = 0;
2548 int trailer_len = 0;
2549 int tos;
2550 int family = policy->selector.family;
2551 xfrm_address_t saddr, daddr;
2552
2553 xfrm_flowi_addr_get(fl, &saddr, &daddr, family);
2554
2555 tos = xfrm_get_tos(fl, family);
2556
2557 dst_hold(dst);
2558
2559 for (; i < nx; i++) {
2560 struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
2561 struct dst_entry *dst1 = &xdst->u.dst;
2562
2563 err = PTR_ERR(xdst);
2564 if (IS_ERR(xdst)) {
2565 dst_release(dst);
2566 goto put_states;
2567 }
2568
2569 bundle[i] = xdst;
2570 if (!xdst_prev)
2571 xdst0 = xdst;
2572 else
2573 /* Ref count is taken during xfrm_alloc_dst()
2574 * No need to do dst_clone() on dst1
2575 */
2576 xfrm_dst_set_child(xdst_prev, &xdst->u.dst);
2577
2578 if (xfrm[i]->sel.family == AF_UNSPEC) {
2579 inner_mode = xfrm_ip2inner_mode(xfrm[i],
2580 xfrm_af2proto(family));
2581 if (!inner_mode) {
2582 err = -EAFNOSUPPORT;
2583 dst_release(dst);
2584 goto put_states;
2585 }
2586 } else
2587 inner_mode = &xfrm[i]->inner_mode;
2588
2589 xdst->route = dst;
2590 dst_copy_metrics(dst1, dst);
2591
2592 if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
2593 __u32 mark = 0;
2594
2595 if (xfrm[i]->props.smark.v || xfrm[i]->props.smark.m)
2596 mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);
2597
2598 family = xfrm[i]->props.family;
2599 dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif,
2600 &saddr, &daddr, family, mark);
2601 err = PTR_ERR(dst);
2602 if (IS_ERR(dst))
2603 goto put_states;
2604 } else
2605 dst_hold(dst);
2606
2607 dst1->xfrm = xfrm[i];
2608 xdst->xfrm_genid = xfrm[i]->genid;
2609
2610 dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
2611 dst1->flags |= DST_HOST;
2612 dst1->lastuse = now;
2613
2614 dst1->input = dst_discard;
2615
2616 rcu_read_lock();
2617 afinfo = xfrm_state_afinfo_get_rcu(inner_mode->family);
2618 if (likely(afinfo))
2619 dst1->output = afinfo->output;
2620 else
2621 dst1->output = dst_discard_out;
2622 rcu_read_unlock();
2623
2624 xdst_prev = xdst;
2625
2626 header_len += xfrm[i]->props.header_len;
2627 if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
2628 nfheader_len += xfrm[i]->props.header_len;
2629 trailer_len += xfrm[i]->props.trailer_len;
2630 }
2631
2632 xfrm_dst_set_child(xdst_prev, dst);
2633 xdst0->path = dst;
2634
2635 err = -ENODEV;
2636 dev = dst->dev;
2637 if (!dev)
2638 goto free_dst;
2639
2640 xfrm_init_path(xdst0, dst, nfheader_len);
2641 xfrm_init_pmtu(bundle, nx);
2642
2643 for (xdst_prev = xdst0; xdst_prev != (struct xfrm_dst *)dst;
2644 xdst_prev = (struct xfrm_dst *) xfrm_dst_child(&xdst_prev->u.dst)) {
2645 err = xfrm_fill_dst(xdst_prev, dev, fl);
2646 if (err)
2647 goto free_dst;
2648
2649 xdst_prev->u.dst.header_len = header_len;
2650 xdst_prev->u.dst.trailer_len = trailer_len;
2651 header_len -= xdst_prev->u.dst.xfrm->props.header_len;
2652 trailer_len -= xdst_prev->u.dst.xfrm->props.trailer_len;
2653 }
2654
2655 return &xdst0->u.dst;
2656
2657 put_states:
2658 for (; i < nx; i++)
2659 xfrm_state_put(xfrm[i]);
2660 free_dst:
2661 if (xdst0)
2662 dst_release_immediate(&xdst0->u.dst);
2663
2664 return ERR_PTR(err);
2665 }
2666
2667 static int xfrm_expand_policies(const struct flowi *fl, u16 family,
2668 struct xfrm_policy **pols,
2669 int *num_pols, int *num_xfrms)
2670 {
2671 int i;
2672
2673 if (*num_pols == 0 || !pols[0]) {
2674 *num_pols = 0;
2675 *num_xfrms = 0;
2676 return 0;
2677 }
2678 if (IS_ERR(pols[0]))
2679 return PTR_ERR(pols[0]);
2680
2681 *num_xfrms = pols[0]->xfrm_nr;
2682
2683 #ifdef CONFIG_XFRM_SUB_POLICY
2684 if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW &&
2685 pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
2686 pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
2687 XFRM_POLICY_TYPE_MAIN,
2688 fl, family,
2689 XFRM_POLICY_OUT,
2690 pols[0]->if_id);
2691 if (pols[1]) {
2692 if (IS_ERR(pols[1])) {
2693 xfrm_pols_put(pols, *num_pols);
2694 return PTR_ERR(pols[1]);
2695 }
2696 (*num_pols)++;
2697 (*num_xfrms) += pols[1]->xfrm_nr;
2698 }
2699 }
2700 #endif
2701 for (i = 0; i < *num_pols; i++) {
2702 if (pols[i]->action != XFRM_POLICY_ALLOW) {
2703 *num_xfrms = -1;
2704 break;
2705 }
2706 }
2707
2708 return 0;
2709
2710 }
2711
2712 static struct xfrm_dst *
2713 xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
2714 const struct flowi *fl, u16 family,
2715 struct dst_entry *dst_orig)
2716 {
2717 struct net *net = xp_net(pols[0]);
2718 struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
2719 struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
2720 struct xfrm_dst *xdst;
2721 struct dst_entry *dst;
2722 int err;
2723
2724 /* Try to instantiate a bundle */
2725 err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
2726 if (err <= 0) {
2727 if (err == 0)
2728 return NULL;
2729
2730 if (err != -EAGAIN)
2731 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
2732 return ERR_PTR(err);
2733 }
2734
2735 dst = xfrm_bundle_create(pols[0], xfrm, bundle, err, fl, dst_orig);
2736 if (IS_ERR(dst)) {
2737 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
2738 return ERR_CAST(dst);
2739 }
2740
2741 xdst = (struct xfrm_dst *)dst;
2742 xdst->num_xfrms = err;
2743 xdst->num_pols = num_pols;
2744 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
2745 xdst->policy_genid = atomic_read(&pols[0]->genid);
2746
2747 return xdst;
2748 }
2749
2750 static void xfrm_policy_queue_process(struct timer_list *t)
2751 {
2752 struct sk_buff *skb;
2753 struct sock *sk;
2754 struct dst_entry *dst;
2755 struct xfrm_policy *pol = from_timer(pol, t, polq.hold_timer);
2756 struct net *net = xp_net(pol);
2757 struct xfrm_policy_queue *pq = &pol->polq;
2758 struct flowi fl;
2759 struct sk_buff_head list;
2760
2761 spin_lock(&pq->hold_queue.lock);
2762 skb = skb_peek(&pq->hold_queue);
2763 if (!skb) {
2764 spin_unlock(&pq->hold_queue.lock);
2765 goto out;
2766 }
2767 dst = skb_dst(skb);
2768 sk = skb->sk;
2769 xfrm_decode_session(skb, &fl, dst->ops->family);
2770 spin_unlock(&pq->hold_queue.lock);
2771
2772 dst_hold(xfrm_dst_path(dst));
2773 dst = xfrm_lookup(net, xfrm_dst_path(dst), &fl, sk, XFRM_LOOKUP_QUEUE);
2774 if (IS_ERR(dst))
2775 goto purge_queue;
2776
2777 if (dst->flags & DST_XFRM_QUEUE) {
2778 dst_release(dst);
2779
2780 if (pq->timeout >= XFRM_QUEUE_TMO_MAX)
2781 goto purge_queue;
2782
2783 pq->timeout = pq->timeout << 1;
2784 if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout))
2785 xfrm_pol_hold(pol);
2786 goto out;
2787 }
2788
2789 dst_release(dst);
2790
2791 __skb_queue_head_init(&list);
2792
2793 spin_lock(&pq->hold_queue.lock);
2794 pq->timeout = 0;
2795 skb_queue_splice_init(&pq->hold_queue, &list);
2796 spin_unlock(&pq->hold_queue.lock);
2797
2798 while (!skb_queue_empty(&list)) {
2799 skb = __skb_dequeue(&list);
2800
2801 xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family);
2802 dst_hold(xfrm_dst_path(skb_dst(skb)));
2803 dst = xfrm_lookup(net, xfrm_dst_path(skb_dst(skb)), &fl, skb->sk, 0);
2804 if (IS_ERR(dst)) {
2805 kfree_skb(skb);
2806 continue;
2807 }
2808
2809 nf_reset(skb);
2810 skb_dst_drop(skb);
2811 skb_dst_set(skb, dst);
2812
2813 dst_output(net, skb->sk, skb);
2814 }
2815
2816 out:
2817 xfrm_pol_put(pol);
2818 return;
2819
2820 purge_queue:
2821 pq->timeout = 0;
2822 skb_queue_purge(&pq->hold_queue);
2823 xfrm_pol_put(pol);
2824 }
2825
2826 static int xdst_queue_output(struct net *net, struct sock *sk, struct sk_buff *skb)
2827 {
2828 unsigned long sched_next;
2829 struct dst_entry *dst = skb_dst(skb);
2830 struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
2831 struct xfrm_policy *pol = xdst->pols[0];
2832 struct xfrm_policy_queue *pq = &pol->polq;
2833
2834 if (unlikely(skb_fclone_busy(sk, skb))) {
2835 kfree_skb(skb);
2836 return 0;
2837 }
2838
2839 if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
2840 kfree_skb(skb);
2841 return -EAGAIN;
2842 }
2843
2844 skb_dst_force(skb);
2845
2846 spin_lock_bh(&pq->hold_queue.lock);
2847
2848 if (!pq->timeout)
2849 pq->timeout = XFRM_QUEUE_TMO_MIN;
2850
2851 sched_next = jiffies + pq->timeout;
2852
2853 if (del_timer(&pq->hold_timer)) {
2854 if (time_before(pq->hold_timer.expires, sched_next))
2855 sched_next = pq->hold_timer.expires;
2856 xfrm_pol_put(pol);
2857 }
2858
2859 __skb_queue_tail(&pq->hold_queue, skb);
2860 if (!mod_timer(&pq->hold_timer, sched_next))
2861 xfrm_pol_hold(pol);
2862
2863 spin_unlock_bh(&pq->hold_queue.lock);
2864
2865 return 0;
2866 }
2867
2868 static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
2869 struct xfrm_flo *xflo,
2870 const struct flowi *fl,
2871 int num_xfrms,
2872 u16 family)
2873 {
2874 int err;
2875 struct net_device *dev;
2876 struct dst_entry *dst;
2877 struct dst_entry *dst1;
2878 struct xfrm_dst *xdst;
2879
2880 xdst = xfrm_alloc_dst(net, family);
2881 if (IS_ERR(xdst))
2882 return xdst;
2883
2884 if (!(xflo->flags & XFRM_LOOKUP_QUEUE) ||
2885 net->xfrm.sysctl_larval_drop ||
2886 num_xfrms <= 0)
2887 return xdst;
2888
2889 dst = xflo->dst_orig;
2890 dst1 = &xdst->u.dst;
2891 dst_hold(dst);
2892 xdst->route = dst;
2893
2894 dst_copy_metrics(dst1, dst);
2895
2896 dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
2897 dst1->flags |= DST_HOST | DST_XFRM_QUEUE;
2898 dst1->lastuse = jiffies;
2899
2900 dst1->input = dst_discard;
2901 dst1->output = xdst_queue_output;
2902
2903 dst_hold(dst);
2904 xfrm_dst_set_child(xdst, dst);
2905 xdst->path = dst;
2906
2907 xfrm_init_path((struct xfrm_dst *)dst1, dst, 0);
2908
2909 err = -ENODEV;
2910 dev = dst->dev;
2911 if (!dev)
2912 goto free_dst;
2913
2914 err = xfrm_fill_dst(xdst, dev, fl);
2915 if (err)
2916 goto free_dst;
2917
2918 out:
2919 return xdst;
2920
2921 free_dst:
2922 dst_release(dst1);
2923 xdst = ERR_PTR(err);
2924 goto out;
2925 }
2926
2927 static struct xfrm_dst *xfrm_bundle_lookup(struct net *net,
2928 const struct flowi *fl,
2929 u16 family, u8 dir,
2930 struct xfrm_flo *xflo, u32 if_id)
2931 {
2932 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
2933 int num_pols = 0, num_xfrms = 0, err;
2934 struct xfrm_dst *xdst;
2935
2936 /* Resolve policies to use if we couldn't get them from
2937 * previous cache entry */
2938 num_pols = 1;
2939 pols[0] = xfrm_policy_lookup(net, fl, family, dir, if_id);
2940 err = xfrm_expand_policies(fl, family, pols,
2941 &num_pols, &num_xfrms);
2942 if (err < 0)
2943 goto inc_error;
2944 if (num_pols == 0)
2945 return NULL;
2946 if (num_xfrms <= 0)
2947 goto make_dummy_bundle;
2948
2949 xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family,
2950 xflo->dst_orig);
2951 if (IS_ERR(xdst)) {
2952 err = PTR_ERR(xdst);
2953 if (err == -EREMOTE) {
2954 xfrm_pols_put(pols, num_pols);
2955 return NULL;
2956 }
2957
2958 if (err != -EAGAIN)
2959 goto error;
2960 goto make_dummy_bundle;
2961 } else if (xdst == NULL) {
2962 num_xfrms = 0;
2963 goto make_dummy_bundle;
2964 }
2965
2966 return xdst;
2967
2968 make_dummy_bundle:
2969 /* We found policies, but there's no bundles to instantiate:
2970 * either because the policy blocks, has no transformations or
2971 * we could not build template (no xfrm_states).*/
2972 xdst = xfrm_create_dummy_bundle(net, xflo, fl, num_xfrms, family);
2973 if (IS_ERR(xdst)) {
2974 xfrm_pols_put(pols, num_pols);
2975 return ERR_CAST(xdst);
2976 }
2977 xdst->num_pols = num_pols;
2978 xdst->num_xfrms = num_xfrms;
2979 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
2980
2981 return xdst;
2982
2983 inc_error:
2984 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
2985 error:
2986 xfrm_pols_put(pols, num_pols);
2987 return ERR_PTR(err);
2988 }
2989
2990 static struct dst_entry *make_blackhole(struct net *net, u16 family,
2991 struct dst_entry *dst_orig)
2992 {
2993 const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2994 struct dst_entry *ret;
2995
2996 if (!afinfo) {
2997 dst_release(dst_orig);
2998 return ERR_PTR(-EINVAL);
2999 } else {
3000 ret = afinfo->blackhole_route(net, dst_orig);
3001 }
3002 rcu_read_unlock();
3003
3004 return ret;
3005 }
3006
3007 /* Finds/creates a bundle for given flow and if_id
3008 *
3009 * At the moment we eat a raw IP route. Mostly to speed up lookups
3010 * on interfaces with disabled IPsec.
3011 *
3012 * xfrm_lookup uses an if_id of 0 by default, and is provided for
3013 * compatibility
3014 */
3015 struct dst_entry *xfrm_lookup_with_ifid(struct net *net,
3016 struct dst_entry *dst_orig,
3017 const struct flowi *fl,
3018 const struct sock *sk,
3019 int flags, u32 if_id)
3020 {
3021 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
3022 struct xfrm_dst *xdst;
3023 struct dst_entry *dst, *route;
3024 u16 family = dst_orig->ops->family;
3025 u8 dir = XFRM_POLICY_OUT;
3026 int i, err, num_pols, num_xfrms = 0, drop_pols = 0;
3027
3028 dst = NULL;
3029 xdst = NULL;
3030 route = NULL;
3031
3032 sk = sk_const_to_full_sk(sk);
3033 if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
3034 num_pols = 1;
3035 pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family,
3036 if_id);
3037 err = xfrm_expand_policies(fl, family, pols,
3038 &num_pols, &num_xfrms);
3039 if (err < 0)
3040 goto dropdst;
3041
3042 if (num_pols) {
3043 if (num_xfrms <= 0) {
3044 drop_pols = num_pols;
3045 goto no_transform;
3046 }
3047
3048 xdst = xfrm_resolve_and_create_bundle(
3049 pols, num_pols, fl,
3050 family, dst_orig);
3051
3052 if (IS_ERR(xdst)) {
3053 xfrm_pols_put(pols, num_pols);
3054 err = PTR_ERR(xdst);
3055 if (err == -EREMOTE)
3056 goto nopol;
3057
3058 goto dropdst;
3059 } else if (xdst == NULL) {
3060 num_xfrms = 0;
3061 drop_pols = num_pols;
3062 goto no_transform;
3063 }
3064
3065 route = xdst->route;
3066 }
3067 }
3068
3069 if (xdst == NULL) {
3070 struct xfrm_flo xflo;
3071
3072 xflo.dst_orig = dst_orig;
3073 xflo.flags = flags;
3074
3075 /* To accelerate a bit... */
3076 if ((dst_orig->flags & DST_NOXFRM) ||
3077 !net->xfrm.policy_count[XFRM_POLICY_OUT])
3078 goto nopol;
3079
3080 xdst = xfrm_bundle_lookup(net, fl, family, dir, &xflo, if_id);
3081 if (xdst == NULL)
3082 goto nopol;
3083 if (IS_ERR(xdst)) {
3084 err = PTR_ERR(xdst);
3085 goto dropdst;
3086 }
3087
3088 num_pols = xdst->num_pols;
3089 num_xfrms = xdst->num_xfrms;
3090 memcpy(pols, xdst->pols, sizeof(struct xfrm_policy *) * num_pols);
3091 route = xdst->route;
3092 }
3093
3094 dst = &xdst->u.dst;
3095 if (route == NULL && num_xfrms > 0) {
3096 /* The only case when xfrm_bundle_lookup() returns a
3097 * bundle with null route, is when the template could
3098 * not be resolved. It means policies are there, but
3099 * bundle could not be created, since we don't yet
3100 * have the xfrm_state's. We need to wait for KM to
3101 * negotiate new SA's or bail out with error.*/
3102 if (net->xfrm.sysctl_larval_drop) {
3103 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
3104 err = -EREMOTE;
3105 goto error;
3106 }
3107
3108 err = -EAGAIN;
3109
3110 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
3111 goto error;
3112 }
3113
3114 no_transform:
3115 if (num_pols == 0)
3116 goto nopol;
3117
3118 if ((flags & XFRM_LOOKUP_ICMP) &&
3119 !(pols[0]->flags & XFRM_POLICY_ICMP)) {
3120 err = -ENOENT;
3121 goto error;
3122 }
3123
3124 for (i = 0; i < num_pols; i++)
3125 pols[i]->curlft.use_time = ktime_get_real_seconds();
3126
3127 if (num_xfrms < 0) {
3128 /* Prohibit the flow */
3129 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
3130 err = -EPERM;
3131 goto error;
3132 } else if (num_xfrms > 0) {
3133 /* Flow transformed */
3134 dst_release(dst_orig);
3135 } else {
3136 /* Flow passes untransformed */
3137 dst_release(dst);
3138 dst = dst_orig;
3139 }
3140 ok:
3141 xfrm_pols_put(pols, drop_pols);
3142 if (dst && dst->xfrm &&
3143 dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
3144 dst->flags |= DST_XFRM_TUNNEL;
3145 return dst;
3146
3147 nopol:
3148 if (!(flags & XFRM_LOOKUP_ICMP)) {
3149 dst = dst_orig;
3150 goto ok;
3151 }
3152 err = -ENOENT;
3153 error:
3154 dst_release(dst);
3155 dropdst:
3156 if (!(flags & XFRM_LOOKUP_KEEP_DST_REF))
3157 dst_release(dst_orig);
3158 xfrm_pols_put(pols, drop_pols);
3159 return ERR_PTR(err);
3160 }
3161 EXPORT_SYMBOL(xfrm_lookup_with_ifid);
3162
3163 /* Main function: finds/creates a bundle for given flow.
3164 *
3165 * At the moment we eat a raw IP route. Mostly to speed up lookups
3166 * on interfaces with disabled IPsec.
3167 */
3168 struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
3169 const struct flowi *fl, const struct sock *sk,
3170 int flags)
3171 {
3172 return xfrm_lookup_with_ifid(net, dst_orig, fl, sk, flags, 0);
3173 }
3174 EXPORT_SYMBOL(xfrm_lookup);
3175
3176 /* Callers of xfrm_lookup_route() must ensure a call to dst_output().
3177 * Otherwise we may send out blackholed packets.
3178 */
3179 struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
3180 const struct flowi *fl,
3181 const struct sock *sk, int flags)
3182 {
3183 struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk,
3184 flags | XFRM_LOOKUP_QUEUE |
3185 XFRM_LOOKUP_KEEP_DST_REF);
3186
3187 if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE)
3188 return make_blackhole(net, dst_orig->ops->family, dst_orig);
3189
3190 if (IS_ERR(dst))
3191 dst_release(dst_orig);
3192
3193 return dst;
3194 }
3195 EXPORT_SYMBOL(xfrm_lookup_route);
3196
3197 static inline int
3198 xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
3199 {
3200 struct sec_path *sp = skb_sec_path(skb);
3201 struct xfrm_state *x;
3202
3203 if (!sp || idx < 0 || idx >= sp->len)
3204 return 0;
3205 x = sp->xvec[idx];
3206 if (!x->type->reject)
3207 return 0;
3208 return x->type->reject(x, skb, fl);
3209 }
3210
3211 /* When skb is transformed back to its "native" form, we have to
3212 * check policy restrictions. At the moment we make this in maximally
3213 * stupid way. Shame on me. :-) Of course, connected sockets must
3214 * have policy cached at them.
3215 */
3216
3217 static inline int
3218 xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
3219 unsigned short family)
3220 {
3221 if (xfrm_state_kern(x))
3222 return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
3223 return x->id.proto == tmpl->id.proto &&
3224 (x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
3225 (x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
3226 x->props.mode == tmpl->mode &&
3227 (tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
3228 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
3229 !(x->props.mode != XFRM_MODE_TRANSPORT &&
3230 xfrm_state_addr_cmp(tmpl, x, family));
3231 }
3232
3233 /*
3234 * 0 or more than 0 is returned when validation is succeeded (either bypass
3235 * because of optional transport mode, or next index of the mathced secpath
3236 * state with the template.
3237 * -1 is returned when no matching template is found.
3238 * Otherwise "-2 - errored_index" is returned.
3239 */
3240 static inline int
3241 xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
3242 unsigned short family)
3243 {
3244 int idx = start;
3245
3246 if (tmpl->optional) {
3247 if (tmpl->mode == XFRM_MODE_TRANSPORT)
3248 return start;
3249 } else
3250 start = -1;
3251 for (; idx < sp->len; idx++) {
3252 if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
3253 return ++idx;
3254 if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
3255 if (start == -1)
3256 start = -2-idx;
3257 break;
3258 }
3259 }
3260 return start;
3261 }
3262
3263 static void
3264 decode_session4(struct sk_buff *skb, struct flowi *fl, bool reverse)
3265 {
3266 const struct iphdr *iph = ip_hdr(skb);
3267 u8 *xprth = skb_network_header(skb) + iph->ihl * 4;
3268 struct flowi4 *fl4 = &fl->u.ip4;
3269 int oif = 0;
3270
3271 if (skb_dst(skb))
3272 oif = skb_dst(skb)->dev->ifindex;
3273
3274 memset(fl4, 0, sizeof(struct flowi4));
3275 fl4->flowi4_mark = skb->mark;
3276 fl4->flowi4_oif = reverse ? skb->skb_iif : oif;
3277
3278 if (!ip_is_fragment(iph)) {
3279 switch (iph->protocol) {
3280 case IPPROTO_UDP:
3281 case IPPROTO_UDPLITE:
3282 case IPPROTO_TCP:
3283 case IPPROTO_SCTP:
3284 case IPPROTO_DCCP:
3285 if (xprth + 4 < skb->data ||
3286 pskb_may_pull(skb, xprth + 4 - skb->data)) {
3287 __be16 *ports;
3288
3289 xprth = skb_network_header(skb) + iph->ihl * 4;
3290 ports = (__be16 *)xprth;
3291
3292 fl4->fl4_sport = ports[!!reverse];
3293 fl4->fl4_dport = ports[!reverse];
3294 }
3295 break;
3296 case IPPROTO_ICMP:
3297 if (xprth + 2 < skb->data ||
3298 pskb_may_pull(skb, xprth + 2 - skb->data)) {
3299 u8 *icmp;
3300
3301 xprth = skb_network_header(skb) + iph->ihl * 4;
3302 icmp = xprth;
3303
3304 fl4->fl4_icmp_type = icmp[0];
3305 fl4->fl4_icmp_code = icmp[1];
3306 }
3307 break;
3308 case IPPROTO_ESP:
3309 if (xprth + 4 < skb->data ||
3310 pskb_may_pull(skb, xprth + 4 - skb->data)) {
3311 __be32 *ehdr;
3312
3313 xprth = skb_network_header(skb) + iph->ihl * 4;
3314 ehdr = (__be32 *)xprth;
3315
3316 fl4->fl4_ipsec_spi = ehdr[0];
3317 }
3318 break;
3319 case IPPROTO_AH:
3320 if (xprth + 8 < skb->data ||
3321 pskb_may_pull(skb, xprth + 8 - skb->data)) {
3322 __be32 *ah_hdr;
3323
3324 xprth = skb_network_header(skb) + iph->ihl * 4;
3325 ah_hdr = (__be32 *)xprth;
3326
3327 fl4->fl4_ipsec_spi = ah_hdr[1];
3328 }
3329 break;
3330 case IPPROTO_COMP:
3331 if (xprth + 4 < skb->data ||
3332 pskb_may_pull(skb, xprth + 4 - skb->data)) {
3333 __be16 *ipcomp_hdr;
3334
3335 xprth = skb_network_header(skb) + iph->ihl * 4;
3336 ipcomp_hdr = (__be16 *)xprth;
3337
3338 fl4->fl4_ipsec_spi = htonl(ntohs(ipcomp_hdr[1]));
3339 }
3340 break;
3341 case IPPROTO_GRE:
3342 if (xprth + 12 < skb->data ||
3343 pskb_may_pull(skb, xprth + 12 - skb->data)) {
3344 __be16 *greflags;
3345 __be32 *gre_hdr;
3346
3347 xprth = skb_network_header(skb) + iph->ihl * 4;
3348 greflags = (__be16 *)xprth;
3349 gre_hdr = (__be32 *)xprth;
3350
3351 if (greflags[0] & GRE_KEY) {
3352 if (greflags[0] & GRE_CSUM)
3353 gre_hdr++;
3354 fl4->fl4_gre_key = gre_hdr[1];
3355 }
3356 }
3357 break;
3358 default:
3359 fl4->fl4_ipsec_spi = 0;
3360 break;
3361 }
3362 }
3363 fl4->flowi4_proto = iph->protocol;
3364 fl4->daddr = reverse ? iph->saddr : iph->daddr;
3365 fl4->saddr = reverse ? iph->daddr : iph->saddr;
3366 fl4->flowi4_tos = iph->tos;
3367 }
3368
3369 #if IS_ENABLED(CONFIG_IPV6)
3370 static void
3371 decode_session6(struct sk_buff *skb, struct flowi *fl, bool reverse)
3372 {
3373 struct flowi6 *fl6 = &fl->u.ip6;
3374 int onlyproto = 0;
3375 const struct ipv6hdr *hdr = ipv6_hdr(skb);
3376 u32 offset = sizeof(*hdr);
3377 struct ipv6_opt_hdr *exthdr;
3378 const unsigned char *nh = skb_network_header(skb);
3379 u16 nhoff = IP6CB(skb)->nhoff;
3380 int oif = 0;
3381 u8 nexthdr;
3382
3383 if (!nhoff)
3384 nhoff = offsetof(struct ipv6hdr, nexthdr);
3385
3386 nexthdr = nh[nhoff];
3387
3388 if (skb_dst(skb))
3389 oif = skb_dst(skb)->dev->ifindex;
3390
3391 memset(fl6, 0, sizeof(struct flowi6));
3392 fl6->flowi6_mark = skb->mark;
3393 fl6->flowi6_oif = reverse ? skb->skb_iif : oif;
3394
3395 fl6->daddr = reverse ? hdr->saddr : hdr->daddr;
3396 fl6->saddr = reverse ? hdr->daddr : hdr->saddr;
3397
3398 while (nh + offset + sizeof(*exthdr) < skb->data ||
3399 pskb_may_pull(skb, nh + offset + sizeof(*exthdr) - skb->data)) {
3400 nh = skb_network_header(skb);
3401 exthdr = (struct ipv6_opt_hdr *)(nh + offset);
3402
3403 switch (nexthdr) {
3404 case NEXTHDR_FRAGMENT:
3405 onlyproto = 1;
3406 /* fall through */
3407 case NEXTHDR_ROUTING:
3408 case NEXTHDR_HOP:
3409 case NEXTHDR_DEST:
3410 offset += ipv6_optlen(exthdr);
3411 nexthdr = exthdr->nexthdr;
3412 exthdr = (struct ipv6_opt_hdr *)(nh + offset);
3413 break;
3414 case IPPROTO_UDP:
3415 case IPPROTO_UDPLITE:
3416 case IPPROTO_TCP:
3417 case IPPROTO_SCTP:
3418 case IPPROTO_DCCP:
3419 if (!onlyproto && (nh + offset + 4 < skb->data ||
3420 pskb_may_pull(skb, nh + offset + 4 - skb->data))) {
3421 __be16 *ports;
3422
3423 nh = skb_network_header(skb);
3424 ports = (__be16 *)(nh + offset);
3425 fl6->fl6_sport = ports[!!reverse];
3426 fl6->fl6_dport = ports[!reverse];
3427 }
3428 fl6->flowi6_proto = nexthdr;
3429 return;
3430 case IPPROTO_ICMPV6:
3431 if (!onlyproto && (nh + offset + 2 < skb->data ||
3432 pskb_may_pull(skb, nh + offset + 2 - skb->data))) {
3433 u8 *icmp;
3434
3435 nh = skb_network_header(skb);
3436 icmp = (u8 *)(nh + offset);
3437 fl6->fl6_icmp_type = icmp[0];
3438 fl6->fl6_icmp_code = icmp[1];
3439 }
3440 fl6->flowi6_proto = nexthdr;
3441 return;
3442 #if IS_ENABLED(CONFIG_IPV6_MIP6)
3443 case IPPROTO_MH:
3444 offset += ipv6_optlen(exthdr);
3445 if (!onlyproto && (nh + offset + 3 < skb->data ||
3446 pskb_may_pull(skb, nh + offset + 3 - skb->data))) {
3447 struct ip6_mh *mh;
3448
3449 nh = skb_network_header(skb);
3450 mh = (struct ip6_mh *)(nh + offset);
3451 fl6->fl6_mh_type = mh->ip6mh_type;
3452 }
3453 fl6->flowi6_proto = nexthdr;
3454 return;
3455 #endif
3456 /* XXX Why are there these headers? */
3457 case IPPROTO_AH:
3458 case IPPROTO_ESP:
3459 case IPPROTO_COMP:
3460 default:
3461 fl6->fl6_ipsec_spi = 0;
3462 fl6->flowi6_proto = nexthdr;
3463 return;
3464 }
3465 }
3466 }
3467 #endif
3468
3469 int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
3470 unsigned int family, int reverse)
3471 {
3472 switch (family) {
3473 case AF_INET:
3474 decode_session4(skb, fl, reverse);
3475 break;
3476 #if IS_ENABLED(CONFIG_IPV6)
3477 case AF_INET6:
3478 decode_session6(skb, fl, reverse);
3479 break;
3480 #endif
3481 default:
3482 return -EAFNOSUPPORT;
3483 }
3484
3485 return security_xfrm_decode_session(skb, &fl->flowi_secid);
3486 }
3487 EXPORT_SYMBOL(__xfrm_decode_session);
3488
3489 static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
3490 {
3491 for (; k < sp->len; k++) {
3492 if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
3493 *idxp = k;
3494 return 1;
3495 }
3496 }
3497
3498 return 0;
3499 }
3500
3501 int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
3502 unsigned short family)
3503 {
3504 struct net *net = dev_net(skb->dev);
3505 struct xfrm_policy *pol;
3506 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
3507 int npols = 0;
3508 int xfrm_nr;
3509 int pi;
3510 int reverse;
3511 struct flowi fl;
3512 int xerr_idx = -1;
3513 const struct xfrm_if_cb *ifcb;
3514 struct sec_path *sp;
3515 struct xfrm_if *xi;
3516 u32 if_id = 0;
3517
3518 rcu_read_lock();
3519 ifcb = xfrm_if_get_cb();
3520
3521 if (ifcb) {
3522 xi = ifcb->decode_session(skb, family);
3523 if (xi) {
3524 if_id = xi->p.if_id;
3525 net = xi->net;
3526 }
3527 }
3528 rcu_read_unlock();
3529
3530 reverse = dir & ~XFRM_POLICY_MASK;
3531 dir &= XFRM_POLICY_MASK;
3532
3533 if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
3534 XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
3535 return 0;
3536 }
3537
3538 nf_nat_decode_session(skb, &fl, family);
3539
3540 /* First, check used SA against their selectors. */
3541 sp = skb_sec_path(skb);
3542 if (sp) {
3543 int i;
3544
3545 for (i = sp->len - 1; i >= 0; i--) {
3546 struct xfrm_state *x = sp->xvec[i];
3547 if (!xfrm_selector_match(&x->sel, &fl, family)) {
3548 XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
3549 return 0;
3550 }
3551 }
3552 }
3553
3554 pol = NULL;
3555 sk = sk_to_full_sk(sk);
3556 if (sk && sk->sk_policy[dir]) {
3557 pol = xfrm_sk_policy_lookup(sk, dir, &fl, family, if_id);
3558 if (IS_ERR(pol)) {
3559 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3560 return 0;
3561 }
3562 }
3563
3564 if (!pol)
3565 pol = xfrm_policy_lookup(net, &fl, family, dir, if_id);
3566
3567 if (IS_ERR(pol)) {
3568 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3569 return 0;
3570 }
3571
3572 if (!pol) {
3573 if (sp && secpath_has_nontransport(sp, 0, &xerr_idx)) {
3574 xfrm_secpath_reject(xerr_idx, skb, &fl);
3575 XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
3576 return 0;
3577 }
3578 return 1;
3579 }
3580
3581 pol->curlft.use_time = ktime_get_real_seconds();
3582
3583 pols[0] = pol;
3584 npols++;
3585 #ifdef CONFIG_XFRM_SUB_POLICY
3586 if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
3587 pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
3588 &fl, family,
3589 XFRM_POLICY_IN, if_id);
3590 if (pols[1]) {
3591 if (IS_ERR(pols[1])) {
3592 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3593 return 0;
3594 }
3595 pols[1]->curlft.use_time = ktime_get_real_seconds();
3596 npols++;
3597 }
3598 }
3599 #endif
3600
3601 if (pol->action == XFRM_POLICY_ALLOW) {
3602 static struct sec_path dummy;
3603 struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
3604 struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
3605 struct xfrm_tmpl **tpp = tp;
3606 int ti = 0;
3607 int i, k;
3608
3609 sp = skb_sec_path(skb);
3610 if (!sp)
3611 sp = &dummy;
3612
3613 for (pi = 0; pi < npols; pi++) {
3614 if (pols[pi] != pol &&
3615 pols[pi]->action != XFRM_POLICY_ALLOW) {
3616 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
3617 goto reject;
3618 }
3619 if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
3620 XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
3621 goto reject_error;
3622 }
3623 for (i = 0; i < pols[pi]->xfrm_nr; i++)
3624 tpp[ti++] = &pols[pi]->xfrm_vec[i];
3625 }
3626 xfrm_nr = ti;
3627 if (npols > 1) {
3628 xfrm_tmpl_sort(stp, tpp, xfrm_nr, family, net);
3629 tpp = stp;
3630 }
3631
3632 /* For each tunnel xfrm, find the first matching tmpl.
3633 * For each tmpl before that, find corresponding xfrm.
3634 * Order is _important_. Later we will implement
3635 * some barriers, but at the moment barriers
3636 * are implied between each two transformations.
3637 */
3638 for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
3639 k = xfrm_policy_ok(tpp[i], sp, k, family);
3640 if (k < 0) {
3641 if (k < -1)
3642 /* "-2 - errored_index" returned */
3643 xerr_idx = -(2+k);
3644 XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
3645 goto reject;
3646 }
3647 }
3648
3649 if (secpath_has_nontransport(sp, k, &xerr_idx)) {
3650 XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
3651 goto reject;
3652 }
3653
3654 xfrm_pols_put(pols, npols);
3655 return 1;
3656 }
3657 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
3658
3659 reject:
3660 xfrm_secpath_reject(xerr_idx, skb, &fl);
3661 reject_error:
3662 xfrm_pols_put(pols, npols);
3663 return 0;
3664 }
3665 EXPORT_SYMBOL(__xfrm_policy_check);
3666
3667 int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
3668 {
3669 struct net *net = dev_net(skb->dev);
3670 struct flowi fl;
3671 struct dst_entry *dst;
3672 int res = 1;
3673
3674 if (xfrm_decode_session(skb, &fl, family) < 0) {
3675 XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
3676 return 0;
3677 }
3678
3679 skb_dst_force(skb);
3680 if (!skb_dst(skb)) {
3681 XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
3682 return 0;
3683 }
3684
3685 dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
3686 if (IS_ERR(dst)) {
3687 res = 0;
3688 dst = NULL;
3689 }
3690 skb_dst_set(skb, dst);
3691 return res;
3692 }
3693 EXPORT_SYMBOL(__xfrm_route_forward);
3694
3695 /* Optimize later using cookies and generation ids. */
3696
3697 static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
3698 {
3699 /* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
3700 * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to
3701 * get validated by dst_ops->check on every use. We do this
3702 * because when a normal route referenced by an XFRM dst is
3703 * obsoleted we do not go looking around for all parent
3704 * referencing XFRM dsts so that we can invalidate them. It
3705 * is just too much work. Instead we make the checks here on
3706 * every use. For example:
3707 *
3708 * XFRM dst A --> IPv4 dst X
3709 *
3710 * X is the "xdst->route" of A (X is also the "dst->path" of A
3711 * in this example). If X is marked obsolete, "A" will not
3712 * notice. That's what we are validating here via the
3713 * stale_bundle() check.
3714 *
3715 * When a dst is removed from the fib tree, DST_OBSOLETE_DEAD will
3716 * be marked on it.
3717 * This will force stale_bundle() to fail on any xdst bundle with
3718 * this dst linked in it.
3719 */
3720 if (dst->obsolete < 0 && !stale_bundle(dst))
3721 return dst;
3722
3723 return NULL;
3724 }
3725
3726 static int stale_bundle(struct dst_entry *dst)
3727 {
3728 return !xfrm_bundle_ok((struct xfrm_dst *)dst);
3729 }
3730
3731 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
3732 {
3733 while ((dst = xfrm_dst_child(dst)) && dst->xfrm && dst->dev == dev) {
3734 dst->dev = dev_net(dev)->loopback_dev;
3735 dev_hold(dst->dev);
3736 dev_put(dev);
3737 }
3738 }
3739 EXPORT_SYMBOL(xfrm_dst_ifdown);
3740
3741 static void xfrm_link_failure(struct sk_buff *skb)
3742 {
3743 /* Impossible. Such dst must be popped before reaches point of failure. */
3744 }
3745
3746 static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
3747 {
3748 if (dst) {
3749 if (dst->obsolete) {
3750 dst_release(dst);
3751 dst = NULL;
3752 }
3753 }
3754 return dst;
3755 }
3756
3757 static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr)
3758 {
3759 while (nr--) {
3760 struct xfrm_dst *xdst = bundle[nr];
3761 u32 pmtu, route_mtu_cached;
3762 struct dst_entry *dst;
3763
3764 dst = &xdst->u.dst;
3765 pmtu = dst_mtu(xfrm_dst_child(dst));
3766 xdst->child_mtu_cached = pmtu;
3767
3768 pmtu = xfrm_state_mtu(dst->xfrm, pmtu);
3769
3770 route_mtu_cached = dst_mtu(xdst->route);
3771 xdst->route_mtu_cached = route_mtu_cached;
3772
3773 if (pmtu > route_mtu_cached)
3774 pmtu = route_mtu_cached;
3775
3776 dst_metric_set(dst, RTAX_MTU, pmtu);
3777 }
3778 }
3779
3780 /* Check that the bundle accepts the flow and its components are
3781 * still valid.
3782 */
3783
3784 static int xfrm_bundle_ok(struct xfrm_dst *first)
3785 {
3786 struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
3787 struct dst_entry *dst = &first->u.dst;
3788 struct xfrm_dst *xdst;
3789 int start_from, nr;
3790 u32 mtu;
3791
3792 if (!dst_check(xfrm_dst_path(dst), ((struct xfrm_dst *)dst)->path_cookie) ||
3793 (dst->dev && !netif_running(dst->dev)))
3794 return 0;
3795
3796 if (dst->flags & DST_XFRM_QUEUE)
3797 return 1;
3798
3799 start_from = nr = 0;
3800 do {
3801 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
3802
3803 if (dst->xfrm->km.state != XFRM_STATE_VALID)
3804 return 0;
3805 if (xdst->xfrm_genid != dst->xfrm->genid)
3806 return 0;
3807 if (xdst->num_pols > 0 &&
3808 xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
3809 return 0;
3810
3811 bundle[nr++] = xdst;
3812
3813 mtu = dst_mtu(xfrm_dst_child(dst));
3814 if (xdst->child_mtu_cached != mtu) {
3815 start_from = nr;
3816 xdst->child_mtu_cached = mtu;
3817 }
3818
3819 if (!dst_check(xdst->route, xdst->route_cookie))
3820 return 0;
3821 mtu = dst_mtu(xdst->route);
3822 if (xdst->route_mtu_cached != mtu) {
3823 start_from = nr;
3824 xdst->route_mtu_cached = mtu;
3825 }
3826
3827 dst = xfrm_dst_child(dst);
3828 } while (dst->xfrm);
3829
3830 if (likely(!start_from))
3831 return 1;
3832
3833 xdst = bundle[start_from - 1];
3834 mtu = xdst->child_mtu_cached;
3835 while (start_from--) {
3836 dst = &xdst->u.dst;
3837
3838 mtu = xfrm_state_mtu(dst->xfrm, mtu);
3839 if (mtu > xdst->route_mtu_cached)
3840 mtu = xdst->route_mtu_cached;
3841 dst_metric_set(dst, RTAX_MTU, mtu);
3842 if (!start_from)
3843 break;
3844
3845 xdst = bundle[start_from - 1];
3846 xdst->child_mtu_cached = mtu;
3847 }
3848
3849 return 1;
3850 }
3851
3852 static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
3853 {
3854 return dst_metric_advmss(xfrm_dst_path(dst));
3855 }
3856
3857 static unsigned int xfrm_mtu(const struct dst_entry *dst)
3858 {
3859 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
3860
3861 return mtu ? : dst_mtu(xfrm_dst_path(dst));
3862 }
3863
3864 static const void *xfrm_get_dst_nexthop(const struct dst_entry *dst,
3865 const void *daddr)
3866 {
3867 while (dst->xfrm) {
3868 const struct xfrm_state *xfrm = dst->xfrm;
3869
3870 dst = xfrm_dst_child(dst);
3871
3872 if (xfrm->props.mode == XFRM_MODE_TRANSPORT)
3873 continue;
3874 if (xfrm->type->flags & XFRM_TYPE_REMOTE_COADDR)
3875 daddr = xfrm->coaddr;
3876 else if (!(xfrm->type->flags & XFRM_TYPE_LOCAL_COADDR))
3877 daddr = &xfrm->id.daddr;
3878 }
3879 return daddr;
3880 }
3881
3882 static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
3883 struct sk_buff *skb,
3884 const void *daddr)
3885 {
3886 const struct dst_entry *path = xfrm_dst_path(dst);
3887
3888 if (!skb)
3889 daddr = xfrm_get_dst_nexthop(dst, daddr);
3890 return path->ops->neigh_lookup(path, skb, daddr);
3891 }
3892
3893 static void xfrm_confirm_neigh(const struct dst_entry *dst, const void *daddr)
3894 {
3895 const struct dst_entry *path = xfrm_dst_path(dst);
3896
3897 daddr = xfrm_get_dst_nexthop(dst, daddr);
3898 path->ops->confirm_neigh(path, daddr);
3899 }
3900
3901 int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family)
3902 {
3903 int err = 0;
3904
3905 if (WARN_ON(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
3906 return -EAFNOSUPPORT;
3907
3908 spin_lock(&xfrm_policy_afinfo_lock);
3909 if (unlikely(xfrm_policy_afinfo[family] != NULL))
3910 err = -EEXIST;
3911 else {
3912 struct dst_ops *dst_ops = afinfo->dst_ops;
3913 if (likely(dst_ops->kmem_cachep == NULL))
3914 dst_ops->kmem_cachep = xfrm_dst_cache;
3915 if (likely(dst_ops->check == NULL))
3916 dst_ops->check = xfrm_dst_check;
3917 if (likely(dst_ops->default_advmss == NULL))
3918 dst_ops->default_advmss = xfrm_default_advmss;
3919 if (likely(dst_ops->mtu == NULL))
3920 dst_ops->mtu = xfrm_mtu;
3921 if (likely(dst_ops->negative_advice == NULL))
3922 dst_ops->negative_advice = xfrm_negative_advice;
3923 if (likely(dst_ops->link_failure == NULL))
3924 dst_ops->link_failure = xfrm_link_failure;
3925 if (likely(dst_ops->neigh_lookup == NULL))
3926 dst_ops->neigh_lookup = xfrm_neigh_lookup;
3927 if (likely(!dst_ops->confirm_neigh))
3928 dst_ops->confirm_neigh = xfrm_confirm_neigh;
3929 rcu_assign_pointer(xfrm_policy_afinfo[family], afinfo);
3930 }
3931 spin_unlock(&xfrm_policy_afinfo_lock);
3932
3933 return err;
3934 }
3935 EXPORT_SYMBOL(xfrm_policy_register_afinfo);
3936
3937 void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo)
3938 {
3939 struct dst_ops *dst_ops = afinfo->dst_ops;
3940 int i;
3941
3942 for (i = 0; i < ARRAY_SIZE(xfrm_policy_afinfo); i++) {
3943 if (xfrm_policy_afinfo[i] != afinfo)
3944 continue;
3945 RCU_INIT_POINTER(xfrm_policy_afinfo[i], NULL);
3946 break;
3947 }
3948
3949 synchronize_rcu();
3950
3951 dst_ops->kmem_cachep = NULL;
3952 dst_ops->check = NULL;
3953 dst_ops->negative_advice = NULL;
3954 dst_ops->link_failure = NULL;
3955 }
3956 EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
3957
3958 void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb)
3959 {
3960 spin_lock(&xfrm_if_cb_lock);
3961 rcu_assign_pointer(xfrm_if_cb, ifcb);
3962 spin_unlock(&xfrm_if_cb_lock);
3963 }
3964 EXPORT_SYMBOL(xfrm_if_register_cb);
3965
3966 void xfrm_if_unregister_cb(void)
3967 {
3968 RCU_INIT_POINTER(xfrm_if_cb, NULL);
3969 synchronize_rcu();
3970 }
3971 EXPORT_SYMBOL(xfrm_if_unregister_cb);
3972
3973 #ifdef CONFIG_XFRM_STATISTICS
3974 static int __net_init xfrm_statistics_init(struct net *net)
3975 {
3976 int rv;
3977 net->mib.xfrm_statistics = alloc_percpu(struct linux_xfrm_mib);
3978 if (!net->mib.xfrm_statistics)
3979 return -ENOMEM;
3980 rv = xfrm_proc_init(net);
3981 if (rv < 0)
3982 free_percpu(net->mib.xfrm_statistics);
3983 return rv;
3984 }
3985
3986 static void xfrm_statistics_fini(struct net *net)
3987 {
3988 xfrm_proc_fini(net);
3989 free_percpu(net->mib.xfrm_statistics);
3990 }
3991 #else
3992 static int __net_init xfrm_statistics_init(struct net *net)
3993 {
3994 return 0;
3995 }
3996
3997 static void xfrm_statistics_fini(struct net *net)
3998 {
3999 }
4000 #endif
4001
4002 static int __net_init xfrm_policy_init(struct net *net)
4003 {
4004 unsigned int hmask, sz;
4005 int dir, err;
4006
4007 if (net_eq(net, &init_net)) {
4008 xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
4009 sizeof(struct xfrm_dst),
4010 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
4011 NULL);
4012 err = rhashtable_init(&xfrm_policy_inexact_table,
4013 &xfrm_pol_inexact_params);
4014 BUG_ON(err);
4015 }
4016
4017 hmask = 8 - 1;
4018 sz = (hmask+1) * sizeof(struct hlist_head);
4019
4020 net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
4021 if (!net->xfrm.policy_byidx)
4022 goto out_byidx;
4023 net->xfrm.policy_idx_hmask = hmask;
4024
4025 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
4026 struct xfrm_policy_hash *htab;
4027
4028 net->xfrm.policy_count[dir] = 0;
4029 net->xfrm.policy_count[XFRM_POLICY_MAX + dir] = 0;
4030 INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);
4031
4032 htab = &net->xfrm.policy_bydst[dir];
4033 htab->table = xfrm_hash_alloc(sz);
4034 if (!htab->table)
4035 goto out_bydst;
4036 htab->hmask = hmask;
4037 htab->dbits4 = 32;
4038 htab->sbits4 = 32;
4039 htab->dbits6 = 128;
4040 htab->sbits6 = 128;
4041 }
4042 net->xfrm.policy_hthresh.lbits4 = 32;
4043 net->xfrm.policy_hthresh.rbits4 = 32;
4044 net->xfrm.policy_hthresh.lbits6 = 128;
4045 net->xfrm.policy_hthresh.rbits6 = 128;
4046
4047 seqlock_init(&net->xfrm.policy_hthresh.lock);
4048
4049 INIT_LIST_HEAD(&net->xfrm.policy_all);
4050 INIT_LIST_HEAD(&net->xfrm.inexact_bins);
4051 INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
4052 INIT_WORK(&net->xfrm.policy_hthresh.work, xfrm_hash_rebuild);
4053 return 0;
4054
4055 out_bydst:
4056 for (dir--; dir >= 0; dir--) {
4057 struct xfrm_policy_hash *htab;
4058
4059 htab = &net->xfrm.policy_bydst[dir];
4060 xfrm_hash_free(htab->table, sz);
4061 }
4062 xfrm_hash_free(net->xfrm.policy_byidx, sz);
4063 out_byidx:
4064 return -ENOMEM;
4065 }
4066
4067 static void xfrm_policy_fini(struct net *net)
4068 {
4069 struct xfrm_pol_inexact_bin *b, *t;
4070 unsigned int sz;
4071 int dir;
4072
4073 flush_work(&net->xfrm.policy_hash_work);
4074 #ifdef CONFIG_XFRM_SUB_POLICY
4075 xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, false);
4076 #endif
4077 xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, false);
4078
4079 WARN_ON(!list_empty(&net->xfrm.policy_all));
4080
4081 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
4082 struct xfrm_policy_hash *htab;
4083
4084 WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));
4085
4086 htab = &net->xfrm.policy_bydst[dir];
4087 sz = (htab->hmask + 1) * sizeof(struct hlist_head);
4088 WARN_ON(!hlist_empty(htab->table));
4089 xfrm_hash_free(htab->table, sz);
4090 }
4091
4092 sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
4093 WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
4094 xfrm_hash_free(net->xfrm.policy_byidx, sz);
4095
4096 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
4097 list_for_each_entry_safe(b, t, &net->xfrm.inexact_bins, inexact_bins)
4098 __xfrm_policy_inexact_prune_bin(b, true);
4099 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
4100 }
4101
4102 static int __net_init xfrm_net_init(struct net *net)
4103 {
4104 int rv;
4105
4106 /* Initialize the per-net locks here */
4107 spin_lock_init(&net->xfrm.xfrm_state_lock);
4108 spin_lock_init(&net->xfrm.xfrm_policy_lock);
4109 mutex_init(&net->xfrm.xfrm_cfg_mutex);
4110
4111 rv = xfrm_statistics_init(net);
4112 if (rv < 0)
4113 goto out_statistics;
4114 rv = xfrm_state_init(net);
4115 if (rv < 0)
4116 goto out_state;
4117 rv = xfrm_policy_init(net);
4118 if (rv < 0)
4119 goto out_policy;
4120 rv = xfrm_sysctl_init(net);
4121 if (rv < 0)
4122 goto out_sysctl;
4123
4124 return 0;
4125
4126 out_sysctl:
4127 xfrm_policy_fini(net);
4128 out_policy:
4129 xfrm_state_fini(net);
4130 out_state:
4131 xfrm_statistics_fini(net);
4132 out_statistics:
4133 return rv;
4134 }
4135
4136 static void __net_exit xfrm_net_exit(struct net *net)
4137 {
4138 xfrm_sysctl_fini(net);
4139 xfrm_policy_fini(net);
4140 xfrm_state_fini(net);
4141 xfrm_statistics_fini(net);
4142 }
4143
4144 static struct pernet_operations __net_initdata xfrm_net_ops = {
4145 .init = xfrm_net_init,
4146 .exit = xfrm_net_exit,
4147 };
4148
4149 void __init xfrm_init(void)
4150 {
4151 register_pernet_subsys(&xfrm_net_ops);
4152 xfrm_dev_init();
4153 seqcount_init(&xfrm_policy_hash_generation);
4154 xfrm_input_init();
4155
4156 RCU_INIT_POINTER(xfrm_if_cb, NULL);
4157 synchronize_rcu();
4158 }
4159
4160 #ifdef CONFIG_AUDITSYSCALL
4161 static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
4162 struct audit_buffer *audit_buf)
4163 {
4164 struct xfrm_sec_ctx *ctx = xp->security;
4165 struct xfrm_selector *sel = &xp->selector;
4166
4167 if (ctx)
4168 audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
4169 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
4170
4171 switch (sel->family) {
4172 case AF_INET:
4173 audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
4174 if (sel->prefixlen_s != 32)
4175 audit_log_format(audit_buf, " src_prefixlen=%d",
4176 sel->prefixlen_s);
4177 audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
4178 if (sel->prefixlen_d != 32)
4179 audit_log_format(audit_buf, " dst_prefixlen=%d",
4180 sel->prefixlen_d);
4181 break;
4182 case AF_INET6:
4183 audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
4184 if (sel->prefixlen_s != 128)
4185 audit_log_format(audit_buf, " src_prefixlen=%d",
4186 sel->prefixlen_s);
4187 audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
4188 if (sel->prefixlen_d != 128)
4189 audit_log_format(audit_buf, " dst_prefixlen=%d",
4190 sel->prefixlen_d);
4191 break;
4192 }
4193 }
4194
4195 void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid)
4196 {
4197 struct audit_buffer *audit_buf;
4198
4199 audit_buf = xfrm_audit_start("SPD-add");
4200 if (audit_buf == NULL)
4201 return;
4202 xfrm_audit_helper_usrinfo(task_valid, audit_buf);
4203 audit_log_format(audit_buf, " res=%u", result);
4204 xfrm_audit_common_policyinfo(xp, audit_buf);
4205 audit_log_end(audit_buf);
4206 }
4207 EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);
4208
4209 void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
4210 bool task_valid)
4211 {
4212 struct audit_buffer *audit_buf;
4213
4214 audit_buf = xfrm_audit_start("SPD-delete");
4215 if (audit_buf == NULL)
4216 return;
4217 xfrm_audit_helper_usrinfo(task_valid, audit_buf);
4218 audit_log_format(audit_buf, " res=%u", result);
4219 xfrm_audit_common_policyinfo(xp, audit_buf);
4220 audit_log_end(audit_buf);
4221 }
4222 EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
4223 #endif
4224
4225 #ifdef CONFIG_XFRM_MIGRATE
4226 static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
4227 const struct xfrm_selector *sel_tgt)
4228 {
4229 if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
4230 if (sel_tgt->family == sel_cmp->family &&
4231 xfrm_addr_equal(&sel_tgt->daddr, &sel_cmp->daddr,
4232 sel_cmp->family) &&
4233 xfrm_addr_equal(&sel_tgt->saddr, &sel_cmp->saddr,
4234 sel_cmp->family) &&
4235 sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
4236 sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
4237 return true;
4238 }
4239 } else {
4240 if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
4241 return true;
4242 }
4243 }
4244 return false;
4245 }
4246
4247 static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
4248 u8 dir, u8 type, struct net *net)
4249 {
4250 struct xfrm_policy *pol, *ret = NULL;
4251 struct hlist_head *chain;
4252 u32 priority = ~0U;
4253
4254 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
4255 chain = policy_hash_direct(net, &sel->daddr, &sel->saddr, sel->family, dir);
4256 hlist_for_each_entry(pol, chain, bydst) {
4257 if (xfrm_migrate_selector_match(sel, &pol->selector) &&
4258 pol->type == type) {
4259 ret = pol;
4260 priority = ret->priority;
4261 break;
4262 }
4263 }
4264 chain = &net->xfrm.policy_inexact[dir];
4265 hlist_for_each_entry(pol, chain, bydst_inexact_list) {
4266 if ((pol->priority >= priority) && ret)
4267 break;
4268
4269 if (xfrm_migrate_selector_match(sel, &pol->selector) &&
4270 pol->type == type) {
4271 ret = pol;
4272 break;
4273 }
4274 }
4275
4276 xfrm_pol_hold(ret);
4277
4278 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
4279
4280 return ret;
4281 }
4282
4283 static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
4284 {
4285 int match = 0;
4286
4287 if (t->mode == m->mode && t->id.proto == m->proto &&
4288 (m->reqid == 0 || t->reqid == m->reqid)) {
4289 switch (t->mode) {
4290 case XFRM_MODE_TUNNEL:
4291 case XFRM_MODE_BEET:
4292 if (xfrm_addr_equal(&t->id.daddr, &m->old_daddr,
4293 m->old_family) &&
4294 xfrm_addr_equal(&t->saddr, &m->old_saddr,
4295 m->old_family)) {
4296 match = 1;
4297 }
4298 break;
4299 case XFRM_MODE_TRANSPORT:
4300 /* in case of transport mode, template does not store
4301 any IP addresses, hence we just compare mode and
4302 protocol */
4303 match = 1;
4304 break;
4305 default:
4306 break;
4307 }
4308 }
4309 return match;
4310 }
4311
4312 /* update endpoint address(es) of template(s) */
4313 static int xfrm_policy_migrate(struct xfrm_policy *pol,
4314 struct xfrm_migrate *m, int num_migrate)
4315 {
4316 struct xfrm_migrate *mp;
4317 int i, j, n = 0;
4318
4319 write_lock_bh(&pol->lock);
4320 if (unlikely(pol->walk.dead)) {
4321 /* target policy has been deleted */
4322 write_unlock_bh(&pol->lock);
4323 return -ENOENT;
4324 }
4325
4326 for (i = 0; i < pol->xfrm_nr; i++) {
4327 for (j = 0, mp = m; j < num_migrate; j++, mp++) {
4328 if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
4329 continue;
4330 n++;
4331 if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
4332 pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
4333 continue;
4334 /* update endpoints */
4335 memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
4336 sizeof(pol->xfrm_vec[i].id.daddr));
4337 memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
4338 sizeof(pol->xfrm_vec[i].saddr));
4339 pol->xfrm_vec[i].encap_family = mp->new_family;
4340 /* flush bundles */
4341 atomic_inc(&pol->genid);
4342 }
4343 }
4344
4345 write_unlock_bh(&pol->lock);
4346
4347 if (!n)
4348 return -ENODATA;
4349
4350 return 0;
4351 }
4352
4353 static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate)
4354 {
4355 int i, j;
4356
4357 if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
4358 return -EINVAL;
4359
4360 for (i = 0; i < num_migrate; i++) {
4361 if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
4362 xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
4363 return -EINVAL;
4364
4365 /* check if there is any duplicated entry */
4366 for (j = i + 1; j < num_migrate; j++) {
4367 if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
4368 sizeof(m[i].old_daddr)) &&
4369 !memcmp(&m[i].old_saddr, &m[j].old_saddr,
4370 sizeof(m[i].old_saddr)) &&
4371 m[i].proto == m[j].proto &&
4372 m[i].mode == m[j].mode &&
4373 m[i].reqid == m[j].reqid &&
4374 m[i].old_family == m[j].old_family)
4375 return -EINVAL;
4376 }
4377 }
4378
4379 return 0;
4380 }
4381
4382 int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
4383 struct xfrm_migrate *m, int num_migrate,
4384 struct xfrm_kmaddress *k, struct net *net,
4385 struct xfrm_encap_tmpl *encap)
4386 {
4387 int i, err, nx_cur = 0, nx_new = 0;
4388 struct xfrm_policy *pol = NULL;
4389 struct xfrm_state *x, *xc;
4390 struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
4391 struct xfrm_state *x_new[XFRM_MAX_DEPTH];
4392 struct xfrm_migrate *mp;
4393
4394 /* Stage 0 - sanity checks */
4395 if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
4396 goto out;
4397
4398 if (dir >= XFRM_POLICY_MAX) {
4399 err = -EINVAL;
4400 goto out;
4401 }
4402
4403 /* Stage 1 - find policy */
4404 if ((pol = xfrm_migrate_policy_find(sel, dir, type, net)) == NULL) {
4405 err = -ENOENT;
4406 goto out;
4407 }
4408
4409 /* Stage 2 - find and update state(s) */
4410 for (i = 0, mp = m; i < num_migrate; i++, mp++) {
4411 if ((x = xfrm_migrate_state_find(mp, net))) {
4412 x_cur[nx_cur] = x;
4413 nx_cur++;
4414 xc = xfrm_state_migrate(x, mp, encap);
4415 if (xc) {
4416 x_new[nx_new] = xc;
4417 nx_new++;
4418 } else {
4419 err = -ENODATA;
4420 goto restore_state;
4421 }
4422 }
4423 }
4424
4425 /* Stage 3 - update policy */
4426 if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
4427 goto restore_state;
4428
4429 /* Stage 4 - delete old state(s) */
4430 if (nx_cur) {
4431 xfrm_states_put(x_cur, nx_cur);
4432 xfrm_states_delete(x_cur, nx_cur);
4433 }
4434
4435 /* Stage 5 - announce */
4436 km_migrate(sel, dir, type, m, num_migrate, k, encap);
4437
4438 xfrm_pol_put(pol);
4439
4440 return 0;
4441 out:
4442 return err;
4443
4444 restore_state:
4445 if (pol)
4446 xfrm_pol_put(pol);
4447 if (nx_cur)
4448 xfrm_states_put(x_cur, nx_cur);
4449 if (nx_new)
4450 xfrm_states_delete(x_new, nx_new);
4451
4452 return err;
4453 }
4454 EXPORT_SYMBOL(xfrm_migrate);
4455 #endif