]> git.proxmox.com Git - ovs.git/blob - datapath/linux/compat/ip_tunnel.c
58870bc2d916375ae38ba30aa835a025317f55e8
[ovs.git] / datapath / linux / compat / ip_tunnel.c
1 /*
2 * Copyright (c) 2013,2018 Nicira, Inc.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public
6 * License as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 */
14
15 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16
17 #include <linux/capability.h>
18 #include <linux/module.h>
19 #include <linux/types.h>
20 #include <linux/kernel.h>
21 #include <linux/slab.h>
22 #include <linux/uaccess.h>
23 #include <linux/skbuff.h>
24 #include <linux/netdevice.h>
25 #include <linux/in.h>
26 #include <linux/tcp.h>
27 #include <linux/udp.h>
28 #include <linux/if_arp.h>
29 #include <linux/mroute.h>
30 #include <linux/init.h>
31 #include <linux/in6.h>
32 #include <linux/inetdevice.h>
33 #include <linux/igmp.h>
34 #include <linux/netfilter_ipv4.h>
35 #include <linux/etherdevice.h>
36 #include <linux/if_ether.h>
37 #include <linux/if_vlan.h>
38 #include <linux/rculist.h>
39 #include <linux/err.h>
40
41 #include <net/sock.h>
42 #include <net/ip.h>
43 #include <net/icmp.h>
44 #include <net/protocol.h>
45 #include <net/ip_tunnels.h>
46 #include <net/arp.h>
47 #include <net/checksum.h>
48 #include <net/dsfield.h>
49 #include <net/inet_ecn.h>
50 #include <net/xfrm.h>
51 #include <net/net_namespace.h>
52 #include <net/netns/generic.h>
53 #include <net/rtnetlink.h>
54
55 #if IS_ENABLED(CONFIG_IPV6)
56 #include <net/ipv6.h>
57 #include <net/ip6_fib.h>
58 #include <net/ip6_route.h>
59 #endif
60
61 #include "compat.h"
62
63 #ifndef USE_UPSTREAM_TUNNEL
64 const struct ip_tunnel_encap_ops __rcu *
65 rpl_iptun_encaps[MAX_IPTUN_ENCAP_OPS] __read_mostly;
66
67 static unsigned int rpl_ip_tunnel_hash(__be32 key, __be32 remote)
68 {
69 return hash_32((__force u32)key ^ (__force u32)remote,
70 IP_TNL_HASH_BITS);
71 }
72
73 static bool rpl_ip_tunnel_key_match(const struct ip_tunnel_parm *p,
74 __be16 flags, __be32 key)
75 {
76 if (p->i_flags & TUNNEL_KEY) {
77 if (flags & TUNNEL_KEY)
78 return key == p->i_key;
79 else
80 /* key expected, none present */
81 return false;
82 } else
83 return !(flags & TUNNEL_KEY);
84 }
85
86 static struct hlist_head *ip_bucket(struct ip_tunnel_net *itn,
87 struct ip_tunnel_parm *parms)
88 {
89 unsigned int h;
90 __be32 remote;
91 __be32 i_key = parms->i_key;
92
93 if (parms->iph.daddr && !ipv4_is_multicast(parms->iph.daddr))
94 remote = parms->iph.daddr;
95 else
96 remote = 0;
97
98 if (!(parms->i_flags & TUNNEL_KEY) && (parms->i_flags & VTI_ISVTI))
99 i_key = 0;
100
101 h = rpl_ip_tunnel_hash(i_key, remote);
102 return &itn->tunnels[h];
103 }
104
/* Insert tunnel @t into @itn's hash table, RCU-safe against concurrent
 * readers in rpl_ip_tunnel_lookup().
 */
static void ip_tunnel_add(struct ip_tunnel_net *itn, struct ip_tunnel *t)
{
	struct hlist_head *head = ip_bucket(itn, &t->parms);

	/* Publish the (single) metadata-collecting tunnel for this netns. */
	if (t->collect_md)
		rcu_assign_pointer(itn->collect_md_tun, t);
	hlist_add_head_rcu(&t->hash_node, head);
}
113
/* Remove tunnel @t from @itn's hash table (RCU-safe); also clears the
 * collect_md shortcut pointer if @t was the metadata tunnel.
 */
static void ip_tunnel_del(struct ip_tunnel_net *itn, struct ip_tunnel *t)
{
	if (t->collect_md)
		rcu_assign_pointer(itn->collect_md_tun, NULL);
	hlist_del_init_rcu(&t->hash_node);
}
120
121 static struct net_device *__ip_tunnel_create(struct net *net,
122 const struct rtnl_link_ops *ops,
123 struct ip_tunnel_parm *parms)
124 {
125 int err;
126 struct ip_tunnel *tunnel;
127 struct net_device *dev;
128 char name[IFNAMSIZ];
129
130 if (parms->name[0])
131 strlcpy(name, parms->name, IFNAMSIZ);
132 else {
133 if (strlen(ops->kind) > (IFNAMSIZ - 3)) {
134 err = -E2BIG;
135 goto failed;
136 }
137 strlcpy(name, ops->kind, IFNAMSIZ);
138 strncat(name, "%d", 2);
139 }
140
141 ASSERT_RTNL();
142 dev = alloc_netdev(ops->priv_size, name, NET_NAME_UNKNOWN, ops->setup);
143 if (!dev) {
144 err = -ENOMEM;
145 goto failed;
146 }
147 dev_net_set(dev, net);
148
149 dev->rtnl_link_ops = ops;
150
151 tunnel = netdev_priv(dev);
152 tunnel->parms = *parms;
153 tunnel->net = net;
154
155 err = register_netdevice(dev);
156 if (err)
157 goto failed_free;
158
159 return dev;
160
161 failed_free:
162 free_netdev(dev);
163 failed:
164 return ERR_PTR(err);
165 }
166
167 static inline void init_tunnel_flow(struct flowi4 *fl4,
168 int proto,
169 __be32 daddr, __be32 saddr,
170 __be32 key, __u8 tos, int oif)
171 {
172 memset(fl4, 0, sizeof(*fl4));
173 fl4->flowi4_oif = oif;
174 fl4->daddr = daddr;
175 fl4->saddr = saddr;
176 fl4->flowi4_tos = tos;
177 fl4->flowi4_proto = proto;
178 fl4->fl4_gre_key = key;
179 }
180
/* Bind the tunnel to an underlying device: probe the route to the
 * configured remote (or fall back to parms.link) to size
 * needed_headroom, and return a reasonable MTU for the tunnel device.
 */
static int ip_tunnel_bind_dev(struct net_device *dev)
{
	struct net_device *tdev = NULL;
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr *iph;
	int hlen = LL_MAX_HEADER;
	int mtu = ETH_DATA_LEN;
	int t_hlen = tunnel->hlen + sizeof(struct iphdr);

	iph = &tunnel->parms.iph;

	/* Guess output device to choose reasonable mtu and needed_headroom */
	if (iph->daddr) {
		struct flowi4 fl4;
		struct rtable *rt;

		init_tunnel_flow(&fl4, iph->protocol, iph->daddr,
				 iph->saddr, tunnel->parms.o_key,
				 RT_TOS(iph->tos), tunnel->parms.link);
		rt = ip_route_output_key(tunnel->net, &fl4);

		if (!IS_ERR(rt)) {
			tdev = rt->dst.dev;
			ip_rt_put(rt);
		}
		if (dev->type != ARPHRD_ETHER)
			dev->flags |= IFF_POINTOPOINT;

		/* Route to the peer may have changed; drop cached dsts. */
		dst_cache_reset(&tunnel->dst_cache);
	}

	/* No route found (or no fixed remote): fall back to the device the
	 * tunnel was explicitly bound to, if any.
	 */
	if (!tdev && tunnel->parms.link)
		tdev = __dev_get_by_index(tunnel->net, tunnel->parms.link);

	if (tdev) {
		hlen = tdev->hard_header_len + tdev->needed_headroom;
		mtu = tdev->mtu;
	}

	/* Reserve room for the outer IP + tunnel headers on xmit. */
	dev->needed_headroom = t_hlen + hlen;
	mtu -= (dev->hard_header_len + t_hlen);

	/* Never go below 68, the minimum IPv4 MTU (RFC 791). */
	if (mtu < 68)
		mtu = 68;

	return mtu;
}
228
229 int rpl___ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict)
230 {
231 struct ip_tunnel *tunnel = netdev_priv(dev);
232 int t_hlen = tunnel->hlen + sizeof(struct iphdr);
233 int max_mtu = 0xFFF8 - dev->hard_header_len - t_hlen;
234
235 if (new_mtu < 68)
236 return -EINVAL;
237
238 if (new_mtu > max_mtu) {
239 if (strict)
240 return -EINVAL;
241
242 new_mtu = max_mtu;
243 }
244
245 dev->mtu = new_mtu;
246 return 0;
247 }
248
/* ndo_change_mtu helper: strict variant that rejects out-of-range MTUs. */
int rpl_ip_tunnel_change_mtu(struct net_device *dev, int new_mtu)
{
	return rpl___ip_tunnel_change_mtu(dev, new_mtu, true);
}
253
/* Path-MTU handling on the tunnel transmit path: work out the usable
 * inner MTU for this packet, propagate it to the skb's dst, and send
 * the appropriate "too big" ICMP/ICMPv6 error (returning -E2BIG) when
 * a non-GSO packet exceeds it.  Returns 0 when the packet may proceed.
 */
static int rpl_tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
			       struct rtable *rt, __be16 df,
			       const struct iphdr *inner_iph)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	int pkt_size = skb->len - tunnel->hlen - dev->hard_header_len;
	int mtu;

	/* With DF set, derive the inner MTU from the outer route; otherwise
	 * use the skb's own dst MTU (or the device MTU as a last resort).
	 */
	if (df)
		mtu = dst_mtu(&rt->dst) - dev->hard_header_len
					- sizeof(struct iphdr) - tunnel->hlen;
	else
		mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;

	if (skb_dst(skb))
		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);

	if (skb->protocol == htons(ETH_P_IP)) {
		/* Only fragmentation-forbidden (DF) non-GSO packets get an
		 * ICMP_FRAG_NEEDED error.
		 */
		if (!skb_is_gso(skb) &&
		    (inner_iph->frag_off & htons(IP_DF)) &&
		    mtu < pkt_size) {
			memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
			return -E2BIG;
		}
	}
#if IS_ENABLED(CONFIG_IPV6)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);

		/* Record the reduced MTU on the IPv6 route, but only for
		 * host routes or tunnels with a fixed unicast remote.
		 */
		if (rt6 && mtu < dst_mtu(skb_dst(skb)) &&
			   mtu >= IPV6_MIN_MTU) {
			if ((tunnel->parms.iph.daddr &&
			     !ipv4_is_multicast(tunnel->parms.iph.daddr)) ||
			    rt6->rt6i_dst.plen == 128) {
				rt6->rt6i_flags |= RTF_MODIFIED;
				dst_metric_set(skb_dst(skb), RTAX_MTU, mtu);
			}
		}

		if (!skb_is_gso(skb) && mtu >= IPV6_MIN_MTU &&
		    mtu < pkt_size) {
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
			return -E2BIG;
		}
	}
#endif
	return 0;
}
303
/* Transmit @skb through tunnel device @dev: resolve the outer
 * destination (including the NBMA no-daddr case), build the flow key,
 * apply optional encapsulation, route, enforce PMTU, pick outer
 * TOS/TTL/DF, and hand the packet to iptunnel_xmit().  Consumes @skb
 * on all paths; errors are accounted in dev->stats.
 */
void rpl_ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
			const struct iphdr *tnl_params, const u8 protocol)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr *inner_iph;
	struct flowi4 fl4;
	u8 tos, ttl;
	__be16 df;
	struct rtable *rt;		/* Route to the other host */
	unsigned int max_headroom;	/* The extra header space needed */
	__be32 dst;
	bool connected;

	inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
	/* "connected" tunnels (fixed remote) may use the per-tunnel dst
	 * cache; anything that makes the route packet-dependent below
	 * clears this flag.
	 */
	connected = (tunnel->parms.iph.daddr != 0);

	dst = tnl_params->daddr;
	if (dst == 0) {
		/* NBMA tunnel: derive the outer destination per packet from
		 * the inner next hop (IPv4) or an IPv4-compatible IPv6
		 * neighbour address.
		 */

		if (skb_dst(skb) == NULL) {
			dev->stats.tx_fifo_errors++;
			goto tx_error;
		}

		if (skb->protocol == htons(ETH_P_IP)) {
			rt = skb_rtable(skb);
			dst = rt_nexthop(rt, inner_iph->daddr);
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (skb->protocol == htons(ETH_P_IPV6)) {
			const struct in6_addr *addr6;
			struct neighbour *neigh;
			bool do_tx_error_icmp;
			int addr_type;

			neigh = dst_neigh_lookup(skb_dst(skb),
						 &ipv6_hdr(skb)->daddr);
			if (neigh == NULL)
				goto tx_error;

			addr6 = (const struct in6_addr *)&neigh->primary_key;
			addr_type = ipv6_addr_type(addr6);

			if (addr_type == IPV6_ADDR_ANY) {
				addr6 = &ipv6_hdr(skb)->daddr;
				addr_type = ipv6_addr_type(addr6);
			}

			/* Only ::a.b.c.d addresses embed a usable IPv4
			 * destination.
			 */
			if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
				do_tx_error_icmp = true;
			else {
				do_tx_error_icmp = false;
				dst = addr6->s6_addr32[3];
			}
			neigh_release(neigh);
			if (do_tx_error_icmp)
				goto tx_error_icmp;
		}
#endif
		else
			goto tx_error;

		connected = false;
	}

	/* Low TOS bit set means "inherit TOS from the inner packet". */
	tos = tnl_params->tos;
	if (tos & 0x1) {
		tos &= ~0x1;
		if (skb->protocol == htons(ETH_P_IP)) {
			tos = inner_iph->tos;
			connected = false;
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			tos = ipv6_get_dsfield((const struct ipv6hdr *)inner_iph);
			connected = false;
		}
	}

	init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr,
			 tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link);

	/* Optional extra encapsulation (e.g. UDP) may adjust protocol/fl4. */
	if (ovs_ip_tunnel_encap(skb, tunnel, &protocol, &fl4) < 0)
		goto tx_error;

	/* Try the cached route first for connected tunnels. */
	rt = connected ? dst_cache_get_ip4(&tunnel->dst_cache, &fl4.saddr) :
			 NULL;

	if (!rt) {
		rt = ip_route_output_key(tunnel->net, &fl4);

		if (IS_ERR(rt)) {
			dev->stats.tx_carrier_errors++;
			goto tx_error;
		}
		if (connected)
			dst_cache_set_ip4(&tunnel->dst_cache, &rt->dst,
					  fl4.saddr);
	}

	/* Routing back out of the tunnel itself would loop. */
	if (rt->dst.dev == dev) {
		ip_rt_put(rt);
		dev->stats.collisions++;
		goto tx_error;
	}

	if (rpl_tnl_update_pmtu(dev, skb, rt,
				tnl_params->frag_off, inner_iph)) {
		ip_rt_put(rt);
		goto tx_error;
	}

	/* Rate-limited link-failure feedback after recent ICMP errors. */
	if (tunnel->err_count > 0) {
		if (time_before(jiffies,
				tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
			tunnel->err_count--;

			memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
			dst_link_failure(skb);
		} else
			tunnel->err_count = 0;
	}

	tos = ip_tunnel_ecn_encap(tos, inner_iph, skb);
	/* TTL 0 means "inherit from inner packet" (or route hoplimit). */
	ttl = tnl_params->ttl;
	if (ttl == 0) {
		if (skb->protocol == htons(ETH_P_IP))
			ttl = inner_iph->ttl;
#if IS_ENABLED(CONFIG_IPV6)
		else if (skb->protocol == htons(ETH_P_IPV6))
			ttl = ((const struct ipv6hdr *)inner_iph)->hop_limit;
#endif
		else
			ttl = ip4_dst_hoplimit(&rt->dst);
	}

	/* Propagate the inner DF bit to the outer header for IPv4. */
	df = tnl_params->frag_off;
	if (skb->protocol == htons(ETH_P_IP))
		df |= (inner_iph->frag_off&htons(IP_DF));

	max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr)
			+ rt->dst.header_len;
	if (max_headroom > dev->needed_headroom)
		dev->needed_headroom = max_headroom;

	if (skb_cow_head(skb, dev->needed_headroom)) {
		ip_rt_put(rt);
		dev->stats.tx_dropped++;
		kfree_skb(skb);
		return;
	}

	iptunnel_xmit(skb->sk, rt, skb, fl4.saddr, fl4.daddr, protocol,
		      tos, ttl, df, !net_eq(tunnel->net, dev_net(dev)));

	return;

#if IS_ENABLED(CONFIG_IPV6)
tx_error_icmp:
	dst_link_failure(skb);
#endif
tx_error:
	dev->stats.tx_errors++;
	kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(rpl_ip_tunnel_xmit);
469
470 static void ip_tunnel_dev_free(struct net_device *dev)
471 {
472 free_percpu(dev->tstats);
473 free_netdev(dev);
474 }
475
476 void rpl_ip_tunnel_dellink(struct net_device *dev, struct list_head *head)
477 {
478 struct ip_tunnel *tunnel = netdev_priv(dev);
479 struct ip_tunnel_net *itn;
480
481 itn = net_generic(tunnel->net, tunnel->ip_tnl_net_id);
482
483 ip_tunnel_del(itn, netdev_priv(dev));
484 unregister_netdevice_queue(dev, head);
485 }
486
/* Per-netns init: empty the tunnel hash table and, when @ops is given,
 * create and register the fallback tunnel device named @devname.
 * Returns 0 on success or the fallback-device creation error.
 */
int rpl_ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
			   struct rtnl_link_ops *ops, char *devname)
{
	struct ip_tunnel_net *itn = net_generic(net, ip_tnl_net_id);
	struct ip_tunnel_parm parms;
	unsigned int i;

	for (i = 0; i < IP_TNL_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&itn->tunnels[i]);

	/* No link ops means no fallback device for this tunnel type. */
	if (!ops) {
		itn->fb_tunnel_dev = NULL;
		return 0;
	}

	memset(&parms, 0, sizeof(parms));
	if (devname)
		strlcpy(parms.name, devname, IFNAMSIZ);

	rtnl_lock();
	itn->fb_tunnel_dev = __ip_tunnel_create(net, ops, &parms);
	/* FB netdevice is special: we have one, and only one per netns.
	 * Allowing to move it to another netns is clearly unsafe.
	 */
	if (!IS_ERR(itn->fb_tunnel_dev)) {
		itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
		itn->fb_tunnel_dev->mtu = ip_tunnel_bind_dev(itn->fb_tunnel_dev);
		ip_tunnel_add(itn, netdev_priv(itn->fb_tunnel_dev));
	}
	rtnl_unlock();

	return PTR_ERR_OR_ZERO(itn->fb_tunnel_dev);
}
520
/* Queue every tunnel device belonging to @itn/@ops for unregistration
 * on @head: first all devices with matching link ops in the fallback
 * device's netns, then any hashed tunnels living in other netns.
 */
static void ip_tunnel_destroy(struct ip_tunnel_net *itn, struct list_head *head,
			      struct rtnl_link_ops *ops)
{
	struct net *net = dev_net(itn->fb_tunnel_dev);
	struct net_device *dev, *aux;
	int h;

	for_each_netdev_safe(net, dev, aux)
		if (dev->rtnl_link_ops == ops)
			unregister_netdevice_queue(dev, head);

	for (h = 0; h < IP_TNL_HASH_SIZE; h++) {
		struct ip_tunnel *t;
		struct hlist_node *n;
		struct hlist_head *thead = &itn->tunnels[h];

		hlist_for_each_entry_safe(t, n, thead, hash_node)
			/* If dev is in the same netns, it has already
			 * been added to the list by the previous loop.
			 */
			if (!net_eq(dev_net(t->dev), net))
				unregister_netdevice_queue(t->dev, head);
	}
}
545
/* Per-netns exit: unregister all tunnel devices of @itn/@ops in one
 * batch under RTNL.
 */
void rpl_ip_tunnel_delete_net(struct ip_tunnel_net *itn,
			      struct rtnl_link_ops *ops)
{
	LIST_HEAD(list);

	rtnl_lock();
	ip_tunnel_destroy(itn, &list, ops);
	unregister_netdevice_many(&list);
	rtnl_unlock();
}
556
/* rtnl newlink: register a new tunnel device configured from @p.  This
 * compat layer only supports metadata-collecting (collect_md) tunnels,
 * and at most one of them per netns; anything else is -EOPNOTSUPP.
 */
int rpl_ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
			  struct ip_tunnel_parm *p)
{
	struct ip_tunnel *nt;
	struct net *net = dev_net(dev);
	struct ip_tunnel_net *itn;
	int mtu;
	int err;

	nt = netdev_priv(dev);
	itn = net_generic(net, nt->ip_tnl_net_id);

	if (nt->collect_md) {
		/* Only one collect_md tunnel may exist per netns. */
		if (rtnl_dereference(itn->collect_md_tun))
			return -EEXIST;
	} else {
		return -EOPNOTSUPP;
	}

	nt->net = net;
	nt->parms = *p;
	err = register_netdevice(dev);
	if (err)
		goto out;

	/* No MAC supplied via netlink: pick a random one for ether devs. */
	if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
		eth_hw_addr_random(dev);

	mtu = ip_tunnel_bind_dev(dev);
	if (!tb[IFLA_MTU])
		dev->mtu = mtu;

	ip_tunnel_add(itn, nt);
out:
	return err;
}
593
/* ndo_init: allocate per-cpu stats, the dst cache and gro cells, set
 * up the destructor, and seed the outer IPv4 header template.  On
 * failure, everything allocated so far is unwound before returning.
 */
int rpl_ip_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;
	int err;

	/* Older kernels use dev->destructor; newer ones use
	 * priv_destructor plus needs_free_netdev.
	 */
#ifndef HAVE_NEEDS_FREE_NETDEV
	dev->destructor = ip_tunnel_dev_free;
#else
	dev->needs_free_netdev = true;
	dev->priv_destructor = ip_tunnel_dev_free;
#endif
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	err = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
	if (err) {
		free_percpu(dev->tstats);
		return err;
	}

	err = gro_cells_init(&tunnel->gro_cells, dev);
	if (err) {
		dst_cache_destroy(&tunnel->dst_cache);
		free_percpu(dev->tstats);
		return err;
	}

	tunnel->dev = dev;
	tunnel->net = dev_net(dev);
	strcpy(tunnel->parms.name, dev->name);
	iph->version = 4;
	iph->ihl = 5;

	/* Metadata tunnels must stay in their netns and keep their dst. */
	if (tunnel->collect_md) {
		dev->features |= NETIF_F_NETNS_LOCAL;
		netif_keep_dst(dev);
	}
	return 0;
}
635
636 void rpl_ip_tunnel_uninit(struct net_device *dev)
637 {
638 struct ip_tunnel *tunnel = netdev_priv(dev);
639 struct net *net = tunnel->net;
640 struct ip_tunnel_net *itn;
641
642 itn = net_generic(net, tunnel->ip_tnl_net_id);
643 ip_tunnel_del(itn, netdev_priv(dev));
644 }
645
/* Do least required initialization, rest of init is done in tunnel_init call */
void rpl_ip_tunnel_setup(struct net_device *dev, int net_id)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	/* Remember which pernet subsystem owns this tunnel type. */
	tunnel->ip_tnl_net_id = net_id;
}
653
654 int rpl_ip_tunnel_get_iflink(const struct net_device *dev)
655 {
656 struct ip_tunnel *tunnel = netdev_priv(dev);
657
658 return tunnel->parms.link;
659 }
660
661 struct net *rpl_ip_tunnel_get_link_net(const struct net_device *dev)
662 {
663 struct ip_tunnel *tunnel = netdev_priv(dev);
664
665 return tunnel->net;
666 }
667
/* RCU lookup of the receiving tunnel for a packet with the given
 * (link, flags, remote, local, key) tuple.  Tries progressively looser
 * matches in four passes -- exact remote+local, remote with wildcard
 * local, local-only / multicast, then key-only -- preferring an exact
 * link match in each pass and remembering the best link-mismatched
 * candidate.  Falls back to the netns fallback device if it is up.
 * Must be called under rcu_read_lock().
 */
struct ip_tunnel *rpl_ip_tunnel_lookup(struct ip_tunnel_net *itn,
				       int link, __be16 flags,
				       __be32 remote, __be32 local,
				       __be32 key)
{
	unsigned int hash;
	struct ip_tunnel *t, *cand = NULL;
	struct hlist_head *head;

	hash = rpl_ip_tunnel_hash(key, remote);
	head = &itn->tunnels[hash];

	/* Pass 1: exact local and remote address match. */
	hlist_for_each_entry_rcu(t, head, hash_node) {
		if (local != t->parms.iph.saddr ||
		    remote != t->parms.iph.daddr ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (!rpl_ip_tunnel_key_match(&t->parms, flags, key))
			continue;

		if (t->parms.link == link)
			return t;
		else
			cand = t;
	}

	/* Pass 2: exact remote, wildcard (0) local. */
	hlist_for_each_entry_rcu(t, head, hash_node) {
		if (remote != t->parms.iph.daddr ||
		    t->parms.iph.saddr != 0 ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (!rpl_ip_tunnel_key_match(&t->parms, flags, key))
			continue;

		if (t->parms.link == link)
			return t;
		else if (!cand)
			cand = t;
	}

	/* Passes 3 and 4 hash with a wildcard remote. */
	hash = rpl_ip_tunnel_hash(key, 0);
	head = &itn->tunnels[hash];

	/* Pass 3: local matches the tunnel's local address with wildcard
	 * remote, or local is a multicast group the tunnel is joined to.
	 */
	hlist_for_each_entry_rcu(t, head, hash_node) {
		if ((local != t->parms.iph.saddr || t->parms.iph.daddr != 0) &&
		    (local != t->parms.iph.daddr || !ipv4_is_multicast(local)))
			continue;

		if (!(t->dev->flags & IFF_UP))
			continue;

		if (!rpl_ip_tunnel_key_match(&t->parms, flags, key))
			continue;

		if (t->parms.link == link)
			return t;
		else if (!cand)
			cand = t;
	}

	if (flags & TUNNEL_NO_KEY)
		goto skip_key_lookup;

	/* Pass 4: fully wildcarded addresses, match on key alone. */
	hlist_for_each_entry_rcu(t, head, hash_node) {
		if (t->parms.i_key != key ||
		    t->parms.iph.saddr != 0 ||
		    t->parms.iph.daddr != 0 ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->parms.link == link)
			return t;
		else if (!cand)
			cand = t;
	}

skip_key_lookup:
	if (cand)
		return cand;

	/* Last resort: the per-netns fallback device, if up. */
	if (itn->fb_tunnel_dev && itn->fb_tunnel_dev->flags & IFF_UP)
		return netdev_priv(itn->fb_tunnel_dev);


	return NULL;
}
EXPORT_SYMBOL_GPL(rpl_ip_tunnel_lookup);
757
758 #endif