]> git.proxmox.com Git - ovs.git/blame - datapath/linux/compat/ip_tunnel.c
compat: Fixups for newer kernels
[ovs.git] / datapath / linux / compat / ip_tunnel.c
CommitLineData
e23775f2 1/*
8e53509c 2 * Copyright (c) 2013,2018 Nicira, Inc.
e23775f2
PS
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public
6 * License as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 */
14
15#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16
17#include <linux/capability.h>
18#include <linux/module.h>
19#include <linux/types.h>
20#include <linux/kernel.h>
e23775f2
PS
21#include <linux/slab.h>
22#include <linux/uaccess.h>
23#include <linux/skbuff.h>
24#include <linux/netdevice.h>
25#include <linux/in.h>
26#include <linux/tcp.h>
27#include <linux/udp.h>
28#include <linux/if_arp.h>
29#include <linux/mroute.h>
30#include <linux/init.h>
31#include <linux/in6.h>
32#include <linux/inetdevice.h>
33#include <linux/igmp.h>
34#include <linux/netfilter_ipv4.h>
35#include <linux/etherdevice.h>
36#include <linux/if_ether.h>
37#include <linux/if_vlan.h>
38#include <linux/rculist.h>
39#include <linux/err.h>
40
41#include <net/sock.h>
42#include <net/ip.h>
43#include <net/icmp.h>
44#include <net/protocol.h>
45#include <net/ip_tunnels.h>
46#include <net/arp.h>
47#include <net/checksum.h>
48#include <net/dsfield.h>
49#include <net/inet_ecn.h>
50#include <net/xfrm.h>
51#include <net/net_namespace.h>
52#include <net/netns/generic.h>
53#include <net/rtnetlink.h>
e23775f2
PS
54
55#if IS_ENABLED(CONFIG_IPV6)
56#include <net/ipv6.h>
57#include <net/ip6_fib.h>
58#include <net/ip6_route.h>
59#endif
60
61#include "compat.h"
62
1c95839f 63#ifndef USE_UPSTREAM_TUNNEL
8e53509c
WT
/* Table of registered tunnel encap handlers (e.g. FOU/GUE), indexed by
 * encap type; entries are published and dereferenced under RCU.
 */
const struct ip_tunnel_encap_ops __rcu *
		rpl_iptun_encaps[MAX_IPTUN_ENCAP_OPS] __read_mostly;
66
67static unsigned int rpl_ip_tunnel_hash(__be32 key, __be32 remote)
68{
69 return hash_32((__force u32)key ^ (__force u32)remote,
70 IP_TNL_HASH_BITS);
71}
72
73static bool rpl_ip_tunnel_key_match(const struct ip_tunnel_parm *p,
74 __be16 flags, __be32 key)
75{
76 if (p->i_flags & TUNNEL_KEY) {
77 if (flags & TUNNEL_KEY)
78 return key == p->i_key;
79 else
80 /* key expected, none present */
81 return false;
82 } else
83 return !(flags & TUNNEL_KEY);
84}
85
86static struct hlist_head *ip_bucket(struct ip_tunnel_net *itn,
87 struct ip_tunnel_parm *parms)
88{
89 unsigned int h;
90 __be32 remote;
91 __be32 i_key = parms->i_key;
92
93 if (parms->iph.daddr && !ipv4_is_multicast(parms->iph.daddr))
94 remote = parms->iph.daddr;
95 else
96 remote = 0;
97
98 if (!(parms->i_flags & TUNNEL_KEY) && (parms->i_flags & VTI_ISVTI))
99 i_key = 0;
100
101 h = rpl_ip_tunnel_hash(i_key, remote);
102 return &itn->tunnels[h];
103}
104
e23775f2
PS
105static void ip_tunnel_add(struct ip_tunnel_net *itn, struct ip_tunnel *t)
106{
8e53509c
WT
107 struct hlist_head *head = ip_bucket(itn, &t->parms);
108
e23775f2
PS
109 if (t->collect_md)
110 rcu_assign_pointer(itn->collect_md_tun, t);
8e53509c 111 hlist_add_head_rcu(&t->hash_node, head);
e23775f2
PS
112}
113
114static void ip_tunnel_del(struct ip_tunnel_net *itn, struct ip_tunnel *t)
115{
116 if (t->collect_md)
117 rcu_assign_pointer(itn->collect_md_tun, NULL);
8e53509c
WT
118 hlist_del_init_rcu(&t->hash_node);
119}
120
121static struct net_device *__ip_tunnel_create(struct net *net,
122 const struct rtnl_link_ops *ops,
123 struct ip_tunnel_parm *parms)
124{
125 int err;
126 struct ip_tunnel *tunnel;
127 struct net_device *dev;
128 char name[IFNAMSIZ];
129
130 if (parms->name[0])
131 strlcpy(name, parms->name, IFNAMSIZ);
132 else {
133 if (strlen(ops->kind) > (IFNAMSIZ - 3)) {
134 err = -E2BIG;
135 goto failed;
136 }
137 strlcpy(name, ops->kind, IFNAMSIZ);
138 strncat(name, "%d", 2);
139 }
140
141 ASSERT_RTNL();
142 dev = alloc_netdev(ops->priv_size, name, NET_NAME_UNKNOWN, ops->setup);
143 if (!dev) {
144 err = -ENOMEM;
145 goto failed;
146 }
147 dev_net_set(dev, net);
148
149 dev->rtnl_link_ops = ops;
150
151 tunnel = netdev_priv(dev);
152 tunnel->parms = *parms;
153 tunnel->net = net;
154
155 err = register_netdevice(dev);
156 if (err)
157 goto failed_free;
158
159 return dev;
160
161failed_free:
162 free_netdev(dev);
163failed:
164 return ERR_PTR(err);
e23775f2
PS
165}
166
/* Fill @fl4 with a flow key for this tunnel's outer IPv4 route lookup.
 * The memset matters: it zeroes every other field (and padding) of the
 * flow key before the relevant members are set.
 */
static inline void init_tunnel_flow(struct flowi4 *fl4,
				    int proto,
				    __be32 daddr, __be32 saddr,
				    __be32 key, __u8 tos, int oif)
{
	memset(fl4, 0, sizeof(*fl4));
	fl4->flowi4_oif = oif;
	fl4->daddr = daddr;
	fl4->saddr = saddr;
	fl4->flowi4_tos = tos;
	fl4->flowi4_proto = proto;
	fl4->fl4_gre_key = key;
}
180
181static int ip_tunnel_bind_dev(struct net_device *dev)
182{
183 struct net_device *tdev = NULL;
184 struct ip_tunnel *tunnel = netdev_priv(dev);
185 const struct iphdr *iph;
186 int hlen = LL_MAX_HEADER;
187 int mtu = ETH_DATA_LEN;
188 int t_hlen = tunnel->hlen + sizeof(struct iphdr);
189
190 iph = &tunnel->parms.iph;
191
192 /* Guess output device to choose reasonable mtu and needed_headroom */
193 if (iph->daddr) {
194 struct flowi4 fl4;
195 struct rtable *rt;
196
197 init_tunnel_flow(&fl4, iph->protocol, iph->daddr,
198 iph->saddr, tunnel->parms.o_key,
199 RT_TOS(iph->tos), tunnel->parms.link);
200 rt = ip_route_output_key(tunnel->net, &fl4);
201
202 if (!IS_ERR(rt)) {
8063e095 203 tdev = rt->dst.dev;
e23775f2
PS
204 ip_rt_put(rt);
205 }
206 if (dev->type != ARPHRD_ETHER)
207 dev->flags |= IFF_POINTOPOINT;
8e53509c
WT
208
209 dst_cache_reset(&tunnel->dst_cache);
e23775f2
PS
210 }
211
212 if (!tdev && tunnel->parms.link)
213 tdev = __dev_get_by_index(tunnel->net, tunnel->parms.link);
214
215 if (tdev) {
216 hlen = tdev->hard_header_len + tdev->needed_headroom;
217 mtu = tdev->mtu;
218 }
219
220 dev->needed_headroom = t_hlen + hlen;
221 mtu -= (dev->hard_header_len + t_hlen);
222
223 if (mtu < 68)
224 mtu = 68;
225
226 return mtu;
227}
228
06f1a61a 229int rpl___ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict)
e23775f2
PS
230{
231 struct ip_tunnel *tunnel = netdev_priv(dev);
232 int t_hlen = tunnel->hlen + sizeof(struct iphdr);
06f1a61a 233 int max_mtu = 0xFFF8 - dev->hard_header_len - t_hlen;
e23775f2 234
06f1a61a 235 if (new_mtu < 68)
e23775f2 236 return -EINVAL;
06f1a61a
DW
237
238 if (new_mtu > max_mtu) {
239 if (strict)
240 return -EINVAL;
241
242 new_mtu = max_mtu;
243 }
244
e23775f2
PS
245 dev->mtu = new_mtu;
246 return 0;
247}
248
06f1a61a
DW
249int rpl_ip_tunnel_change_mtu(struct net_device *dev, int new_mtu)
250{
251 return rpl___ip_tunnel_change_mtu(dev, new_mtu, true);
252}
253
8e53509c
WT
/*
 * Enforce path MTU for a packet about to be tunnelled over @rt.
 *
 * Computes the inner MTU available to the payload (honouring @df),
 * propagates it to the packet's dst entry, and emits an ICMP
 * "fragmentation needed" / ICMPv6 "packet too big" error for non-GSO
 * packets that exceed it.  Returns 0 to continue transmission or
 * -E2BIG when the packet was rejected (the ICMP error has been sent).
 */
static int rpl_tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
			       struct rtable *rt, __be16 df,
			       const struct iphdr *inner_iph)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	/* Inner payload size: total minus tunnel and link headers. */
	int pkt_size = skb->len - tunnel->hlen - dev->hard_header_len;
	int mtu;

	if (df)
		/* DF set: route MTU minus all tunnel encapsulation overhead. */
		mtu = dst_mtu(&rt->dst) - dev->hard_header_len
					- sizeof(struct iphdr) - tunnel->hlen;
	else
		mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;

	if (skb_dst(skb))
		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);

	if (skb->protocol == htons(ETH_P_IP)) {
		/* Only reject when the inner packet itself forbids fragmentation. */
		if (!skb_is_gso(skb) &&
		    (inner_iph->frag_off & htons(IP_DF)) &&
		    mtu < pkt_size) {
			memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
			return -E2BIG;
		}
	}
#if IS_ENABLED(CONFIG_IPV6)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);

		/* Record the reduced MTU on host routes (or connected
		 * tunnels) so subsequent traffic sees it.
		 */
		if (rt6 && mtu < dst_mtu(skb_dst(skb)) &&
		    mtu >= IPV6_MIN_MTU) {
			if ((tunnel->parms.iph.daddr &&
			     !ipv4_is_multicast(tunnel->parms.iph.daddr)) ||
			    rt6->rt6i_dst.plen == 128) {
				rt6->rt6i_flags |= RTF_MODIFIED;
				dst_metric_set(skb_dst(skb), RTAX_MTU, mtu);
			}
		}

		if (!skb_is_gso(skb) && mtu >= IPV6_MIN_MTU &&
		    mtu < pkt_size) {
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
			return -E2BIG;
		}
	}
#endif
	return 0;
}
303
304void rpl_ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
305 const struct iphdr *tnl_params, const u8 protocol)
306{
307 struct ip_tunnel *tunnel = netdev_priv(dev);
308 const struct iphdr *inner_iph;
309 struct flowi4 fl4;
310 u8 tos, ttl;
311 __be16 df;
312 struct rtable *rt; /* Route to the other host */
313 unsigned int max_headroom; /* The extra header space needed */
314 __be32 dst;
315 bool connected;
316
317 inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
318 connected = (tunnel->parms.iph.daddr != 0);
319
320 dst = tnl_params->daddr;
321 if (dst == 0) {
322 /* NBMA tunnel */
323
324 if (skb_dst(skb) == NULL) {
325 dev->stats.tx_fifo_errors++;
326 goto tx_error;
327 }
328
329 if (skb->protocol == htons(ETH_P_IP)) {
330 rt = skb_rtable(skb);
331 dst = rt_nexthop(rt, inner_iph->daddr);
332 }
333#if IS_ENABLED(CONFIG_IPV6)
334 else if (skb->protocol == htons(ETH_P_IPV6)) {
335 const struct in6_addr *addr6;
336 struct neighbour *neigh;
337 bool do_tx_error_icmp;
338 int addr_type;
339
340 neigh = dst_neigh_lookup(skb_dst(skb),
341 &ipv6_hdr(skb)->daddr);
342 if (neigh == NULL)
343 goto tx_error;
344
345 addr6 = (const struct in6_addr *)&neigh->primary_key;
346 addr_type = ipv6_addr_type(addr6);
347
348 if (addr_type == IPV6_ADDR_ANY) {
349 addr6 = &ipv6_hdr(skb)->daddr;
350 addr_type = ipv6_addr_type(addr6);
351 }
352
353 if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
354 do_tx_error_icmp = true;
355 else {
356 do_tx_error_icmp = false;
357 dst = addr6->s6_addr32[3];
358 }
359 neigh_release(neigh);
360 if (do_tx_error_icmp)
361 goto tx_error_icmp;
362 }
363#endif
364 else
365 goto tx_error;
366
367 connected = false;
368 }
369
370 tos = tnl_params->tos;
371 if (tos & 0x1) {
372 tos &= ~0x1;
373 if (skb->protocol == htons(ETH_P_IP)) {
374 tos = inner_iph->tos;
375 connected = false;
376 } else if (skb->protocol == htons(ETH_P_IPV6)) {
377 tos = ipv6_get_dsfield((const struct ipv6hdr *)inner_iph);
378 connected = false;
379 }
380 }
381
382 init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr,
383 tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link);
384
385 if (ovs_ip_tunnel_encap(skb, tunnel, &protocol, &fl4) < 0)
386 goto tx_error;
387
388 rt = connected ? dst_cache_get_ip4(&tunnel->dst_cache, &fl4.saddr) :
389 NULL;
390
391 if (!rt) {
392 rt = ip_route_output_key(tunnel->net, &fl4);
393
394 if (IS_ERR(rt)) {
395 dev->stats.tx_carrier_errors++;
396 goto tx_error;
397 }
398 if (connected)
399 dst_cache_set_ip4(&tunnel->dst_cache, &rt->dst,
400 fl4.saddr);
401 }
402
403 if (rt->dst.dev == dev) {
404 ip_rt_put(rt);
405 dev->stats.collisions++;
406 goto tx_error;
407 }
408
409 if (rpl_tnl_update_pmtu(dev, skb, rt,
410 tnl_params->frag_off, inner_iph)) {
411 ip_rt_put(rt);
412 goto tx_error;
413 }
414
415 if (tunnel->err_count > 0) {
416 if (time_before(jiffies,
417 tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
418 tunnel->err_count--;
419
420 memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
421 dst_link_failure(skb);
422 } else
423 tunnel->err_count = 0;
424 }
425
426 tos = ip_tunnel_ecn_encap(tos, inner_iph, skb);
427 ttl = tnl_params->ttl;
428 if (ttl == 0) {
429 if (skb->protocol == htons(ETH_P_IP))
430 ttl = inner_iph->ttl;
431#if IS_ENABLED(CONFIG_IPV6)
432 else if (skb->protocol == htons(ETH_P_IPV6))
433 ttl = ((const struct ipv6hdr *)inner_iph)->hop_limit;
434#endif
435 else
436 ttl = ip4_dst_hoplimit(&rt->dst);
437 }
438
439 df = tnl_params->frag_off;
440 if (skb->protocol == htons(ETH_P_IP))
441 df |= (inner_iph->frag_off&htons(IP_DF));
442
443 max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr)
444 + rt->dst.header_len;
445 if (max_headroom > dev->needed_headroom)
446 dev->needed_headroom = max_headroom;
447
448 if (skb_cow_head(skb, dev->needed_headroom)) {
449 ip_rt_put(rt);
450 dev->stats.tx_dropped++;
451 kfree_skb(skb);
452 return;
453 }
454
455 iptunnel_xmit(skb->sk, rt, skb, fl4.saddr, fl4.daddr, protocol,
456 tos, ttl, df, !net_eq(tunnel->net, dev_net(dev)));
457
458 return;
459
460#if IS_ENABLED(CONFIG_IPV6)
461tx_error_icmp:
462 dst_link_failure(skb);
463#endif
464tx_error:
465 dev->stats.tx_errors++;
466 kfree_skb(skb);
467}
468EXPORT_SYMBOL_GPL(rpl_ip_tunnel_xmit);
469
e23775f2
PS
470static void ip_tunnel_dev_free(struct net_device *dev)
471{
e23775f2 472 free_percpu(dev->tstats);
436d36db 473#ifndef HAVE_NEEDS_FREE_NETDEV
e23775f2 474 free_netdev(dev);
436d36db 475#endif
e23775f2
PS
476}
477
e23775f2 478void rpl_ip_tunnel_dellink(struct net_device *dev, struct list_head *head)
e23775f2
PS
479{
480 struct ip_tunnel *tunnel = netdev_priv(dev);
481 struct ip_tunnel_net *itn;
482
483 itn = net_generic(tunnel->net, tunnel->ip_tnl_net_id);
484
485 ip_tunnel_del(itn, netdev_priv(dev));
d97d7f77 486 unregister_netdevice_queue(dev, head);
e23775f2
PS
487}
488
489int rpl_ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
490 struct rtnl_link_ops *ops, char *devname)
491{
492 struct ip_tunnel_net *itn = net_generic(net, ip_tnl_net_id);
8e53509c
WT
493 struct ip_tunnel_parm parms;
494 unsigned int i;
e23775f2 495
8e53509c
WT
496 for (i = 0; i < IP_TNL_HASH_SIZE; i++)
497 INIT_HLIST_HEAD(&itn->tunnels[i]);
498
499 if (!ops) {
500 itn->fb_tunnel_dev = NULL;
501 return 0;
502 }
503
504 memset(&parms, 0, sizeof(parms));
505 if (devname)
506 strlcpy(parms.name, devname, IFNAMSIZ);
507
508 rtnl_lock();
509 itn->fb_tunnel_dev = __ip_tunnel_create(net, ops, &parms);
510 /* FB netdevice is special: we have one, and only one per netns.
511 * * Allowing to move it to another netns is clearly unsafe.
512 * */
513 if (!IS_ERR(itn->fb_tunnel_dev)) {
514 itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
515 itn->fb_tunnel_dev->mtu = ip_tunnel_bind_dev(itn->fb_tunnel_dev);
516 ip_tunnel_add(itn, netdev_priv(itn->fb_tunnel_dev));
517 }
518 rtnl_unlock();
519
520 return PTR_ERR_OR_ZERO(itn->fb_tunnel_dev);
e23775f2
PS
521}
522
/* Queue every tunnel device managed by @itn/@ops for unregistration.
 *
 * NOTE(review): itn->fb_tunnel_dev is NULL when the netns was initialised
 * without rtnl ops (see rpl_ip_tunnel_init_net), and dev_net() on it
 * would then dereference NULL — confirm callers only reach this with a
 * fallback device present.
 */
static void ip_tunnel_destroy(struct ip_tunnel_net *itn, struct list_head *head,
			      struct rtnl_link_ops *ops)
{
	struct net *net = dev_net(itn->fb_tunnel_dev);
	struct net_device *dev, *aux;
	int h;

	/* First, all devices of this link type in the fallback's netns. */
	for_each_netdev_safe(net, dev, aux)
		if (dev->rtnl_link_ops == ops)
			unregister_netdevice_queue(dev, head);

	for (h = 0; h < IP_TNL_HASH_SIZE; h++) {
		struct ip_tunnel *t;
		struct hlist_node *n;
		struct hlist_head *thead = &itn->tunnels[h];

		hlist_for_each_entry_safe(t, n, thead, hash_node)
			/* If dev is in the same netns, it has already
			 * been added to the list by the previous loop.
			 */
			if (!net_eq(dev_net(t->dev), net))
				unregister_netdevice_queue(t->dev, head);
	}
}
547
8e53509c
WT
548void rpl_ip_tunnel_delete_net(struct ip_tunnel_net *itn,
549 struct rtnl_link_ops *ops)
e23775f2
PS
550{
551 LIST_HEAD(list);
552
553 rtnl_lock();
554 ip_tunnel_destroy(itn, &list, ops);
555 unregister_netdevice_many(&list);
556 rtnl_unlock();
557}
558
559int rpl_ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
560 struct ip_tunnel_parm *p)
561{
562 struct ip_tunnel *nt;
563 struct net *net = dev_net(dev);
564 struct ip_tunnel_net *itn;
565 int mtu;
566 int err;
567
568 nt = netdev_priv(dev);
569 itn = net_generic(net, nt->ip_tnl_net_id);
570
571 if (nt->collect_md) {
572 if (rtnl_dereference(itn->collect_md_tun))
573 return -EEXIST;
574 } else {
575 return -EOPNOTSUPP;
576 }
577
578 nt->net = net;
579 nt->parms = *p;
580 err = register_netdevice(dev);
581 if (err)
582 goto out;
583
584 if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
585 eth_hw_addr_random(dev);
586
587 mtu = ip_tunnel_bind_dev(dev);
588 if (!tb[IFLA_MTU])
589 dev->mtu = mtu;
590
591 ip_tunnel_add(itn, nt);
592out:
593 return err;
594}
595
596int rpl_ip_tunnel_init(struct net_device *dev)
597{
598 struct ip_tunnel *tunnel = netdev_priv(dev);
599 struct iphdr *iph = &tunnel->parms.iph;
8e53509c 600 int err;
e23775f2 601
8e53509c
WT
602#ifndef HAVE_NEEDS_FREE_NETDEV
603 dev->destructor = ip_tunnel_dev_free;
604#else
605 dev->needs_free_netdev = true;
606 dev->priv_destructor = ip_tunnel_dev_free;
607#endif
608 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
e23775f2
PS
609 if (!dev->tstats)
610 return -ENOMEM;
8e53509c
WT
611
612 err = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
613 if (err) {
614 free_percpu(dev->tstats);
615 return err;
616 }
617
618 err = gro_cells_init(&tunnel->gro_cells, dev);
619 if (err) {
620 dst_cache_destroy(&tunnel->dst_cache);
621 free_percpu(dev->tstats);
622 return err;
623 }
624
e23775f2
PS
625 tunnel->dev = dev;
626 tunnel->net = dev_net(dev);
627 strcpy(tunnel->parms.name, dev->name);
628 iph->version = 4;
629 iph->ihl = 5;
630
8e53509c 631 if (tunnel->collect_md) {
e23775f2 632 dev->features |= NETIF_F_NETNS_LOCAL;
8e53509c
WT
633 netif_keep_dst(dev);
634 }
e23775f2
PS
635 return 0;
636}
637
638void rpl_ip_tunnel_uninit(struct net_device *dev)
639{
640 struct ip_tunnel *tunnel = netdev_priv(dev);
641 struct net *net = tunnel->net;
642 struct ip_tunnel_net *itn;
643
644 itn = net_generic(net, tunnel->ip_tnl_net_id);
645 ip_tunnel_del(itn, netdev_priv(dev));
646}
647
648/* Do least required initialization, rest of init is done in tunnel_init call */
649void rpl_ip_tunnel_setup(struct net_device *dev, int net_id)
650{
651 struct ip_tunnel *tunnel = netdev_priv(dev);
652
653 tunnel->ip_tnl_net_id = net_id;
654}
00d662ba
PS
655
656int rpl_ip_tunnel_get_iflink(const struct net_device *dev)
657{
658 struct ip_tunnel *tunnel = netdev_priv(dev);
659
660 return tunnel->parms.link;
661}
662
03469419
PS
663struct net *rpl_ip_tunnel_get_link_net(const struct net_device *dev)
664{
665 struct ip_tunnel *tunnel = netdev_priv(dev);
666
667 return tunnel->net;
668}
669
8e53509c
WT
670struct ip_tunnel *rpl_ip_tunnel_lookup(struct ip_tunnel_net *itn,
671 int link, __be16 flags,
672 __be32 remote, __be32 local,
673 __be32 key)
674{
675 unsigned int hash;
676 struct ip_tunnel *t, *cand = NULL;
677 struct hlist_head *head;
678
679 hash = rpl_ip_tunnel_hash(key, remote);
680 head = &itn->tunnels[hash];
681
682 hlist_for_each_entry_rcu(t, head, hash_node) {
683 if (local != t->parms.iph.saddr ||
684 remote != t->parms.iph.daddr ||
685 !(t->dev->flags & IFF_UP))
686 continue;
687
688 if (!rpl_ip_tunnel_key_match(&t->parms, flags, key))
689 continue;
690
691 if (t->parms.link == link)
692 return t;
693 else
694 cand = t;
695 }
696
697 hlist_for_each_entry_rcu(t, head, hash_node) {
698 if (remote != t->parms.iph.daddr ||
699 t->parms.iph.saddr != 0 ||
700 !(t->dev->flags & IFF_UP))
701 continue;
702
703 if (!rpl_ip_tunnel_key_match(&t->parms, flags, key))
704 continue;
705
706 if (t->parms.link == link)
707 return t;
708 else if (!cand)
709 cand = t;
710 }
711
712 hash = rpl_ip_tunnel_hash(key, 0);
713 head = &itn->tunnels[hash];
714
715 hlist_for_each_entry_rcu(t, head, hash_node) {
716 if ((local != t->parms.iph.saddr || t->parms.iph.daddr != 0) &&
717 (local != t->parms.iph.daddr || !ipv4_is_multicast(local)))
718 continue;
719
720 if (!(t->dev->flags & IFF_UP))
721 continue;
722
723 if (!rpl_ip_tunnel_key_match(&t->parms, flags, key))
724 continue;
725
726 if (t->parms.link == link)
727 return t;
728 else if (!cand)
729 cand = t;
730 }
731
732 if (flags & TUNNEL_NO_KEY)
733 goto skip_key_lookup;
734
735 hlist_for_each_entry_rcu(t, head, hash_node) {
736 if (t->parms.i_key != key ||
737 t->parms.iph.saddr != 0 ||
738 t->parms.iph.daddr != 0 ||
739 !(t->dev->flags & IFF_UP))
740 continue;
741
742 if (t->parms.link == link)
743 return t;
744 else if (!cand)
745 cand = t;
746 }
747
748skip_key_lookup:
749 if (cand)
750 return cand;
751
752 if (itn->fb_tunnel_dev && itn->fb_tunnel_dev->flags & IFF_UP)
753 return netdev_priv(itn->fb_tunnel_dev);
754
755
756 return NULL;
757}
758EXPORT_SYMBOL_GPL(rpl_ip_tunnel_lookup);
759
e23775f2 760#endif