/*
 * VXLAN: Virtual eXtensible Local Area Network
 *
 * Copyright (c) 2012-2013 Vyatta Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/rculist.h>
#include <linux/netdevice.h>
#include <linux/netdev_features.h>
#include <linux/udp.h>
#include <linux/igmp.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/hash.h>
#include <linux/ethtool.h>
#include <net/dst_metadata.h>
#include <net/ndisc.h>
#include <net/ip_tunnels.h>
#include <net/udp_tunnel.h>
#include <net/rtnetlink.h>
#include <net/route.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/protocol.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/addrconf.h>
#include <net/ip6_tunnel.h>
#include <net/ip6_checksum.h>
#include <net/ip6_route.h>
#endif

#include <net/tun_proto.h>
#include <net/vxlan.h>
#include "vport-netdev.h"
#ifndef USE_UPSTREAM_TUNNEL
#define VXLAN_VERSION	"0.1"

#define PORT_HASH_BITS	8
#define PORT_HASH_SIZE	(1 << PORT_HASH_BITS)
#define FDB_AGE_DEFAULT 300 /* 5 min */
#define FDB_AGE_INTERVAL (10 * HZ)	/* rescan interval */
/* UDP port for VXLAN traffic.
 * The IANA assigned port is 4789, but the Linux default is 8472
 * for compatibility with early adopters.
 */
static unsigned short vxlan_port __read_mostly = 8472;
module_param_named(udp_port, vxlan_port, ushort, 0444);
MODULE_PARM_DESC(udp_port, "Destination UDP port");
static int vxlan_net_id;
static struct rtnl_link_ops vxlan_link_ops;

static const u8 all_zeros_mac[ETH_ALEN + 2];

static int vxlan_sock_add(struct vxlan_dev *vxlan);
/* per-network namespace private data for this module */
struct vxlan_net {
	struct list_head  vxlan_list;
	struct hlist_head sock_list[PORT_HASH_SIZE];
	spinlock_t	  sock_lock;
};

/* Forwarding table entry */
struct vxlan_fdb {
	struct hlist_node hlist;	/* linked list of entries */
	struct rcu_head	  rcu;
	unsigned long	  updated;	/* jiffies */
	unsigned long	  used;
	struct list_head  remotes;
	u8		  eth_addr[ETH_ALEN];
	u16		  state;	/* see ndm_state */
	u8		  flags;	/* see ndm_flags */
};
/* salt for hash table */
static u32 vxlan_salt __read_mostly;

static inline bool vxlan_collect_metadata(struct vxlan_sock *vs)
{
	return vs->flags & VXLAN_F_COLLECT_METADATA ||
	       ip_tunnel_collect_metadata();
}
#if IS_ENABLED(CONFIG_IPV6)
static inline
bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
{
	if (a->sa.sa_family != b->sa.sa_family)
		return false;
	if (a->sa.sa_family == AF_INET6)
		return ipv6_addr_equal(&a->sin6.sin6_addr, &b->sin6.sin6_addr);
	else
		return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
}

static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
{
	if (ipa->sa.sa_family == AF_INET6)
		return ipv6_addr_any(&ipa->sin6.sin6_addr);
	else
		return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
}

static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
{
	if (ipa->sa.sa_family == AF_INET6)
		return ipv6_addr_is_multicast(&ipa->sin6.sin6_addr);
	else
		return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
}

#else /* !CONFIG_IPV6 */

static inline
bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
{
	return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
}

static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
{
	return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
}

static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
{
	return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
}

#endif /* CONFIG_IPV6 */
/* Virtual Network hash table head */
static inline struct hlist_head *vni_head(struct vxlan_sock *vs, __be32 vni)
{
	return &vs->vni_list[hash_32((__force u32)vni, VNI_HASH_BITS)];
}

/* Socket hash table head */
static inline struct hlist_head *vs_head(struct net *net, __be16 port)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);

	return &vn->sock_list[hash_32(ntohs(port), PORT_HASH_BITS)];
}
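/*
 * Note: lookups here are two-level. A listening UDP socket (vxlan_sock)
 * is found by hashing its UDP port into the per-netns sock_list; the
 * VXLAN devices sharing that socket are then hashed by VNI into the
 * socket's vni_list. Both tables are RCU-protected hlists, so the
 * receive path can walk them without taking vn->sock_lock.
 */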
/* Find VXLAN socket based on network namespace, address family and UDP port
 * and enabled unshareable flags.
 */
static struct vxlan_sock *vxlan_find_sock(struct net *net, sa_family_t family,
					  __be16 port, u32 flags)
{
	struct vxlan_sock *vs;

	flags &= VXLAN_F_RCV_FLAGS;

	hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) {
		if (inet_sk(vs->sock->sk)->inet_sport == port &&
		    vxlan_get_sk_family(vs) == family &&
		    vs->flags == flags)
			return vs;
	}
	return NULL;
}
static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, __be32 vni)
{
	struct vxlan_dev *vxlan;

	/* For flow based devices, map all packets to VNI 0 */
	if (vs->flags & VXLAN_F_COLLECT_METADATA)
		vni = 0;

	hlist_for_each_entry_rcu(vxlan, vni_head(vs, vni), hlist) {
		if (vxlan->default_dst.remote_vni == vni)
			return vxlan;
	}
	return NULL;
}
/* Look up VNI in a per net namespace table */
static struct vxlan_dev *vxlan_find_vni(struct net *net, __be32 vni,
					sa_family_t family, __be16 port,
					u32 flags)
{
	struct vxlan_sock *vs;

	vs = vxlan_find_sock(net, family, port, flags);
	if (!vs)
		return NULL;

	return vxlan_vs_find_vni(vs, vni);
}
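/*
 * The socket match above is exact on the VXLAN_F_RCV_FLAGS subset: two
 * devices can share one UDP socket only if every flag that changes how
 * received packets are parsed (GBP, GPE, remote checksum offload, ...)
 * is identical, since those decisions are made per socket, not per
 * device.
 */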
static int vxlan_fdb_create(struct vxlan_dev *vxlan,
			    const u8 *mac, union vxlan_addr *ip,
			    __u16 state, __u16 flags,
			    __be16 port, __be32 vni, __u32 ifindex,
			    __u8 ndm_flags)
{
	return 0;
}

static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
{
}

static inline size_t vxlan_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
		+ nla_total_size(ETH_ALEN) /* NDA_LLADDR */
		+ nla_total_size(sizeof(struct in6_addr)) /* NDA_DST */
		+ nla_total_size(sizeof(__be16)) /* NDA_PORT */
		+ nla_total_size(sizeof(__be32)) /* NDA_VNI */
		+ nla_total_size(sizeof(__u32)) /* NDA_IFINDEX */
		+ nla_total_size(sizeof(__s32)) /* NDA_LINK_NETNSID */
		+ nla_total_size(sizeof(struct nda_cacheinfo));
}
#ifdef HAVE_UDP_OFFLOAD
#ifdef HAVE_NETIF_F_GSO_TUNNEL_REMCSUM

static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb,
					  unsigned int off,
					  struct vxlanhdr *vh, size_t hdrlen,
					  __be32 vni_field,
					  struct gro_remcsum *grc,
					  bool nopartial)
{
	size_t start, offset;

	if (skb->remcsum_offload)
		return vh;

	if (!NAPI_GRO_CB(skb)->csum_valid)
		return NULL;

	start = vxlan_rco_start(vni_field);
	offset = start + vxlan_rco_offset(vni_field);

	vh = skb_gro_remcsum_process(skb, (void *)vh, off, hdrlen,
				     start, offset, grc, nopartial);

	skb->remcsum_offload = 1;

	return vh;
}
#else
static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb,
					  unsigned int off,
					  struct vxlanhdr *vh, size_t hdrlen,
					  u32 data, struct gro_remcsum *grc,
					  bool nopartial)
{
	return NULL;
}
#endif
#ifndef HAVE_UDP_OFFLOAD_ARG_UOFF
static struct sk_buff **vxlan_gro_receive(struct sk_buff **head,
					  struct sk_buff *skb)
#else
static struct sk_buff **vxlan_gro_receive(struct sk_buff **head,
					  struct sk_buff *skb,
					  struct udp_offload *uoff)
#endif
{
#ifdef HAVE_UDP_OFFLOAD_ARG_UOFF
	struct vxlan_sock *vs = container_of(uoff, struct vxlan_sock,
					     udp_offloads);
#else
	struct vxlan_sock *vs = NULL;
#endif
	struct sk_buff *p, **pp = NULL;
	struct vxlanhdr *vh, *vh2;
	unsigned int hlen, off_vx;
	int flush = 1;
	__be32 flags;
	struct gro_remcsum grc;

	skb_gro_remcsum_init(&grc);

	off_vx = skb_gro_offset(skb);
	hlen = off_vx + sizeof(*vh);
	vh = skb_gro_header_fast(skb, off_vx);
	if (skb_gro_header_hard(skb, hlen)) {
		vh = skb_gro_header_slow(skb, hlen, off_vx);
		if (unlikely(!vh))
			goto out;
	}

	skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr));

	flags = vh->vx_flags;

	if ((flags & VXLAN_HF_RCO) && vs && (vs->flags & VXLAN_F_REMCSUM_RX)) {
		vh = vxlan_gro_remcsum(skb, off_vx, vh, sizeof(struct vxlanhdr),
				       vh->vx_vni, &grc,
				       !!(vs->flags &
					  VXLAN_F_REMCSUM_NOPARTIAL));
		if (!vh)
			goto out;
	}

	skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */

	for (p = *head; p; p = p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		vh2 = (struct vxlanhdr *)(p->data + off_vx);
		if (vh->vx_flags != vh2->vx_flags ||
		    vh->vx_vni != vh2->vx_vni) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}

	pp = eth_gro_receive(head, skb);
	flush = 0;

out:
	skb_gro_remcsum_cleanup(skb, &grc);
	NAPI_GRO_CB(skb)->flush |= flush;

	return pp;
}
#ifndef HAVE_UDP_OFFLOAD_ARG_UOFF
static int vxlan_gro_complete(struct sk_buff *skb, int nhoff)
#else
static int vxlan_gro_complete(struct sk_buff *skb, int nhoff,
			      struct udp_offload *uoff)
#endif
{
	/* Sets 'skb->inner_mac_header' since we are always called with
	 * 'skb->encapsulation' set.
	 */
	udp_tunnel_gro_complete(skb, nhoff);

	return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr));
}
#endif /* HAVE_UDP_OFFLOAD */
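/*
 * Rough GRO flow for VXLAN: vxlan_gro_receive() validates the 8-byte
 * VXLAN header, optionally undoes remote checksum offload, and marks
 * candidate packets as different flows whenever their vx_flags/vx_vni
 * differ, before delegating to eth_gro_receive() for the inner Ethernet
 * frame; vxlan_gro_complete() later fixes up the headers of the merged
 * skb.
 */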
/* Notify netdevs that UDP port started listening */
static void vxlan_notify_add_rx_port(struct vxlan_sock *vs)
{
	struct net_device *dev;
	struct sock *sk = vs->sock->sk;
	struct net *net = sock_net(sk);
	sa_family_t sa_family = vxlan_get_sk_family(vs);

#ifdef HAVE_UDP_OFFLOAD
	if (sa_family == AF_INET) {
		int err;

		err = udp_add_offload(net, &vs->udp_offloads);
		if (err)
			pr_warn("vxlan: udp_add_offload failed with status %d\n", err);
	}
#endif

	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
#ifdef HAVE_NDO_ADD_VXLAN_PORT
		__be16 port = inet_sk(sk)->inet_sport;

		if (dev->netdev_ops->ndo_add_vxlan_port)
			dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family,
							    port);
#elif defined(HAVE_NDO_UDP_TUNNEL_ADD)
		struct udp_tunnel_info ti;
		if (vs->flags & VXLAN_F_GPE)
			ti.type = UDP_TUNNEL_TYPE_VXLAN_GPE;
		else
			ti.type = UDP_TUNNEL_TYPE_VXLAN;
		ti.sa_family = sa_family;
		ti.port = inet_sk(sk)->inet_sport;

		if (dev->netdev_ops->ndo_udp_tunnel_add)
			dev->netdev_ops->ndo_udp_tunnel_add(dev, &ti);
#endif
	}
	rcu_read_unlock();
}
/* Notify netdevs that UDP port is no more listening */
static void vxlan_notify_del_rx_port(struct vxlan_sock *vs)
{
	struct net_device *dev;
	struct sock *sk = vs->sock->sk;
	struct net *net = sock_net(sk);
	sa_family_t sa_family = vxlan_get_sk_family(vs);

	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
#ifdef HAVE_NDO_ADD_VXLAN_PORT
		__be16 port = inet_sk(sk)->inet_sport;

		if (dev->netdev_ops->ndo_del_vxlan_port)
			dev->netdev_ops->ndo_del_vxlan_port(dev, sa_family,
							    port);
#elif defined(HAVE_NDO_UDP_TUNNEL_ADD)
		struct udp_tunnel_info ti;
		if (vs->flags & VXLAN_F_GPE)
			ti.type = UDP_TUNNEL_TYPE_VXLAN_GPE;
		else
			ti.type = UDP_TUNNEL_TYPE_VXLAN;
		ti.port = inet_sk(sk)->inet_sport;
		ti.sa_family = sa_family;

		if (dev->netdev_ops->ndo_udp_tunnel_del)
			dev->netdev_ops->ndo_udp_tunnel_del(dev, &ti);
#endif
	}
	rcu_read_unlock();

#ifdef HAVE_UDP_OFFLOAD
	if (sa_family == AF_INET)
		udp_del_offload(&vs->udp_offloads);
#endif
}
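/*
 * The two notifier helpers above exist for hardware receive offload:
 * NICs that can parse VXLAN need to know which UDP ports carry it.
 * Depending on kernel version this is either ndo_add/del_vxlan_port or
 * the more general ndo_udp_tunnel_add/del with a udp_tunnel_info
 * describing tunnel type, address family and port.
 */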
/* See if multicast group is already in use by other ID */
static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
{
	struct vxlan_dev *vxlan;
	struct vxlan_sock *sock4;
	struct vxlan_sock *sock6 = NULL;
	unsigned short family = dev->default_dst.remote_ip.sa.sa_family;

	sock4 = rtnl_dereference(dev->vn4_sock);

	/* The vxlan_sock is only used by dev, leaving group has
	 * no effect on other vxlan devices.
	 */
	if (family == AF_INET && sock4 && atomic_read(&sock4->refcnt) == 1)
		return false;
#if IS_ENABLED(CONFIG_IPV6)
	sock6 = rtnl_dereference(dev->vn6_sock);
	if (family == AF_INET6 && sock6 && atomic_read(&sock6->refcnt) == 1)
		return false;
#endif

	list_for_each_entry(vxlan, &vn->vxlan_list, next) {
		if (!netif_running(vxlan->dev) || vxlan == dev)
			continue;

		if (family == AF_INET &&
		    rtnl_dereference(vxlan->vn4_sock) != sock4)
			continue;
#if IS_ENABLED(CONFIG_IPV6)
		if (family == AF_INET6 &&
		    rtnl_dereference(vxlan->vn6_sock) != sock6)
			continue;
#endif

		if (!vxlan_addr_equal(&vxlan->default_dst.remote_ip,
				      &dev->default_dst.remote_ip))
			continue;

		if (vxlan->default_dst.remote_ifindex !=
		    dev->default_dst.remote_ifindex)
			continue;

		return true;
	}

	return false;
}
static bool __vxlan_sock_release_prep(struct vxlan_sock *vs)
{
	struct vxlan_net *vn;

	if (!vs)
		return false;
	if (!atomic_dec_and_test(&vs->refcnt))
		return false;

	vn = net_generic(sock_net(vs->sock->sk), vxlan_net_id);
	spin_lock(&vn->sock_lock);
	hlist_del_rcu(&vs->hlist);
	vxlan_notify_del_rx_port(vs);
	spin_unlock(&vn->sock_lock);

	return true;
}
static void vxlan_sock_release(struct vxlan_dev *vxlan)
{
	struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
#if IS_ENABLED(CONFIG_IPV6)
	struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);

	rcu_assign_pointer(vxlan->vn6_sock, NULL);
#endif

	rcu_assign_pointer(vxlan->vn4_sock, NULL);
	synchronize_net();

	if (__vxlan_sock_release_prep(sock4)) {
		udp_tunnel_sock_release(sock4->sock);
		kfree(sock4);
	}

#if IS_ENABLED(CONFIG_IPV6)
	if (__vxlan_sock_release_prep(sock6)) {
		udp_tunnel_sock_release(sock6->sock);
		kfree(sock6);
	}
#endif
}
/* Update multicast group membership when first VNI on
 * multicast address is brought up
 */
static int vxlan_igmp_join(struct vxlan_dev *vxlan)
{
	return -EINVAL;
}

/* Inverse of vxlan_igmp_join when last VNI is brought down */
static int vxlan_igmp_leave(struct vxlan_dev *vxlan)
{
	return -EINVAL;
}
static bool vxlan_remcsum(struct vxlanhdr *unparsed,
			  struct sk_buff *skb, u32 vxflags)
{
#ifndef USE_UPSTREAM_TUNNEL
	size_t start, offset;

	if (!(unparsed->vx_flags & VXLAN_HF_RCO) || skb->remcsum_offload)
		goto out;

	start = vxlan_rco_start(unparsed->vx_vni);
	offset = start + vxlan_rco_offset(unparsed->vx_vni);

	if (!pskb_may_pull(skb, offset + sizeof(u16)))
		return false;

	skb_remcsum_process(skb, (void *)(vxlan_hdr(skb) + 1), start, offset,
			    !!(vxflags & VXLAN_F_REMCSUM_NOPARTIAL));
out:
	unparsed->vx_flags &= ~VXLAN_HF_RCO;
	unparsed->vx_vni &= VXLAN_VNI_MASK;
#endif
	return true;
}
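/*
 * Remote checksum offload (RCO) note: when VXLAN_HF_RCO is set, the low
 * bits of the VNI field encode where the deferred inner checksum work
 * must happen. vxlan_rco_start() recovers the checksum start offset and
 * vxlan_rco_offset() the additional distance to the checksum field
 * itself, which is why the code above only needs to pull the packet up
 * to offset + sizeof(u16) before calling skb_remcsum_process().
 */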
static void vxlan_parse_gbp_hdr(struct vxlanhdr *unparsed,
				struct sk_buff *skb, u32 vxflags,
				struct vxlan_metadata *md)
{
	struct vxlanhdr_gbp *gbp = (struct vxlanhdr_gbp *)unparsed;
	struct metadata_dst *tun_dst;

	if (!(unparsed->vx_flags & VXLAN_HF_GBP))
		goto out;

	md->gbp = ntohs(gbp->policy_id);

	tun_dst = (struct metadata_dst *)skb_dst(skb);
	if (tun_dst) {
		tun_dst->u.tun_info.key.tun_flags |= TUNNEL_VXLAN_OPT;
		tun_dst->u.tun_info.options_len = sizeof(*md);
	}
	if (gbp->dont_learn)
		md->gbp |= VXLAN_GBP_DONT_LEARN;

	if (gbp->policy_applied)
		md->gbp |= VXLAN_GBP_POLICY_APPLIED;

	/* In flow-based mode, GBP is carried in dst_metadata */
	if (!(vxflags & VXLAN_F_COLLECT_METADATA))
		skb->mark = md->gbp;
out:
	unparsed->vx_flags &= ~VXLAN_GBP_USED_BITS;
}
static bool vxlan_parse_gpe_hdr(struct vxlanhdr *unparsed,
				__be16 *protocol,
				struct sk_buff *skb, u32 vxflags)
{
	struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)unparsed;

	/* Need to have Next Protocol set for interfaces in GPE mode. */
	if (!gpe->np_applied)
		return false;
	/* "The initial version is 0. If a receiver does not support the
	 * version indicated it MUST drop the packet."
	 */
	if (gpe->version != 0)
		return false;
	/* "When the O bit is set to 1, the packet is an OAM packet and OAM
	 * processing MUST occur." However, we don't implement OAM
	 * processing, thus drop the packet.
	 */
	if (gpe->oam_flag)
		return false;

	*protocol = tun_p_to_eth_p(gpe->next_protocol);
	if (!*protocol)
		return false;

	unparsed->vx_flags &= ~VXLAN_GPE_USED_BITS;
	return true;
}
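/*
 * GPE note: VXLAN-GPE replaces the implicit "inner frame is Ethernet"
 * assumption of plain VXLAN with an explicit next-protocol field;
 * tun_p_to_eth_p() maps that field to an ethertype, and the caller then
 * runs the device in raw (ARPHRD_NONE) mode. Version and OAM handling
 * above follow the VXLAN-GPE draft quoted in the comments: unsupported
 * versions and OAM packets are simply dropped.
 */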
static bool vxlan_set_mac(struct vxlan_dev *vxlan,
			  struct vxlan_sock *vs,
			  struct sk_buff *skb)
{
	return true;
}

static bool vxlan_ecn_decapsulate(struct vxlan_sock *vs, void *oiph,
				  struct sk_buff *skb)
{
	int err = 0;

	if (vxlan_get_sk_family(vs) == AF_INET)
		err = IP_ECN_decapsulate(oiph, skb);
#if IS_ENABLED(CONFIG_IPV6)
	else
		err = IP6_ECN_decapsulate(oiph, skb);
#endif
	return err <= 1;
}
/* Callback from net/ipv4/udp.c to receive packets */
static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
{
	union {
		struct metadata_dst dst;
		char buf[sizeof(struct metadata_dst) + sizeof(struct vxlan_metadata)];
	} buf;

	struct pcpu_sw_netstats *stats;
	struct vxlan_dev *vxlan;
	struct vxlan_sock *vs;
	struct vxlanhdr unparsed;
	struct vxlan_metadata _md;
	struct vxlan_metadata *md = &_md;
	__be16 protocol = htons(ETH_P_TEB);
	bool raw_proto = false;
	void *oiph;

	/* Need UDP and VXLAN header to be present */
	if (!pskb_may_pull(skb, VXLAN_HLEN))
		return 1;

	unparsed = *vxlan_hdr(skb);
	/* VNI flag always required to be set */
	if (!(unparsed.vx_flags & VXLAN_HF_VNI)) {
		netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
			   ntohl(vxlan_hdr(skb)->vx_flags),
			   ntohl(vxlan_hdr(skb)->vx_vni));
		/* Return non vxlan pkt */
		return 1;
	}

	unparsed.vx_flags &= ~VXLAN_HF_VNI;
	unparsed.vx_vni &= ~VXLAN_VNI_MASK;

	vs = rcu_dereference_sk_user_data(sk);
	if (!vs)
		goto drop;

#if IS_ENABLED(CONFIG_IPV6)
#ifdef OVS_CHECK_UDP_TUNNEL_ZERO_CSUM
	if (vxlan_get_sk_family(vs) == AF_INET6 &&
	    !udp_hdr(skb)->check &&
	    !(vs->flags & VXLAN_F_UDP_ZERO_CSUM6_RX)) {
		udp6_csum_zero_error(skb);
		goto drop;
	}
#endif
#endif
	vxlan = vxlan_vs_find_vni(vs, vxlan_vni(vxlan_hdr(skb)->vx_vni));
	if (!vxlan)
		goto drop;

	/* For backwards compatibility, only allow reserved fields to be
	 * used by VXLAN extensions if explicitly requested.
	 */
	if (vs->flags & VXLAN_F_GPE) {
		if (!vxlan_parse_gpe_hdr(&unparsed, &protocol, skb, vs->flags))
			goto drop;
		raw_proto = true;
	}

	if (__iptunnel_pull_header(skb, VXLAN_HLEN, protocol, raw_proto,
				   !net_eq(vxlan->net, dev_net(vxlan->dev))))
		goto drop;

	if (vxlan_collect_metadata(vs)) {
		__be32 vni = vxlan_vni(vxlan_hdr(skb)->vx_vni);
		struct metadata_dst *tun_dst;

		tun_dst = &buf.dst;
		ovs_udp_tun_rx_dst(tun_dst, skb,
				   vxlan_get_sk_family(vs), TUNNEL_KEY,
				   vxlan_vni_to_tun_id(vni), sizeof(*md));

		md = ip_tunnel_info_opts(&tun_dst->u.tun_info);

		ovs_skb_dst_set(skb, (struct dst_entry *)tun_dst);
	} else {
		memset(md, 0, sizeof(*md));
	}

	if (vs->flags & VXLAN_F_REMCSUM_RX)
		if (!vxlan_remcsum(&unparsed, skb, vs->flags))
			goto drop;
	if (vs->flags & VXLAN_F_GBP)
		vxlan_parse_gbp_hdr(&unparsed, skb, vs->flags, md);
	/* Note that GBP and GPE can never be active together. This is
	 * ensured in vxlan_dev_configure.
	 */

	if (unparsed.vx_flags || unparsed.vx_vni) {
		/* If there are any unprocessed flags remaining treat
		 * this as a malformed packet. This behavior diverges from
		 * VXLAN RFC (RFC7348) which stipulates that bits in reserved
		 * fields are to be ignored. The approach here maintains
		 * compatibility with previous stack code, and also is more
		 * robust and provides a little more security in adding
		 * extensions to VXLAN.
		 */
		goto drop;
	}

	if (!raw_proto) {
		if (!vxlan_set_mac(vxlan, vs, skb))
			goto drop;
		skb_reset_mac_header(skb);
		skb->protocol = eth_type_trans(skb, vxlan->dev);
		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
	} else {
		skb_reset_mac_header(skb);
		skb->dev = vxlan->dev;
		skb->pkt_type = PACKET_HOST;
	}

	oiph = skb_network_header(skb);
	skb_reset_network_header(skb);

	if (!vxlan_ecn_decapsulate(vs, oiph, skb)) {
		++vxlan->dev->stats.rx_frame_errors;
		++vxlan->dev->stats.rx_errors;
		goto drop;
	}

	stats = this_cpu_ptr(vxlan->dev->tstats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += skb->len;
	u64_stats_update_end(&stats->syncp);

	netdev_port_receive(skb, skb_tunnel_info(skb));
	return 0;

drop:
	/* Consume bad packet */
	kfree_skb(skb);
	return 0;
}
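/*
 * Receive path summary (a rough sketch of the function above): validate
 * the header and VNI flag, find the vxlan_sock from the socket's user
 * data, match a device by VNI, strip the extension headers (GPE, RCO,
 * GBP) that the socket was configured for, treat any leftover reserved
 * bits as malformed, then rebuild skb metadata, run ECN decapsulation,
 * bump per-cpu stats and hand the packet to the OVS datapath via
 * netdev_port_receive().
 */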
static void vxlan_build_gbp_hdr(struct vxlanhdr *vxh, u32 vxflags,
				struct vxlan_metadata *md)
{
	struct vxlanhdr_gbp *gbp;

	if (!md->gbp)
		return;

	gbp = (struct vxlanhdr_gbp *)vxh;
	vxh->vx_flags |= VXLAN_HF_GBP;

	if (md->gbp & VXLAN_GBP_DONT_LEARN)
		gbp->dont_learn = 1;

	if (md->gbp & VXLAN_GBP_POLICY_APPLIED)
		gbp->policy_applied = 1;

	gbp->policy_id = htons(md->gbp & VXLAN_GBP_ID_MASK);
}

static int vxlan_build_gpe_hdr(struct vxlanhdr *vxh, u32 vxflags,
			       __be16 protocol)
{
	struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)vxh;

	gpe->np_applied = 1;
	gpe->next_protocol = tun_p_from_eth_p(protocol);
	if (!gpe->next_protocol)
		return -EPFNOSUPPORT;
	return 0;
}
static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst,
			   int iphdr_len, __be32 vni,
			   struct vxlan_metadata *md, u32 vxflags,
			   bool udp_sum)
{
	void (*fix_segment)(struct sk_buff *);
	struct vxlanhdr *vxh;
	int min_headroom;
	int err;
	int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
	__be16 inner_protocol = htons(ETH_P_TEB);

	if ((vxflags & VXLAN_F_REMCSUM_TX) &&
	    skb->ip_summed == CHECKSUM_PARTIAL) {
		int csum_start = skb_checksum_start_offset(skb);

		if (csum_start <= VXLAN_MAX_REMCSUM_START &&
		    !(csum_start & VXLAN_RCO_SHIFT_MASK) &&
		    (skb->csum_offset == offsetof(struct udphdr, check) ||
		     skb->csum_offset == offsetof(struct tcphdr, check)))
			type |= SKB_GSO_TUNNEL_REMCSUM;
	}

	min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
			+ VXLAN_HLEN + iphdr_len
			+ (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);

	/* Need space for new headers (invalidates iph ptr) */
	err = skb_cow_head(skb, min_headroom);
	if (unlikely(err))
		goto out_free;

	if (skb_vlan_tag_present(skb))
		skb = __vlan_hwaccel_push_inside(skb);
	if (WARN_ON(!skb))
		return -ENOMEM;

	type |= udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
#ifndef USE_UPSTREAM_TUNNEL_GSO
	fix_segment = !udp_sum ? ovs_udp_gso : ovs_udp_csum_gso;
#else
	fix_segment = NULL;
#endif
	err = ovs_iptunnel_handle_offloads(skb, type, fix_segment);
	if (err)
		goto out_free;

	vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
	vxh->vx_flags = VXLAN_HF_VNI;
	vxh->vx_vni = vxlan_vni_field(vni);

	if (type & SKB_GSO_TUNNEL_REMCSUM) {
		unsigned int start;

		start = skb_checksum_start_offset(skb) - sizeof(struct vxlanhdr);
		vxh->vx_vni |= vxlan_compute_rco(start, skb->csum_offset);
		vxh->vx_flags |= VXLAN_HF_RCO;

		if (!skb_is_gso(skb)) {
			skb->ip_summed = CHECKSUM_NONE;
			skb->encapsulation = 0;
		}
	}

	if (vxflags & VXLAN_F_GBP)
		vxlan_build_gbp_hdr(vxh, vxflags, md);
	if (vxflags & VXLAN_F_GPE) {
		err = vxlan_build_gpe_hdr(vxh, vxflags, skb->protocol);
		if (err < 0)
			goto out_free;
		inner_protocol = skb->protocol;
	}

	ovs_skb_set_inner_protocol(skb, inner_protocol);
	return 0;

out_free:
	kfree_skb(skb);
	return err;
}
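/*
 * Headroom math above: the encapsulated packet needs room for the outer
 * link-layer header (LL_RESERVED_SPACE), any dst header, the 8-byte UDP
 * plus 8-byte VXLAN headers (VXLAN_HLEN), the outer IP or IPv6 header
 * (iphdr_len), and a VLAN tag if one is pending, so skb_cow_head() is
 * told the worst case before any header is pushed.
 */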
static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan,
				      struct sk_buff *skb, int oif, u8 tos,
				      __be32 daddr, __be32 *saddr,
				      __be16 dport, __be16 sport,
				      struct dst_cache *dst_cache,
				      const struct ip_tunnel_info *info)
{
	bool use_cache = (dst_cache && ip_tunnel_dst_cache_usable(skb, info));
	struct rtable *rt = NULL;
	struct flowi4 fl4;

	if (use_cache) {
		rt = dst_cache_get_ip4(dst_cache, saddr);
		if (rt)
			return rt;
	}

	memset(&fl4, 0, sizeof(fl4));
	fl4.flowi4_oif = oif;
	fl4.flowi4_tos = RT_TOS(tos);
	fl4.flowi4_mark = skb->mark;
	fl4.flowi4_proto = IPPROTO_UDP;
	fl4.daddr = daddr;
	fl4.saddr = *saddr;
	fl4.fl4_dport = dport;
	fl4.fl4_sport = sport;

	rt = ip_route_output_key(vxlan->net, &fl4);
	if (!IS_ERR(rt)) {
		*saddr = fl4.saddr;
		if (use_cache)
			dst_cache_set_ip4(dst_cache, &rt->dst, fl4.saddr);
	}
	return rt;
}
#if IS_ENABLED(CONFIG_IPV6)
static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
					  struct sk_buff *skb, int oif, u8 tos,
					  __be32 label,
					  const struct in6_addr *daddr,
					  struct in6_addr *saddr,
					  __be16 dport, __be16 sport,
					  struct dst_cache *dst_cache,
					  const struct ip_tunnel_info *info)
{
	struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock);
	bool use_cache = (dst_cache && ip_tunnel_dst_cache_usable(skb, info));
	struct dst_entry *ndst;
	struct flowi6 fl6;
	int err;

	if (!sock6)
		return ERR_PTR(-EIO);

	if (use_cache) {
		ndst = dst_cache_get_ip6(dst_cache, saddr);
		if (ndst)
			return ndst;
	}

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_oif = oif;
	fl6.daddr = *daddr;
	fl6.saddr = *saddr;
	fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tos), label);
	fl6.flowi6_mark = skb->mark;
	fl6.flowi6_proto = IPPROTO_UDP;
	fl6.fl6_dport = dport;
	fl6.fl6_sport = sport;

#ifdef HAVE_IPV6_DST_LOOKUP_NET
	err = ipv6_stub->ipv6_dst_lookup(vxlan->net,
					 vxlan->vn6_sock->sock->sk,
					 &ndst, &fl6);
#else
#ifdef HAVE_IPV6_STUB
	err = ipv6_stub->ipv6_dst_lookup(vxlan->vn6_sock->sock->sk,
					 &ndst, &fl6);
#else
	err = ip6_dst_lookup(vxlan->vn6_sock->sock->sk, &ndst, &fl6);
#endif
#endif
	if (err < 0)
		return ERR_PTR(err);

	*saddr = fl6.saddr;
	if (use_cache)
		dst_cache_set_ip6(dst_cache, ndst, saddr);
	return ndst;
}
#endif
/* Bypass encapsulation if the destination is local */
static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
			       struct vxlan_dev *dst_vxlan)
{
	skb->dev->stats.rx_dropped++;
	kfree_skb(skb);
}
static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
			   struct vxlan_rdst *rdst, bool did_rsc)
{
	struct dst_cache *dst_cache;
	struct ip_tunnel_info *info;
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct sock *sk;
	struct rtable *rt = NULL;
	const struct iphdr *old_iph;
	union vxlan_addr *dst;
	union vxlan_addr remote_ip, local_ip;
	union vxlan_addr *src;
	struct vxlan_metadata _md;
	struct vxlan_metadata *md = &_md;
	__be16 src_port = 0, dst_port;
	__be32 vni, label;
	__be16 df = 0;
	__u8 tos, ttl;
	int err;
	u32 flags = vxlan->flags;
	bool udp_sum = false;
	bool xnet = !net_eq(vxlan->net, dev_net(vxlan->dev));

	info = skb_tunnel_info(skb);

	if (rdst) {
		dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port;
		vni = rdst->remote_vni;
		dst = &rdst->remote_ip;
		src = &vxlan->cfg.saddr;
		dst_cache = &rdst->dst_cache;
	} else {
		if (!info) {
			WARN_ONCE(1, "%s: Missing encapsulation instructions\n",
				  dev->name);
			goto drop;
		}
		dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port;
		vni = vxlan_tun_id_to_vni(info->key.tun_id);
		remote_ip.sa.sa_family = ip_tunnel_info_af(info);
		if (remote_ip.sa.sa_family == AF_INET) {
			remote_ip.sin.sin_addr.s_addr = info->key.u.ipv4.dst;
			local_ip.sin.sin_addr.s_addr = info->key.u.ipv4.src;
		} else {
			remote_ip.sin6.sin6_addr = info->key.u.ipv6.dst;
			local_ip.sin6.sin6_addr = info->key.u.ipv6.src;
		}
		dst = &remote_ip;
		src = &local_ip;
		dst_cache = &info->dst_cache;
	}

	if (vxlan_addr_any(dst)) {
		if (did_rsc) {
			/* short-circuited back to local bridge */
			vxlan_encap_bypass(skb, vxlan, vxlan);
			return;
		}
		goto drop;
	}

	old_iph = ip_hdr(skb);

	ttl = vxlan->cfg.ttl;
	if (!ttl && vxlan_addr_multicast(dst))
		ttl = 1;

	tos = vxlan->cfg.tos;
	if (tos == 1)
		tos = ip_tunnel_get_dsfield(old_iph, skb);

	label = vxlan->cfg.label;
	src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
				     vxlan->cfg.port_max, true);

	if (info) {
		ttl = info->key.ttl;
		tos = info->key.tos;
		label = info->key.label;
		udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);

		if (info->options_len &&
		    info->key.tun_flags & TUNNEL_VXLAN_OPT)
			md = ip_tunnel_info_opts(info);
	} else {
		md->gbp = skb->mark;
	}

	if (dst->sa.sa_family == AF_INET) {
		struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock);

		if (!sock4)
			goto drop;
		sk = sock4->sock->sk;

		rt = vxlan_get_route(vxlan, skb,
				     rdst ? rdst->remote_ifindex : 0, tos,
				     dst->sin.sin_addr.s_addr,
				     &src->sin.sin_addr.s_addr,
				     dst_port, src_port,
				     dst_cache, info);
		if (IS_ERR(rt)) {
			netdev_dbg(dev, "no route to %pI4\n",
				   &dst->sin.sin_addr.s_addr);
			dev->stats.tx_carrier_errors++;
			goto tx_error;
		}

		if (rt->dst.dev == dev) {
			netdev_dbg(dev, "circular route to %pI4\n",
				   &dst->sin.sin_addr.s_addr);
			dev->stats.collisions++;
			goto rt_tx_error;
		}

		/* Bypass encapsulation if the destination is local */
		if (!info && rt->rt_flags & RTCF_LOCAL &&
		    !(rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
			struct vxlan_dev *dst_vxlan;

			ip_rt_put(rt);
			dst_vxlan = vxlan_find_vni(vxlan->net, vni,
						   dst->sa.sa_family, dst_port,
						   vxlan->flags);
			if (!dst_vxlan)
				goto tx_error;
			vxlan_encap_bypass(skb, vxlan, dst_vxlan);
			return;
		}

		if (!info)
			udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM_TX);
		else if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)
			df = htons(IP_DF);

		tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
		ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
		err = vxlan_build_skb(skb, &rt->dst, sizeof(struct iphdr),
				      vni, md, flags, udp_sum);
		if (err < 0)
			goto xmit_tx_error;

		udp_tunnel_xmit_skb(rt, sk, skb, src->sin.sin_addr.s_addr,
				    dst->sin.sin_addr.s_addr, tos, ttl, df,
				    src_port, dst_port, xnet, !udp_sum);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock);
		struct dst_entry *ndst;
		u32 rt6i_flags;

		if (!sock6)
			goto drop;
		sk = sock6->sock->sk;

		ndst = vxlan6_get_route(vxlan, skb,
					rdst ? rdst->remote_ifindex : 0, tos,
					label, &dst->sin6.sin6_addr,
					&src->sin6.sin6_addr,
					dst_port, src_port,
					dst_cache, info);
		if (IS_ERR(ndst)) {
			netdev_dbg(dev, "no route to %pI6\n",
				   &dst->sin6.sin6_addr);
			dev->stats.tx_carrier_errors++;
			goto tx_error;
		}

		if (ndst->dev == dev) {
			netdev_dbg(dev, "circular route to %pI6\n",
				   &dst->sin6.sin6_addr);
			dst_release(ndst);
			dev->stats.collisions++;
			goto tx_error;
		}

		/* Bypass encapsulation if the destination is local */
		rt6i_flags = ((struct rt6_info *)ndst)->rt6i_flags;
		if (!info && rt6i_flags & RTF_LOCAL &&
		    !(rt6i_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
			struct vxlan_dev *dst_vxlan;

			dst_release(ndst);
			dst_vxlan = vxlan_find_vni(vxlan->net, vni,
						   dst->sa.sa_family, dst_port,
						   vxlan->flags);
			if (!dst_vxlan)
				goto tx_error;
			vxlan_encap_bypass(skb, vxlan, dst_vxlan);
			return;
		}

		if (!info)
			udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM6_TX);

		tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
		ttl = ttl ? : ip6_dst_hoplimit(ndst);
		skb_scrub_packet(skb, xnet);
		err = vxlan_build_skb(skb, ndst, sizeof(struct ipv6hdr),
				      vni, md, flags, udp_sum);
		if (err < 0) {
			dst_release(ndst);
			return;
		}
		udp_tunnel6_xmit_skb(ndst, sk, skb, dev,
				     &src->sin6.sin6_addr,
				     &dst->sin6.sin6_addr, tos, ttl,
				     label, src_port, dst_port, !udp_sum);
#endif
	}

	return;

drop:
	dev->stats.tx_dropped++;
	goto tx_free;

xmit_tx_error:
	/* skb is already freed. */
	skb = NULL;
rt_tx_error:
	ip_rt_put(rt);
tx_error:
	dev->stats.tx_errors++;
tx_free:
	dev_kfree_skb(skb);
}
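/*
 * Transmit path summary: with an rdst the destination comes from the
 * device's default remote; without one it must come from tunnel
 * metadata (IP_TUNNEL_INFO_TX). Either way the code resolves a route,
 * short-circuits packets that would loop back to a local VXLAN device,
 * builds the VXLAN header via vxlan_build_skb(), and hands the result
 * to udp_tunnel_xmit_skb() or udp_tunnel6_xmit_skb().
 */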
/* Transmit local packets over Vxlan
 *
 * Outer IP header inherits ECN and DF from inner header.
 * Outer UDP destination is the VXLAN assigned port.
 *           source port is based on hash of flow
 */
netdev_tx_t rpl_vxlan_xmit(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct vxlan_dev *vxlan = netdev_priv(dev);
	const struct ip_tunnel_info *info;

	info = skb_tunnel_info(skb);
	skb_reset_mac_header(skb);
	if (vxlan->flags & VXLAN_F_COLLECT_METADATA) {
		if (info && info->mode & IP_TUNNEL_INFO_TX) {
			vxlan_xmit_one(skb, dev, NULL, false);
			return NETDEV_TX_OK;
		}
	}

	dev->stats.tx_dropped++;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}
EXPORT_SYMBOL_GPL(rpl_vxlan_xmit);
/* Walk the forwarding table and purge stale entries */
#ifdef HAVE_INIT_TIMER_DEFERRABLE
static void vxlan_cleanup(unsigned long arg)
{
	struct vxlan_dev *vxlan = (struct vxlan_dev *) arg;
#else
static void vxlan_cleanup(struct timer_list *t)
{
	struct vxlan_dev *vxlan = from_timer(vxlan, t, age_timer);
#endif
	unsigned long next_timer = jiffies + FDB_AGE_INTERVAL;
	unsigned int h;

	if (!netif_running(vxlan->dev))
		return;

	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct hlist_node *p, *n;

		spin_lock_bh(&vxlan->hash_lock);
		hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
			struct vxlan_fdb *f
				= container_of(p, struct vxlan_fdb, hlist);
			unsigned long timeout;

			if (f->state & NUD_PERMANENT)
				continue;

			timeout = f->used + vxlan->cfg.age_interval * HZ;
			if (time_before_eq(timeout, jiffies)) {
				netdev_dbg(vxlan->dev,
					   "garbage collect %pM\n",
					   f->eth_addr);
				f->state = NUD_STALE;
				vxlan_fdb_destroy(vxlan, f);
			} else if (time_before(timeout, next_timer))
				next_timer = timeout;
		}
		spin_unlock_bh(&vxlan->hash_lock);
	}

	mod_timer(&vxlan->age_timer, next_timer);
}
static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan)
{
	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
	__be32 vni = vxlan->default_dst.remote_vni;

	spin_lock(&vn->sock_lock);
	hlist_add_head_rcu(&vxlan->hlist, vni_head(vs, vni));
	spin_unlock(&vn->sock_lock);
}
/* Setup stats when device is created */
static int vxlan_init(struct net_device *dev)
{
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	return 0;
}

static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan)
{
}

static void vxlan_uninit(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);

	vxlan_fdb_delete_default(vxlan);

	free_percpu(dev->tstats);
}
/* Start ageing timer and join group when device is brought up */
static int vxlan_open(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	int ret;

	ret = vxlan_sock_add(vxlan);
	if (ret < 0)
		return ret;

	if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) {
		ret = vxlan_igmp_join(vxlan);
		if (ret == -EADDRINUSE)
			ret = 0;
		if (ret) {
			vxlan_sock_release(vxlan);
			return ret;
		}
	}

	if (vxlan->cfg.age_interval)
		mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL);

	return ret;
}
/* Purge the forwarding table */
static void vxlan_flush(struct vxlan_dev *vxlan)
{
	unsigned int h;

	spin_lock_bh(&vxlan->hash_lock);
	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct hlist_node *p, *n;
		hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
			struct vxlan_fdb *f
				= container_of(p, struct vxlan_fdb, hlist);
			/* the all_zeros_mac entry is deleted at vxlan_uninit */
			if (!is_zero_ether_addr(f->eth_addr))
				vxlan_fdb_destroy(vxlan, f);
		}
	}
	spin_unlock_bh(&vxlan->hash_lock);
}
/* Cleanup timer and forwarding table on shutdown */
static int vxlan_stop(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
	int ret = 0;

	if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
	    !vxlan_group_used(vn, vxlan))
		ret = vxlan_igmp_leave(vxlan);

	del_timer_sync(&vxlan->age_timer);

	vxlan_flush(vxlan);
	vxlan_sock_release(vxlan);

	return ret;
}
1420 static void vxlan_set_multicast_list(struct net_device
*dev
)
1424 static int __vxlan_change_mtu(struct net_device
*dev
,
1425 struct net_device
*lowerdev
,
1426 struct vxlan_rdst
*dst
, int new_mtu
, bool strict
)
1428 int max_mtu
= IP_MAX_MTU
;
1431 max_mtu
= lowerdev
->mtu
;
1433 if (dst
->remote_ip
.sa
.sa_family
== AF_INET6
)
1434 max_mtu
-= VXLAN6_HEADROOM
;
1436 max_mtu
-= VXLAN_HEADROOM
;
1441 if (new_mtu
> max_mtu
) {
1452 static int vxlan_change_mtu(struct net_device
*dev
, int new_mtu
)
1454 struct vxlan_dev
*vxlan
= netdev_priv(dev
);
1455 struct vxlan_rdst
*dst
= &vxlan
->default_dst
;
1456 struct net_device
*lowerdev
= __dev_get_by_index(vxlan
->net
,
1457 dst
->remote_ifindex
);
1458 return __vxlan_change_mtu(dev
, lowerdev
, dst
, new_mtu
, true);
int ovs_vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct ip_tunnel_info *info = skb_tunnel_info(skb);
	__be16 sport, dport;

	sport = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
				  vxlan->cfg.port_max, true);
	dport = info->key.tp_dst ? : vxlan->cfg.dst_port;

	if (ip_tunnel_info_af(info) == AF_INET) {
		struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock);
		struct rtable *rt;

		if (!sock4)
			return -EINVAL;
		rt = vxlan_get_route(vxlan, skb, 0, info->key.tos,
				     info->key.u.ipv4.dst,
				     &info->key.u.ipv4.src,
				     dport, sport, NULL, info);
		if (IS_ERR(rt))
			return PTR_ERR(rt);
		ip_rt_put(rt);
	} else {
#if IS_ENABLED(CONFIG_IPV6)
		struct dst_entry *ndst;

		ndst = vxlan6_get_route(vxlan, skb, 0, info->key.tos,
					info->key.label, &info->key.u.ipv6.dst,
					&info->key.u.ipv6.src,
					dport, sport, NULL, info);
		if (IS_ERR(ndst))
			return PTR_ERR(ndst);
		dst_release(ndst);
#else /* !CONFIG_IPV6 */
		return -EPFNOSUPPORT;
#endif
	}
	info->key.tp_src = sport;
	info->key.tp_dst = dport;
	return 0;
}
EXPORT_SYMBOL_GPL(ovs_vxlan_fill_metadata_dst);
static netdev_tx_t vxlan_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* Drop all packets coming from networking stack. OVS-CB is
	 * not initialized for these packets.
	 */
	kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
static const struct net_device_ops vxlan_netdev_ether_ops = {
	.ndo_init		= vxlan_init,
	.ndo_uninit		= vxlan_uninit,
	.ndo_open		= vxlan_open,
	.ndo_stop		= vxlan_stop,
	.ndo_start_xmit		= vxlan_dev_xmit,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_set_rx_mode	= vxlan_set_multicast_list,
#ifdef HAVE_RHEL7_MAX_MTU
	.ndo_size		= sizeof(struct net_device_ops),
	.extended.ndo_change_mtu = vxlan_change_mtu,
#else
	.ndo_change_mtu		= vxlan_change_mtu,
#endif
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef HAVE_NDO_FILL_METADATA_DST
	.ndo_fill_metadata_dst	= ovs_vxlan_fill_metadata_dst,
#endif
};

static const struct net_device_ops vxlan_netdev_raw_ops = {
	.ndo_init		= vxlan_init,
	.ndo_uninit		= vxlan_uninit,
	.ndo_open		= vxlan_open,
	.ndo_stop		= vxlan_stop,
	.ndo_start_xmit		= vxlan_dev_xmit,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
#ifdef HAVE_RHEL7_MAX_MTU
	.ndo_size		= sizeof(struct net_device_ops),
	.extended.ndo_change_mtu = vxlan_change_mtu,
#else
	.ndo_change_mtu		= vxlan_change_mtu,
#endif
#ifdef HAVE_NDO_FILL_METADATA_DST
	.ndo_fill_metadata_dst	= ovs_vxlan_fill_metadata_dst,
#endif
};
/* Info for udev, that this is a virtual tunnel endpoint */
static struct device_type vxlan_type = {
	.name = "vxlan",
};
/* Calls the ndo_add_vxlan_port or ndo_udp_tunnel_add of the caller
 * in order to supply the listening VXLAN udp ports. Callers are
 * expected to implement the ndo_add_vxlan_port.
 */
static void vxlan_push_rx_ports(struct net_device *dev)
{
#ifdef HAVE_NDO_ADD_VXLAN_PORT
	struct vxlan_sock *vs;
	struct net *net = dev_net(dev);
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	sa_family_t sa_family;
	__be16 port;
	unsigned int i;

	if (!dev->netdev_ops->ndo_add_vxlan_port)
		return;

	spin_lock(&vn->sock_lock);
	for (i = 0; i < PORT_HASH_SIZE; ++i) {
		hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) {
			port = inet_sk(vs->sock->sk)->inet_sport;
			sa_family = vxlan_get_sk_family(vs);
			dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family,
							    port);
		}
	}
	spin_unlock(&vn->sock_lock);
#elif defined(HAVE_NDO_UDP_TUNNEL_ADD)
	struct vxlan_sock *vs;
	struct net *net = dev_net(dev);
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	unsigned int i;

	if (!dev->netdev_ops->ndo_udp_tunnel_add)
		return;

	spin_lock(&vn->sock_lock);
	for (i = 0; i < PORT_HASH_SIZE; ++i) {
		hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) {
			struct udp_tunnel_info ti;
			if (vs->flags & VXLAN_F_GPE)
				ti.type = UDP_TUNNEL_TYPE_VXLAN_GPE;
			else
				ti.type = UDP_TUNNEL_TYPE_VXLAN;
			ti.port = inet_sk(vs->sock->sk)->inet_sport;
			ti.sa_family = vxlan_get_sk_family(vs);

			dev->netdev_ops->ndo_udp_tunnel_add(dev, &ti);
		}
	}
	spin_unlock(&vn->sock_lock);
#endif
}
/* Initialize the device structure. */
static void vxlan_setup(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	unsigned int h;

	eth_hw_addr_random(dev);
	ether_setup(dev);

#ifndef HAVE_NEEDS_FREE_NETDEV
	dev->destructor = free_netdev;
#else
	dev->needs_free_netdev = true;
#endif
	SET_NETDEV_DEVTYPE(dev, &vxlan_type);

	dev->features	|= NETIF_F_LLTX;
	dev->features	|= NETIF_F_SG | NETIF_F_HW_CSUM;
	dev->features	|= NETIF_F_RXCSUM;
	dev->features	|= NETIF_F_GSO_SOFTWARE;

	dev->vlan_features = dev->features;
	dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
	dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
	dev->hw_features |= NETIF_F_GSO_SOFTWARE;
	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;

	netif_keep_dst(dev);

	dev->priv_flags |= IFF_NO_QUEUE;

	INIT_LIST_HEAD(&vxlan->next);
	spin_lock_init(&vxlan->hash_lock);

#ifdef HAVE_INIT_TIMER_DEFERRABLE
	init_timer_deferrable(&vxlan->age_timer);
	vxlan->age_timer.function = vxlan_cleanup;
	vxlan->age_timer.data = (unsigned long) vxlan;
#else
	timer_setup(&vxlan->age_timer, vxlan_cleanup, TIMER_DEFERRABLE);
#endif

	vxlan->cfg.dst_port = htons(vxlan_port);

	vxlan->dev = dev;

	for (h = 0; h < FDB_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
}
static void vxlan_ether_setup(struct net_device *dev)
{
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	dev->netdev_ops = &vxlan_netdev_ether_ops;
}

static void vxlan_raw_setup(struct net_device *dev)
{
	dev->header_ops = NULL;
	dev->type = ARPHRD_NONE;
	dev->hard_header_len = 0;
	dev->addr_len = 0;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
	dev->netdev_ops = &vxlan_netdev_raw_ops;
}
static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
	[IFLA_VXLAN_ID]		= { .type = NLA_U32 },
	[IFLA_VXLAN_GROUP]	= { .len = sizeof_field(struct iphdr, daddr) },
	[IFLA_VXLAN_GROUP6]	= { .len = sizeof(struct in6_addr) },
	[IFLA_VXLAN_LINK]	= { .type = NLA_U32 },
	[IFLA_VXLAN_LOCAL]	= { .len = sizeof_field(struct iphdr, saddr) },
	[IFLA_VXLAN_LOCAL6]	= { .len = sizeof(struct in6_addr) },
	[IFLA_VXLAN_TOS]	= { .type = NLA_U8 },
	[IFLA_VXLAN_TTL]	= { .type = NLA_U8 },
	[IFLA_VXLAN_LABEL]	= { .type = NLA_U32 },
	[IFLA_VXLAN_LEARNING]	= { .type = NLA_U8 },
	[IFLA_VXLAN_AGEING]	= { .type = NLA_U32 },
	[IFLA_VXLAN_LIMIT]	= { .type = NLA_U32 },
	[IFLA_VXLAN_PORT_RANGE] = { .len = sizeof(struct ifla_vxlan_port_range) },
	[IFLA_VXLAN_PROXY]	= { .type = NLA_U8 },
	[IFLA_VXLAN_RSC]	= { .type = NLA_U8 },
	[IFLA_VXLAN_L2MISS]	= { .type = NLA_U8 },
	[IFLA_VXLAN_L3MISS]	= { .type = NLA_U8 },
	[IFLA_VXLAN_COLLECT_METADATA]	= { .type = NLA_U8 },
	[IFLA_VXLAN_PORT]	= { .type = NLA_U16 },
	[IFLA_VXLAN_UDP_CSUM]	= { .type = NLA_U8 },
	[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]	= { .type = NLA_U8 },
	[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]	= { .type = NLA_U8 },
	[IFLA_VXLAN_REMCSUM_TX]	= { .type = NLA_U8 },
	[IFLA_VXLAN_REMCSUM_RX]	= { .type = NLA_U8 },
	[IFLA_VXLAN_GBP]	= { .type = NLA_FLAG, },
	[IFLA_VXLAN_GPE]	= { .type = NLA_FLAG, },
	[IFLA_VXLAN_REMCSUM_NOPARTIAL]	= { .type = NLA_FLAG },
};
#ifdef HAVE_EXT_ACK_IN_RTNL_LINKOPS
static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[],
			  struct netlink_ext_ack *extack)
#else
static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
#endif
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
			pr_debug("invalid link address (not ethernet)\n");
			return -EINVAL;
		}

		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
			pr_debug("invalid all zero ethernet address\n");
			return -EADDRNOTAVAIL;
		}
	}

	if (!data)
		return -EINVAL;

	if (data[IFLA_VXLAN_ID]) {
		__u32 id = nla_get_u32(data[IFLA_VXLAN_ID]);
		if (id >= VXLAN_VID_MASK)
			return -ERANGE;
	}

	if (data[IFLA_VXLAN_PORT_RANGE]) {
		const struct ifla_vxlan_port_range *p
			= nla_data(data[IFLA_VXLAN_PORT_RANGE]);

		if (ntohs(p->high) < ntohs(p->low)) {
			pr_debug("port range %u .. %u not valid\n",
				 ntohs(p->low), ntohs(p->high));
			return -EINVAL;
		}
	}

	return 0;
}
static void vxlan_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->version, VXLAN_VERSION, sizeof(drvinfo->version));
	strlcpy(drvinfo->driver, "vxlan", sizeof(drvinfo->driver));
}

static const struct ethtool_ops vxlan_ethtool_ops = {
	.get_drvinfo	= vxlan_get_drvinfo,
	.get_link	= ethtool_op_get_link,
};
static struct socket *vxlan_create_sock(struct net *net, bool ipv6,
					__be16 port, u32 flags)
{
	struct socket *sock;
	struct udp_port_cfg udp_conf;
	int err;

	memset(&udp_conf, 0, sizeof(udp_conf));

	if (ipv6) {
		udp_conf.family = AF_INET6;
		udp_conf.use_udp6_rx_checksums =
		    !(flags & VXLAN_F_UDP_ZERO_CSUM6_RX);
		udp_conf.ipv6_v6only = 1;
	} else {
		udp_conf.family = AF_INET;
	}

	udp_conf.local_udp_port = port;

	/* Open UDP socket */
	err = udp_sock_create(net, &udp_conf, &sock);
	if (err < 0)
		return ERR_PTR(err);

	return sock;
}
/* Create new listen socket if needed */
static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6,
					      __be16 port, u32 flags)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	struct vxlan_sock *vs;
	struct socket *sock;
	unsigned int h;
	struct udp_tunnel_sock_cfg tunnel_cfg;

	vs = kzalloc(sizeof(*vs), GFP_KERNEL);
	if (!vs)
		return ERR_PTR(-ENOMEM);

	for (h = 0; h < VNI_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vs->vni_list[h]);

	sock = vxlan_create_sock(net, ipv6, port, flags);
	if (IS_ERR(sock)) {
		kfree(vs);
		return ERR_CAST(sock);
	}

	vs->sock = sock;
	atomic_set(&vs->refcnt, 1);
	vs->flags = (flags & VXLAN_F_RCV_FLAGS);

#ifdef HAVE_UDP_OFFLOAD
	vs->udp_offloads.port = port;
	vs->udp_offloads.callbacks.gro_receive  = vxlan_gro_receive;
	vs->udp_offloads.callbacks.gro_complete = vxlan_gro_complete;
#endif

	spin_lock(&vn->sock_lock);
	hlist_add_head_rcu(&vs->hlist, vs_head(net, port));
	vxlan_notify_add_rx_port(vs);
	spin_unlock(&vn->sock_lock);

	/* Mark socket as an encapsulation socket. */
	memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
	tunnel_cfg.sk_user_data = vs;
	tunnel_cfg.encap_type = 1;
	tunnel_cfg.encap_rcv = vxlan_rcv;
	tunnel_cfg.encap_destroy = NULL;
#ifdef HAVE_UDP_TUNNEL_SOCK_CFG_GRO_RECEIVE
	tunnel_cfg.gro_receive = vxlan_gro_receive;
	tunnel_cfg.gro_complete = vxlan_gro_complete;
#endif
	setup_udp_tunnel_sock(net, sock, &tunnel_cfg);

	return vs;
}
static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6)
{
	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
	struct vxlan_sock *vs = NULL;

	if (!vxlan->cfg.no_share) {
		spin_lock(&vn->sock_lock);
		vs = vxlan_find_sock(vxlan->net, ipv6 ? AF_INET6 : AF_INET,
				     vxlan->cfg.dst_port, vxlan->flags);
		if (vs && !atomic_add_unless(&vs->refcnt, 1, 0)) {
			spin_unlock(&vn->sock_lock);
			return -EBUSY;
		}
		spin_unlock(&vn->sock_lock);
	}
	if (!vs)
		vs = vxlan_socket_create(vxlan->net, ipv6,
					 vxlan->cfg.dst_port, vxlan->flags);
	if (IS_ERR(vs))
		return PTR_ERR(vs);
#if IS_ENABLED(CONFIG_IPV6)
	if (ipv6)
		rcu_assign_pointer(vxlan->vn6_sock, vs);
	else
#endif
		rcu_assign_pointer(vxlan->vn4_sock, vs);
	vxlan_vs_add_dev(vs, vxlan);
	return 0;
}
static int vxlan_sock_add(struct vxlan_dev *vxlan)
{
	bool metadata = vxlan->flags & VXLAN_F_COLLECT_METADATA;
	bool ipv6 = vxlan->flags & VXLAN_F_IPV6 || metadata;
	bool ipv4 = !ipv6 || metadata;
	int ret = 0;

	RCU_INIT_POINTER(vxlan->vn4_sock, NULL);
#if IS_ENABLED(CONFIG_IPV6)
	RCU_INIT_POINTER(vxlan->vn6_sock, NULL);
	if (ipv6) {
		ret = __vxlan_sock_add(vxlan, true);
		if (ret < 0 && ret != -EAFNOSUPPORT)
			ipv4 = false;
	}
#endif
	if (ipv4)
		ret = __vxlan_sock_add(vxlan, false);
	if (ret < 0)
		vxlan_sock_release(vxlan);
	return ret;
}
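/*
 * Socket sharing note: unless cfg.no_share is set, devices with the
 * same netns, address family, port and RCV flags share one vxlan_sock,
 * with atomic_add_unless() guarding against grabbing a socket whose
 * refcount already dropped to zero on a concurrent release. A
 * metadata-mode device opens both an IPv4 and an IPv6 socket so it can
 * terminate either outer family.
 */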
static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
			       struct vxlan_config *conf)
{
	struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
	struct vxlan_dev *vxlan = netdev_priv(dev), *tmp;
	struct vxlan_rdst *dst = &vxlan->default_dst;
	unsigned short needed_headroom = ETH_HLEN;
	int err;
	bool use_ipv6 = false;
	__be16 default_port = vxlan->cfg.dst_port;
	struct net_device *lowerdev = NULL;

	if (conf->flags & VXLAN_F_GPE) {
		if (conf->flags & ~VXLAN_F_ALLOWED_GPE)
			return -EINVAL;
		/* For now, allow GPE only together with COLLECT_METADATA.
		 * This can be relaxed later; in such case, the other side
		 * of the PtP link will have to be provided.
		 */
		if (!(conf->flags & VXLAN_F_COLLECT_METADATA))
			return -EINVAL;

		vxlan_raw_setup(dev);
	} else {
		vxlan_ether_setup(dev);
	}

	vxlan->net = src_net;

	dst->remote_vni = conf->vni;

	memcpy(&dst->remote_ip, &conf->remote_ip, sizeof(conf->remote_ip));

	/* Unless IPv6 is explicitly requested, assume IPv4 */
	if (!dst->remote_ip.sa.sa_family)
		dst->remote_ip.sa.sa_family = AF_INET;

	if (dst->remote_ip.sa.sa_family == AF_INET6 ||
	    vxlan->cfg.saddr.sa.sa_family == AF_INET6) {
		if (!IS_ENABLED(CONFIG_IPV6))
			return -EPFNOSUPPORT;
		use_ipv6 = true;
		vxlan->flags |= VXLAN_F_IPV6;
	}

	if (conf->label && !use_ipv6) {
		pr_info("label only supported in use with IPv6\n");
		return -EINVAL;
	}

	if (conf->remote_ifindex) {
		lowerdev = __dev_get_by_index(src_net, conf->remote_ifindex);
		dst->remote_ifindex = conf->remote_ifindex;

		if (!lowerdev) {
			pr_info("ifindex %d does not exist\n", dst->remote_ifindex);
			return -ENODEV;
		}

#if IS_ENABLED(CONFIG_IPV6)
		if (use_ipv6) {
			struct inet6_dev *idev = __in6_dev_get(lowerdev);
			if (idev && idev->cnf.disable_ipv6) {
				pr_info("IPv6 is disabled via sysctl\n");
				return -EPERM;
			}
		}
#endif

		if (!conf->mtu)
			dev->mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);

		needed_headroom = lowerdev->hard_header_len;
	}

	if (conf->mtu) {
		err = __vxlan_change_mtu(dev, lowerdev, dst, conf->mtu, false);
		if (err)
			return err;
	}

	if (use_ipv6 || conf->flags & VXLAN_F_COLLECT_METADATA)
		needed_headroom += VXLAN6_HEADROOM;
	else
		needed_headroom += VXLAN_HEADROOM;
	dev->needed_headroom = needed_headroom;

	memcpy(&vxlan->cfg, conf, sizeof(*conf));
	if (!vxlan->cfg.dst_port) {
		if (conf->flags & VXLAN_F_GPE)
			vxlan->cfg.dst_port = 4790; /* IANA assigned VXLAN-GPE port */
		else
			vxlan->cfg.dst_port = default_port;
	}
	vxlan->flags |= conf->flags;

	if (!vxlan->cfg.age_interval)
		vxlan->cfg.age_interval = FDB_AGE_DEFAULT;

	list_for_each_entry(tmp, &vn->vxlan_list, next) {
		if (tmp->cfg.vni == conf->vni &&
		    (tmp->default_dst.remote_ip.sa.sa_family == AF_INET6 ||
		     tmp->cfg.saddr.sa.sa_family == AF_INET6) == use_ipv6 &&
		    tmp->cfg.dst_port == vxlan->cfg.dst_port &&
		    (tmp->flags & VXLAN_F_RCV_FLAGS) ==
		    (vxlan->flags & VXLAN_F_RCV_FLAGS))
			return -EEXIST;
	}

	dev->ethtool_ops = &vxlan_ethtool_ops;

	/* create an fdb entry for a valid default destination */
	if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
		err = vxlan_fdb_create(vxlan, all_zeros_mac,
				       &vxlan->default_dst.remote_ip,
				       NUD_REACHABLE|NUD_PERMANENT,
				       NLM_F_EXCL|NLM_F_CREATE,
				       vxlan->cfg.dst_port,
				       vxlan->default_dst.remote_vni,
				       vxlan->default_dst.remote_ifindex,
				       NTF_SELF);
		if (err)
			return err;
	}

	err = register_netdevice(dev);
	if (err) {
		vxlan_fdb_delete_default(vxlan);
		return err;
	}

	list_add(&vxlan->next, &vn->vxlan_list);

	return 0;
}
#ifdef HAVE_EXT_ACK_IN_RTNL_LINKOPS
static int vxlan_newlink(struct net *src_net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
#else
static int vxlan_newlink(struct net *src_net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[])
#endif
{
	pr_info("unsupported operation\n");
	return -EINVAL;
}
static void vxlan_dellink(struct net_device *dev, struct list_head *head)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);

	spin_lock(&vn->sock_lock);
	if (!hlist_unhashed(&vxlan->hlist))
		hlist_del_rcu(&vxlan->hlist);
	spin_unlock(&vn->sock_lock);

	list_del(&vxlan->next);
	unregister_netdevice_queue(dev, head);
}
static size_t vxlan_get_size(const struct net_device *dev)
{
	return nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_ID */
		nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_GROUP{6} */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_LINK */
		nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_LOCAL{6} */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_TTL */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_TOS */
		nla_total_size(sizeof(__be32)) + /* IFLA_VXLAN_LABEL */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_LEARNING */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_PROXY */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_RSC */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_L2MISS */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_L3MISS */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_COLLECT_METADATA */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_AGEING */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_LIMIT */
		nla_total_size(sizeof(struct ifla_vxlan_port_range)) +
		nla_total_size(sizeof(__be16)) + /* IFLA_VXLAN_PORT */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_CSUM */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_TX */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_RX */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_TX */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_RX */
		0;
}
static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	const struct vxlan_dev *vxlan = netdev_priv(dev);
	const struct vxlan_rdst *dst = &vxlan->default_dst;
	struct ifla_vxlan_port_range ports = {
		.low =  htons(vxlan->cfg.port_min),
		.high = htons(vxlan->cfg.port_max),
	};

	if (nla_put_u32(skb, IFLA_VXLAN_ID, be32_to_cpu(dst->remote_vni)))
		goto nla_put_failure;

	if (!vxlan_addr_any(&dst->remote_ip)) {
		if (dst->remote_ip.sa.sa_family == AF_INET) {
			if (nla_put_in_addr(skb, IFLA_VXLAN_GROUP,
					    dst->remote_ip.sin.sin_addr.s_addr))
				goto nla_put_failure;
#if IS_ENABLED(CONFIG_IPV6)
		} else {
			if (nla_put_in6_addr(skb, IFLA_VXLAN_GROUP6,
					     &dst->remote_ip.sin6.sin6_addr))
				goto nla_put_failure;
#endif
		}
	}

	if (dst->remote_ifindex && nla_put_u32(skb, IFLA_VXLAN_LINK, dst->remote_ifindex))
		goto nla_put_failure;

	if (!vxlan_addr_any(&vxlan->cfg.saddr)) {
		if (vxlan->cfg.saddr.sa.sa_family == AF_INET) {
			if (nla_put_in_addr(skb, IFLA_VXLAN_LOCAL,
					    vxlan->cfg.saddr.sin.sin_addr.s_addr))
				goto nla_put_failure;
#if IS_ENABLED(CONFIG_IPV6)
		} else {
			if (nla_put_in6_addr(skb, IFLA_VXLAN_LOCAL6,
					     &vxlan->cfg.saddr.sin6.sin6_addr))
				goto nla_put_failure;
#endif
		}
	}

	if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->cfg.ttl) ||
	    nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) ||
	    nla_put_be32(skb, IFLA_VXLAN_LABEL, vxlan->cfg.label) ||
	    nla_put_u8(skb, IFLA_VXLAN_LEARNING,
		       !!(vxlan->flags & VXLAN_F_LEARN)) ||
	    nla_put_u8(skb, IFLA_VXLAN_PROXY,
		       !!(vxlan->flags & VXLAN_F_PROXY)) ||
	    nla_put_u8(skb, IFLA_VXLAN_RSC, !!(vxlan->flags & VXLAN_F_RSC)) ||
	    nla_put_u8(skb, IFLA_VXLAN_L2MISS,
		       !!(vxlan->flags & VXLAN_F_L2MISS)) ||
	    nla_put_u8(skb, IFLA_VXLAN_L3MISS,
		       !!(vxlan->flags & VXLAN_F_L3MISS)) ||
	    nla_put_u8(skb, IFLA_VXLAN_COLLECT_METADATA,
		       !!(vxlan->flags & VXLAN_F_COLLECT_METADATA)) ||
	    nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->cfg.age_interval) ||
	    nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->cfg.addrmax) ||
	    nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->cfg.dst_port) ||
	    nla_put_u8(skb, IFLA_VXLAN_UDP_CSUM,
		       !(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM_TX)) ||
	    nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_TX,
		       !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) ||
	    nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_RX,
		       !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_RX)) ||
	    nla_put_u8(skb, IFLA_VXLAN_REMCSUM_TX,
		       !!(vxlan->flags & VXLAN_F_REMCSUM_TX)) ||
	    nla_put_u8(skb, IFLA_VXLAN_REMCSUM_RX,
		       !!(vxlan->flags & VXLAN_F_REMCSUM_RX)))
		goto nla_put_failure;

	if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports))
		goto nla_put_failure;

	if (vxlan->flags & VXLAN_F_GBP &&
	    nla_put_flag(skb, IFLA_VXLAN_GBP))
		goto nla_put_failure;

	if (vxlan->flags & VXLAN_F_GPE &&
	    nla_put_flag(skb, IFLA_VXLAN_GPE))
		goto nla_put_failure;

	if (vxlan->flags & VXLAN_F_REMCSUM_NOPARTIAL &&
	    nla_put_flag(skb, IFLA_VXLAN_REMCSUM_NOPARTIAL))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#ifdef HAVE_GET_LINK_NET
static struct net *vxlan_get_link_net(const struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);

	return vxlan->net;
}
#endif
static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
	.kind		= "ovs_vxlan",
	.maxtype	= IFLA_VXLAN_MAX,
	.policy		= vxlan_policy,
	.priv_size	= sizeof(struct vxlan_dev),
	.setup		= vxlan_setup,
	.validate	= vxlan_validate,
	.newlink	= vxlan_newlink,
	.dellink	= vxlan_dellink,
	.get_size	= vxlan_get_size,
	.fill_info	= vxlan_fill_info,
#ifdef HAVE_GET_LINK_NET
	.get_link_net	= vxlan_get_link_net,
#endif
};
struct net_device *rpl_vxlan_dev_create(struct net *net, const char *name,
					u8 name_assign_type,
					struct vxlan_config *conf)
{
	struct nlattr *tb[IFLA_MAX + 1];
	struct net_device *dev;
	int err;

	memset(&tb, 0, sizeof(tb));

	dev = rtnl_create_link(net, name, name_assign_type,
			       &vxlan_link_ops, tb);
	if (IS_ERR(dev))
		return dev;

	err = vxlan_dev_configure(net, dev, conf);
	if (err < 0) {
		free_netdev(dev);
		return ERR_PTR(err);
	}

	err = rtnl_configure_link(dev, NULL);
	if (err < 0) {
		LIST_HEAD(list_kill);

		vxlan_dellink(dev, &list_kill);
		unregister_netdevice_many(&list_kill);
		return ERR_PTR(err);
	}

	return dev;
}
EXPORT_SYMBOL_GPL(rpl_vxlan_dev_create);
static void vxlan_handle_lowerdev_unregister(struct vxlan_net *vn,
					     struct net_device *dev)
{
	struct vxlan_dev *vxlan, *next;
	LIST_HEAD(list_kill);

	list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
		struct vxlan_rdst *dst = &vxlan->default_dst;

		/* In case we created vxlan device with carrier
		 * and we lose the carrier due to module unload
		 * we also need to remove vxlan device. In other
		 * cases, it's not necessary and remote_ifindex
		 * is 0 here, so no matches.
		 */
		if (dst->remote_ifindex == dev->ifindex)
			vxlan_dellink(vxlan->dev, &list_kill);
	}

	unregister_netdevice_many(&list_kill);
}

static int vxlan_netdevice_event(struct notifier_block *unused,
				 unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);

	if (event == NETDEV_UNREGISTER)
		vxlan_handle_lowerdev_unregister(vn, dev);
	else if (event == NETDEV_OFFLOAD_PUSH_VXLAN)
		vxlan_push_rx_ports(dev);

	return NOTIFY_DONE;
}

static struct notifier_block vxlan_notifier_block __read_mostly = {
	.notifier_call = vxlan_netdevice_event,
};
static __net_init int vxlan_init_net(struct net *net)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	unsigned int h;

	INIT_LIST_HEAD(&vn->vxlan_list);
	spin_lock_init(&vn->sock_lock);

	for (h = 0; h < PORT_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vn->sock_list[h]);

	return 0;
}

static void __net_exit vxlan_exit_net(struct net *net)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	struct vxlan_dev *vxlan, *next;
	struct net_device *dev, *aux;
	LIST_HEAD(list);

	rtnl_lock();
	for_each_netdev_safe(net, dev, aux)
		if (dev->rtnl_link_ops == &vxlan_link_ops)
			unregister_netdevice_queue(dev, &list);

	list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
		/* If vxlan->dev is in the same netns, it has already been added
		 * to the list by the previous loop.
		 */
		if (!net_eq(dev_net(vxlan->dev), net))
			unregister_netdevice_queue(vxlan->dev, &list);
	}

	unregister_netdevice_many(&list);
	rtnl_unlock();
}
static struct pernet_operations vxlan_net_ops = {
	.init = vxlan_init_net,
	.exit = vxlan_exit_net,
	.id   = &vxlan_net_id,
	.size = sizeof(struct vxlan_net),
};
int rpl_vxlan_init_module(void)
{
	int rc;

	get_random_bytes(&vxlan_salt, sizeof(vxlan_salt));

	rc = register_pernet_subsys(&vxlan_net_ops);
	if (rc)
		goto out1;

	rc = register_netdevice_notifier(&vxlan_notifier_block);
	if (rc)
		goto out2;

	rc = rtnl_link_register(&vxlan_link_ops);
	if (rc)
		goto out3;

	pr_info("VxLAN tunneling driver\n");
	return 0;
out3:
	unregister_netdevice_notifier(&vxlan_notifier_block);
out2:
	unregister_pernet_subsys(&vxlan_net_ops);
out1:
	pr_err("Error while initializing VxLAN %d\n", rc);
	return rc;
}

void rpl_vxlan_cleanup_module(void)
{
	rtnl_link_unregister(&vxlan_link_ops);
	unregister_netdevice_notifier(&vxlan_notifier_block);
	unregister_pernet_subsys(&vxlan_net_ops);
	/* rcu_barrier() is called by netns */
}
#endif /* !USE_UPSTREAM_TUNNEL */