/*
 * VXLAN: Virtual eXtensible Local Area Network
 *
 * Copyright (c) 2012-2013 Vyatta Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/udp.h>
#include <linux/igmp.h>
#include <linux/inetdevice.h>
#include <linux/if_ether.h>
#include <linux/ethtool.h>
#include <net/ndisc.h>
#include <net/rtnetlink.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/vxlan.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ip6_tunnel.h>
#include <net/ip6_checksum.h>
#endif

#define VXLAN_VERSION	"0.1"

#define PORT_HASH_BITS	8
#define PORT_HASH_SIZE	(1 << PORT_HASH_BITS)
#define FDB_AGE_DEFAULT 300 /* 5 min */
#define FDB_AGE_INTERVAL (10 * HZ)	/* rescan interval */
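/* Note on units: FDB_AGE_DEFAULT is expressed in seconds (300 s = 5 min) and
 * is multiplied by HZ when an ageing deadline is computed, whereas
 * FDB_AGE_INTERVAL is already in jiffies and only controls how often the
 * ageing timer rescans the forwarding table.
 */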
/* UDP port for VXLAN traffic.
 * The IANA assigned port is 4789, but the Linux default is 8472
 * for compatibility with early adopters.
 */
static unsigned short vxlan_port __read_mostly = 8472;
module_param_named(udp_port, vxlan_port, ushort, 0444);
MODULE_PARM_DESC(udp_port, "Destination UDP port");
static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

static unsigned int vxlan_net_id;
static struct rtnl_link_ops vxlan_link_ops;

static const u8 all_zeros_mac[ETH_ALEN + 2];

static int vxlan_sock_add(struct vxlan_dev *vxlan);

/* per-network namespace private data for this module */
struct vxlan_net {
	struct list_head  vxlan_list;
	struct hlist_head sock_list[PORT_HASH_SIZE];
	spinlock_t	  sock_lock;
};

/* Forwarding table entry */
struct vxlan_fdb {
	struct hlist_node hlist;	/* linked list of entries */
	struct rcu_head	  rcu;
	unsigned long	  updated;	/* jiffies */
	unsigned long	  used;
	struct list_head  remotes;
	u8		  eth_addr[ETH_ALEN];
	u16		  state;	/* see ndm_state */
	u8		  flags;	/* see ndm_flags */
};

/* salt for hash table */
static u32 vxlan_salt __read_mostly;
static inline bool vxlan_collect_metadata(struct vxlan_sock *vs)
{
	return vs->flags & VXLAN_F_COLLECT_METADATA ||
	       ip_tunnel_collect_metadata();
}

static struct ip_fan_map *vxlan_fan_find_map(struct vxlan_dev *vxlan, __be32 daddr)
{
	struct ip_fan_map *fan_map;

	list_for_each_entry_rcu(fan_map, &vxlan->fan.fan_maps, list) {
		if (fan_map->overlay ==
		    (daddr & inet_make_mask(fan_map->overlay_prefix))) {
			return fan_map;
		}
	}

	return NULL;
}
static void vxlan_fan_flush_map(struct vxlan_dev *vxlan)
{
	struct ip_fan_map *fan_map;

	list_for_each_entry_rcu(fan_map, &vxlan->fan.fan_maps, list) {
		list_del_rcu(&fan_map->list);
		kfree_rcu(fan_map, rcu);
	}
}

static int vxlan_fan_del_map(struct vxlan_dev *vxlan, __be32 overlay)
{
	struct ip_fan_map *fan_map;

	fan_map = vxlan_fan_find_map(vxlan, overlay);
	if (!fan_map)
		return -ENOENT;

	list_del_rcu(&fan_map->list);
	kfree_rcu(fan_map, rcu);

	return 0;
}
static int vxlan_fan_add_map(struct vxlan_dev *vxlan, struct ifla_fan_map *map)
{
	__be32 overlay_mask, underlay_mask;
	struct ip_fan_map *fan_map;

	overlay_mask = inet_make_mask(map->overlay_prefix);
	underlay_mask = inet_make_mask(map->underlay_prefix);

	netdev_dbg(vxlan->dev, "vfam: map: o %x/%d u %x/%d om %x um %x\n",
		   map->overlay, map->overlay_prefix,
		   map->underlay, map->underlay_prefix,
		   overlay_mask, underlay_mask);

	if ((map->overlay & ~overlay_mask) || (map->underlay & ~underlay_mask))
		return -EINVAL;

	if (!(map->overlay & overlay_mask) && (map->underlay & underlay_mask))
		return -EINVAL;

	/* Special case: overlay 0 and underlay 0: flush all mappings */
	if (!map->overlay && !map->underlay) {
		vxlan_fan_flush_map(vxlan);
		return 0;
	}

	/* Special case: overlay set and underlay 0: clear map for overlay */
	if (!map->underlay)
		return vxlan_fan_del_map(vxlan, map->overlay);

	if (vxlan_fan_find_map(vxlan, map->overlay))
		return -EEXIST;

	fan_map = kmalloc(sizeof(*fan_map), GFP_KERNEL);
	if (!fan_map)
		return -ENOMEM;

	fan_map->underlay = map->underlay;
	fan_map->overlay = map->overlay;
	fan_map->underlay_prefix = map->underlay_prefix;
	fan_map->overlay_mask = ntohl(overlay_mask);
	fan_map->overlay_prefix = map->overlay_prefix;

	list_add_tail_rcu(&fan_map->list, &vxlan->fan.fan_maps);

	return 0;
}
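/* Summary of the special cases handled above: a map with both overlay and
 * underlay set to zero flushes every mapping, a map with an overlay but a
 * zero underlay deletes just that overlay's mapping, and any other map is
 * appended to vxlan->fan.fan_maps unless an entry for that overlay already
 * exists.
 */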
static int vxlan_parse_fan_map(struct nlattr *data[], struct vxlan_dev *vxlan)
{
	struct ifla_fan_map *map;
	struct nlattr *attr;
	int rem, rv;

	nla_for_each_nested(attr, data[IFLA_IPTUN_FAN_MAP], rem) {
		map = nla_data(attr);
		rv = vxlan_fan_add_map(vxlan, map);
		if (rv)
			return rv;
	}

	return 0;
}
static int vxlan_fan_build_rdst(struct vxlan_dev *vxlan, struct sk_buff *skb,
				struct vxlan_rdst *fan_rdst)
{
	struct ip_fan_map *f_map;
	union vxlan_addr *va;
	u32 daddr, underlay;
	struct arphdr *arp;
	void *arp_ptr;
	struct ethhdr *eth;
	struct iphdr *iph;

	eth = eth_hdr(skb);
	switch (eth->h_proto) {
	case htons(ETH_P_IP):
		iph = ip_hdr(skb);
		if (!iph)
			return -EINVAL;
		daddr = iph->daddr;
		break;
	case htons(ETH_P_ARP):
		arp = arp_hdr(skb);
		if (!arp)
			return -EINVAL;
		arp_ptr = arp + 1;
		netdev_dbg(vxlan->dev,
			   "vfbr: arp sha %pM sip %pI4 tha %pM tip %pI4\n",
			   arp_ptr, arp_ptr + skb->dev->addr_len,
			   arp_ptr + skb->dev->addr_len + 4,
			   arp_ptr + (skb->dev->addr_len * 2) + 4);
		arp_ptr += (skb->dev->addr_len * 2) + 4;
		memcpy(&daddr, arp_ptr, 4);
		break;
	default:
		netdev_dbg(vxlan->dev, "vfbr: unknown eth p %x\n", eth->h_proto);
		return -EINVAL;
	}

	f_map = vxlan_fan_find_map(vxlan, daddr);
	if (!f_map)
		return -EINVAL;

	daddr = ntohl(daddr);
	underlay = ntohl(f_map->underlay);
	if (!underlay)
		return -EINVAL;

	memset(fan_rdst, 0, sizeof(*fan_rdst));
	va = &fan_rdst->remote_ip;
	va->sa.sa_family = AF_INET;
	fan_rdst->remote_vni = vxlan->default_dst.remote_vni;
	va->sin.sin_addr.s_addr = htonl(underlay |
					((daddr & ~f_map->overlay_mask) >>
					 (32 - f_map->overlay_prefix -
					  (32 - f_map->underlay_prefix))));
	netdev_dbg(vxlan->dev, "vfbr: daddr %x ul %x dst %x\n",
		   daddr, underlay, va->sin.sin_addr.s_addr);

	return 0;
}
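/* Worked example of the mapping above (illustrative values only): with an
 * overlay of 10.0.0.0/8 mapped onto an underlay of 192.168.0.0/16, the shift
 * is 32 - 8 - (32 - 16) = 8, so an inner destination of 10.3.4.5 (0x0a030405)
 * keeps its host bits 0x030405, is shifted right to 0x0304 and OR-ed into the
 * underlay, giving a tunnel endpoint of 192.168.3.4 (0xc0a80304).
 */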
#if IS_ENABLED(CONFIG_IPV6)
static inline
bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
{
	if (a->sa.sa_family != b->sa.sa_family)
		return false;
	if (a->sa.sa_family == AF_INET6)
		return ipv6_addr_equal(&a->sin6.sin6_addr, &b->sin6.sin6_addr);
	else
		return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
}

static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
{
	if (ipa->sa.sa_family == AF_INET6)
		return ipv6_addr_any(&ipa->sin6.sin6_addr);
	else
		return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
}

static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
{
	if (ipa->sa.sa_family == AF_INET6)
		return ipv6_addr_is_multicast(&ipa->sin6.sin6_addr);
	else
		return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
}

static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
{
	if (nla_len(nla) >= sizeof(struct in6_addr)) {
		ip->sin6.sin6_addr = nla_get_in6_addr(nla);
		ip->sa.sa_family = AF_INET6;
		return 0;
	} else if (nla_len(nla) >= sizeof(__be32)) {
		ip->sin.sin_addr.s_addr = nla_get_in_addr(nla);
		ip->sa.sa_family = AF_INET;
		return 0;
	} else {
		return -EAFNOSUPPORT;
	}
}

static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
			      const union vxlan_addr *ip)
{
	if (ip->sa.sa_family == AF_INET6)
		return nla_put_in6_addr(skb, attr, &ip->sin6.sin6_addr);
	else
		return nla_put_in_addr(skb, attr, ip->sin.sin_addr.s_addr);
}
#else /* !CONFIG_IPV6 */

static inline
bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
{
	return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
}

static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
{
	return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
}

static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
{
	return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
}

static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
{
	if (nla_len(nla) >= sizeof(struct in6_addr)) {
		return -EAFNOSUPPORT;
	} else if (nla_len(nla) >= sizeof(__be32)) {
		ip->sin.sin_addr.s_addr = nla_get_in_addr(nla);
		ip->sa.sa_family = AF_INET;
		return 0;
	} else {
		return -EAFNOSUPPORT;
	}
}

static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
			      const union vxlan_addr *ip)
{
	return nla_put_in_addr(skb, attr, ip->sin.sin_addr.s_addr);
}
#endif
/* Virtual Network hash table head */
static inline struct hlist_head *vni_head(struct vxlan_sock *vs, __be32 vni)
{
	return &vs->vni_list[hash_32((__force u32)vni, VNI_HASH_BITS)];
}

/* Socket hash table head */
static inline struct hlist_head *vs_head(struct net *net, __be16 port)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);

	return &vn->sock_list[hash_32(ntohs(port), PORT_HASH_BITS)];
}

/* First remote destination for a forwarding entry.
 * Guaranteed to be non-NULL because remotes are never deleted.
 */
static inline struct vxlan_rdst *first_remote_rcu(struct vxlan_fdb *fdb)
{
	return list_entry_rcu(fdb->remotes.next, struct vxlan_rdst, list);
}

static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb)
{
	return list_first_entry(&fdb->remotes, struct vxlan_rdst, list);
}
/* Find VXLAN socket based on network namespace, address family and UDP port
 * and enabled unshareable flags.
 */
static struct vxlan_sock *vxlan_find_sock(struct net *net, sa_family_t family,
					  __be16 port, u32 flags)
{
	struct vxlan_sock *vs;

	flags &= VXLAN_F_RCV_FLAGS;

	hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) {
		if (inet_sk(vs->sock->sk)->inet_sport == port &&
		    vxlan_get_sk_family(vs) == family &&
		    vs->flags == flags)
			return vs;
	}
	return NULL;
}

static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, __be32 vni)
{
	struct vxlan_dev *vxlan;

	/* For flow based devices, map all packets to VNI 0 */
	if (vs->flags & VXLAN_F_COLLECT_METADATA)
		vni = 0;

	hlist_for_each_entry_rcu(vxlan, vni_head(vs, vni), hlist) {
		if (vxlan->default_dst.remote_vni == vni)
			return vxlan;
	}

	return NULL;
}

/* Look up VNI in a per net namespace table */
static struct vxlan_dev *vxlan_find_vni(struct net *net, __be32 vni,
					sa_family_t family, __be16 port,
					u32 flags)
{
	struct vxlan_sock *vs;

	vs = vxlan_find_sock(net, family, port, flags);
	if (!vs)
		return NULL;

	return vxlan_vs_find_vni(vs, vni);
}
/* Fill in neighbour message in skbuff. */
static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
			  const struct vxlan_fdb *fdb,
			  u32 portid, u32 seq, int type, unsigned int flags,
			  const struct vxlan_rdst *rdst)
{
	unsigned long now = jiffies;
	struct nda_cacheinfo ci;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;
	bool send_ip, send_eth;

	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	memset(ndm, 0, sizeof(*ndm));

	send_eth = send_ip = true;

	if (type == RTM_GETNEIGH) {
		ndm->ndm_family	= AF_INET;
		send_ip = !vxlan_addr_any(&rdst->remote_ip);
		send_eth = !is_zero_ether_addr(fdb->eth_addr);
	} else
		ndm->ndm_family	= AF_BRIDGE;
	ndm->ndm_state = fdb->state;
	ndm->ndm_ifindex = vxlan->dev->ifindex;
	ndm->ndm_flags = fdb->flags;
	ndm->ndm_type = RTN_UNICAST;

	if (!net_eq(dev_net(vxlan->dev), vxlan->net) &&
	    nla_put_s32(skb, NDA_LINK_NETNSID,
			peernet2id(dev_net(vxlan->dev), vxlan->net)))
		goto nla_put_failure;

	if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
		goto nla_put_failure;

	if (send_ip && vxlan_nla_put_addr(skb, NDA_DST, &rdst->remote_ip))
		goto nla_put_failure;

	if (rdst->remote_port && rdst->remote_port != vxlan->cfg.dst_port &&
	    nla_put_be16(skb, NDA_PORT, rdst->remote_port))
		goto nla_put_failure;
	if (rdst->remote_vni != vxlan->default_dst.remote_vni &&
	    nla_put_u32(skb, NDA_VNI, be32_to_cpu(rdst->remote_vni)))
		goto nla_put_failure;
	if (rdst->remote_ifindex &&
	    nla_put_u32(skb, NDA_IFINDEX, rdst->remote_ifindex))
		goto nla_put_failure;

	ci.ndm_used	 = jiffies_to_clock_t(now - fdb->used);
	ci.ndm_confirmed = 0;
	ci.ndm_updated	 = jiffies_to_clock_t(now - fdb->updated);
	ci.ndm_refcnt	 = 0;

	if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static inline size_t vxlan_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
		+ nla_total_size(ETH_ALEN) /* NDA_LLADDR */
		+ nla_total_size(sizeof(struct in6_addr)) /* NDA_DST */
		+ nla_total_size(sizeof(__be16)) /* NDA_PORT */
		+ nla_total_size(sizeof(__be32)) /* NDA_VNI */
		+ nla_total_size(sizeof(__u32)) /* NDA_IFINDEX */
		+ nla_total_size(sizeof(__s32)) /* NDA_LINK_NETNSID */
		+ nla_total_size(sizeof(struct nda_cacheinfo));
}
static void vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb,
			     struct vxlan_rdst *rd, int type)
{
	struct net *net = dev_net(vxlan->dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(vxlan_nlmsg_size(), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, rd);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}
static void vxlan_ip_miss(struct net_device *dev, union vxlan_addr *ipa)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb f = {
		.state = NUD_STALE,
	};
	struct vxlan_rdst remote = {
		.remote_ip = *ipa, /* goes to NDA_DST */
		.remote_vni = cpu_to_be32(VXLAN_N_VID),
	};

	vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH);
}

static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
{
	struct vxlan_fdb f = {
		.state = NUD_STALE,
	};
	struct vxlan_rdst remote = { };

	memcpy(f.eth_addr, eth_addr, ETH_ALEN);

	vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH);
}
/* Hash Ethernet address */
static u32 eth_hash(const unsigned char *addr)
{
	u64 value = get_unaligned((u64 *)addr);

	/* only want 6 bytes */
#ifdef __BIG_ENDIAN
	value >>= 16;
#else
	value <<= 16;
#endif
	return hash_64(value, FDB_HASH_BITS);
}

/* Hash chain to use given mac address */
static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
						const u8 *mac)
{
	return &vxlan->fdb_head[eth_hash(mac)];
}
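/* eth_hash() above loads eight bytes starting at the MAC address and then
 * shifts the value by 16 bits (left on little-endian, right on big-endian)
 * so that only the six address bytes contribute before hash_64() folds the
 * result down to FDB_HASH_BITS bits.
 */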
/* Look up Ethernet address in forwarding table */
static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan,
					  const u8 *mac)
{
	struct hlist_head *head = vxlan_fdb_head(vxlan, mac);
	struct vxlan_fdb *f;

	hlist_for_each_entry_rcu(f, head, hlist) {
		if (ether_addr_equal(mac, f->eth_addr))
			return f;
	}

	return NULL;
}

static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
					const u8 *mac)
{
	struct vxlan_fdb *f;

	f = __vxlan_find_mac(vxlan, mac);
	if (f)
		f->used = jiffies;

	return f;
}
/* caller should hold vxlan->hash_lock */
static struct vxlan_rdst *vxlan_fdb_find_rdst(struct vxlan_fdb *f,
					      union vxlan_addr *ip, __be16 port,
					      __be32 vni, __u32 ifindex)
{
	struct vxlan_rdst *rd;

	list_for_each_entry(rd, &f->remotes, list) {
		if (vxlan_addr_equal(&rd->remote_ip, ip) &&
		    rd->remote_port == port &&
		    rd->remote_vni == vni &&
		    rd->remote_ifindex == ifindex)
			return rd;
	}

	return NULL;
}
/* Replace destination of unicast mac */
static int vxlan_fdb_replace(struct vxlan_fdb *f,
			     union vxlan_addr *ip, __be16 port, __be32 vni,
			     __u32 ifindex)
{
	struct vxlan_rdst *rd;

	rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
	if (rd)
		return 0;

	rd = list_first_entry_or_null(&f->remotes, struct vxlan_rdst, list);
	if (!rd)
		return 0;

	dst_cache_reset(&rd->dst_cache);
	rd->remote_ip = *ip;
	rd->remote_port = port;
	rd->remote_vni = vni;
	rd->remote_ifindex = ifindex;
	return 1;
}
/* Add/update destinations for multicast */
static int vxlan_fdb_append(struct vxlan_fdb *f,
			    union vxlan_addr *ip, __be16 port, __be32 vni,
			    __u32 ifindex, struct vxlan_rdst **rdp)
{
	struct vxlan_rdst *rd;

	rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
	if (rd)
		return 0;

	rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
	if (rd == NULL)
		return -ENOBUFS;

	if (dst_cache_init(&rd->dst_cache, GFP_ATOMIC)) {
		kfree(rd);
		return -ENOBUFS;
	}

	rd->remote_ip = *ip;
	rd->remote_port = port;
	rd->remote_vni = vni;
	rd->remote_ifindex = ifindex;

	list_add_tail_rcu(&rd->list, &f->remotes);

	*rdp = rd;
	return 1;
}
*vxlan_gro_remcsum(struct sk_buff
*skb
,
673 struct vxlanhdr
*vh
, size_t hdrlen
,
675 struct gro_remcsum
*grc
,
678 size_t start
, offset
;
680 if (skb
->remcsum_offload
)
683 if (!NAPI_GRO_CB(skb
)->csum_valid
)
686 start
= vxlan_rco_start(vni_field
);
687 offset
= start
+ vxlan_rco_offset(vni_field
);
689 vh
= skb_gro_remcsum_process(skb
, (void *)vh
, off
, hdrlen
,
690 start
, offset
, grc
, nopartial
);
692 skb
->remcsum_offload
= 1;
static struct sk_buff **vxlan_gro_receive(struct sock *sk,
					  struct sk_buff **head,
					  struct sk_buff *skb)
{
	struct sk_buff *p, **pp = NULL;
	struct vxlanhdr *vh, *vh2;
	unsigned int hlen, off_vx;
	int flush = 1;
	struct vxlan_sock *vs = rcu_dereference_sk_user_data(sk);
	__be32 flags;
	struct gro_remcsum grc;

	skb_gro_remcsum_init(&grc);

	off_vx = skb_gro_offset(skb);
	hlen = off_vx + sizeof(*vh);
	vh = skb_gro_header_fast(skb, off_vx);
	if (skb_gro_header_hard(skb, hlen)) {
		vh = skb_gro_header_slow(skb, hlen, off_vx);
		if (unlikely(!vh))
			goto out;
	}

	skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr));

	flags = vh->vx_flags;

	if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) {
		vh = vxlan_gro_remcsum(skb, off_vx, vh, sizeof(struct vxlanhdr),
				       vh->vx_vni, &grc,
				       !!(vs->flags &
					  VXLAN_F_REMCSUM_NOPARTIAL));
		if (!vh)
			goto out;
	}

	skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */

	for (p = *head; p; p = p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		vh2 = (struct vxlanhdr *)(p->data + off_vx);
		if (vh->vx_flags != vh2->vx_flags ||
		    vh->vx_vni != vh2->vx_vni) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}

	pp = call_gro_receive(eth_gro_receive, head, skb);
	flush = 0;

out:
	skb_gro_remcsum_cleanup(skb, &grc);
	NAPI_GRO_CB(skb)->flush |= flush;

	return pp;
}
*sk
, struct sk_buff
*skb
, int nhoff
)
760 /* Sets 'skb->inner_mac_header' since we are always called with
761 * 'skb->encapsulation' set.
763 return eth_gro_complete(skb
, nhoff
+ sizeof(struct vxlanhdr
));
/* Add new entry to forwarding table -- assumes lock held */
static int vxlan_fdb_create(struct vxlan_dev *vxlan,
			    const u8 *mac, union vxlan_addr *ip,
			    __u16 state, __u16 flags,
			    __be16 port, __be32 vni, __u32 ifindex,
			    __u8 ndm_flags)
{
	struct vxlan_rdst *rd = NULL;
	struct vxlan_fdb *f;
	int notify = 0;
	int rc;

	f = __vxlan_find_mac(vxlan, mac);
	if (f) {
		if (flags & NLM_F_EXCL) {
			netdev_dbg(vxlan->dev,
				   "lost race to create %pM\n", mac);
			return -EEXIST;
		}
		if (f->state != state) {
			f->state = state;
			f->updated = jiffies;
			notify = 1;
		}
		if (f->flags != ndm_flags) {
			f->flags = ndm_flags;
			f->updated = jiffies;
			notify = 1;
		}
		if ((flags & NLM_F_REPLACE)) {
			/* Only change unicasts */
			if (!(is_multicast_ether_addr(f->eth_addr) ||
			      is_zero_ether_addr(f->eth_addr))) {
				notify |= vxlan_fdb_replace(f, ip, port, vni,
							    ifindex);
			} else
				return -EOPNOTSUPP;
		}
		if ((flags & NLM_F_APPEND) &&
		    (is_multicast_ether_addr(f->eth_addr) ||
		     is_zero_ether_addr(f->eth_addr))) {
			rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
			if (rc < 0)
				return rc;
			notify |= rc;
		}
	} else {
		if (!(flags & NLM_F_CREATE))
			return -ENOENT;

		if (vxlan->cfg.addrmax &&
		    vxlan->addrcnt >= vxlan->cfg.addrmax)
			return -ENOSPC;

		/* Disallow replace to add a multicast entry */
		if ((flags & NLM_F_REPLACE) &&
		    (is_multicast_ether_addr(mac) || is_zero_ether_addr(mac)))
			return -EOPNOTSUPP;

		netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
		f = kmalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return -ENOMEM;

		notify = 1;
		f->state = state;
		f->flags = ndm_flags;
		f->updated = f->used = jiffies;
		INIT_LIST_HEAD(&f->remotes);
		memcpy(f->eth_addr, mac, ETH_ALEN);

		rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
		if (rc < 0) {
			kfree(f);
			return rc;
		}

		++vxlan->addrcnt;
		hlist_add_head_rcu(&f->hlist,
				   vxlan_fdb_head(vxlan, mac));
	}

	if (notify) {
		if (rd == NULL)
			rd = first_remote_rtnl(f);
		vxlan_fdb_notify(vxlan, f, rd, RTM_NEWNEIGH);
	}

	return 0;
}
*head
)
860 struct vxlan_fdb
*f
= container_of(head
, struct vxlan_fdb
, rcu
);
861 struct vxlan_rdst
*rd
, *nd
;
863 list_for_each_entry_safe(rd
, nd
, &f
->remotes
, list
) {
864 dst_cache_destroy(&rd
->dst_cache
);
870 static void vxlan_fdb_destroy(struct vxlan_dev
*vxlan
, struct vxlan_fdb
*f
)
872 netdev_dbg(vxlan
->dev
,
873 "delete %pM\n", f
->eth_addr
);
876 vxlan_fdb_notify(vxlan
, f
, first_remote_rtnl(f
), RTM_DELNEIGH
);
878 hlist_del_rcu(&f
->hlist
);
879 call_rcu(&f
->rcu
, vxlan_fdb_free
);
static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
			   union vxlan_addr *ip, __be16 *port, __be32 *vni,
			   u32 *ifindex)
{
	struct net *net = dev_net(vxlan->dev);
	int err;

	if (tb[NDA_DST]) {
		err = vxlan_nla_get_addr(ip, tb[NDA_DST]);
		if (err)
			return err;
	} else {
		union vxlan_addr *remote = &vxlan->default_dst.remote_ip;
		if (remote->sa.sa_family == AF_INET) {
			ip->sin.sin_addr.s_addr = htonl(INADDR_ANY);
			ip->sa.sa_family = AF_INET;
#if IS_ENABLED(CONFIG_IPV6)
		} else {
			ip->sin6.sin6_addr = in6addr_any;
			ip->sa.sa_family = AF_INET6;
#endif
		}
	}

	if (tb[NDA_PORT]) {
		if (nla_len(tb[NDA_PORT]) != sizeof(__be16))
			return -EINVAL;
		*port = nla_get_be16(tb[NDA_PORT]);
	} else {
		*port = vxlan->cfg.dst_port;
	}

	if (tb[NDA_VNI]) {
		if (nla_len(tb[NDA_VNI]) != sizeof(u32))
			return -EINVAL;
		*vni = cpu_to_be32(nla_get_u32(tb[NDA_VNI]));
	} else {
		*vni = vxlan->default_dst.remote_vni;
	}

	if (tb[NDA_IFINDEX]) {
		struct net_device *tdev;

		if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
			return -EINVAL;
		*ifindex = nla_get_u32(tb[NDA_IFINDEX]);
		tdev = __dev_get_by_index(net, *ifindex);
		if (!tdev)
			return -EADDRNOTAVAIL;
	} else {
		*ifindex = 0;
	}

	return 0;
}
/* Add static entry (via netlink) */
static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			 struct net_device *dev,
			 const unsigned char *addr, u16 vid, u16 flags)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	/* struct net *net = dev_net(vxlan->dev); */
	union vxlan_addr ip;
	__be16 port;
	__be32 vni;
	u32 ifindex;
	int err;

	if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) {
		pr_info("RTM_NEWNEIGH with invalid state %#x\n",
			ndm->ndm_state);
		return -EINVAL;
	}

	if (tb[NDA_DST] == NULL)
		return -EINVAL;

	err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &vni, &ifindex);
	if (err)
		return err;

	if (vxlan->default_dst.remote_ip.sa.sa_family != ip.sa.sa_family)
		return -EAFNOSUPPORT;

	spin_lock_bh(&vxlan->hash_lock);
	err = vxlan_fdb_create(vxlan, addr, &ip, ndm->ndm_state, flags,
			       port, vni, ifindex, ndm->ndm_flags);
	spin_unlock_bh(&vxlan->hash_lock);

	return err;
}
/* Delete entry (via netlink) */
static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
			    struct net_device *dev,
			    const unsigned char *addr, u16 vid)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb *f;
	struct vxlan_rdst *rd = NULL;
	union vxlan_addr ip;
	__be16 port;
	__be32 vni;
	u32 ifindex;
	int err;

	err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &vni, &ifindex);
	if (err)
		return err;

	err = -ENOENT;

	spin_lock_bh(&vxlan->hash_lock);
	f = vxlan_find_mac(vxlan, addr);
	if (!f)
		goto out;

	if (!vxlan_addr_any(&ip)) {
		rd = vxlan_fdb_find_rdst(f, &ip, port, vni, ifindex);
		if (!rd)
			goto out;
	}

	err = 0;

	/* remove a destination if it's not the only one on the list,
	 * otherwise destroy the fdb entry
	 */
	if (rd && !list_is_singular(&f->remotes)) {
		list_del_rcu(&rd->list);
		vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH);
		kfree_rcu(rd, rcu);
		goto out;
	}

	vxlan_fdb_destroy(vxlan, f);

out:
	spin_unlock_bh(&vxlan->hash_lock);

	return err;
}
1027 static int vxlan_fdb_dump(struct sk_buff
*skb
, struct netlink_callback
*cb
,
1028 struct net_device
*dev
,
1029 struct net_device
*filter_dev
, int *idx
)
1031 struct vxlan_dev
*vxlan
= netdev_priv(dev
);
1035 for (h
= 0; h
< FDB_HASH_SIZE
; ++h
) {
1036 struct vxlan_fdb
*f
;
1038 hlist_for_each_entry_rcu(f
, &vxlan
->fdb_head
[h
], hlist
) {
1039 struct vxlan_rdst
*rd
;
1041 list_for_each_entry_rcu(rd
, &f
->remotes
, list
) {
1042 if (*idx
< cb
->args
[2])
1045 err
= vxlan_fdb_info(skb
, vxlan
, f
,
1046 NETLINK_CB(cb
->skb
).portid
,
/* Watch incoming packets to learn mapping between Ethernet address
 * and Tunnel endpoint.
 * Return true if packet is bogus and should be dropped.
 */
static bool vxlan_snoop(struct net_device *dev,
			union vxlan_addr *src_ip, const u8 *src_mac)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb *f;

	f = vxlan_find_mac(vxlan, src_mac);
	if (likely(f)) {
		struct vxlan_rdst *rdst = first_remote_rcu(f);

		if (likely(vxlan_addr_equal(&rdst->remote_ip, src_ip)))
			return false;

		/* Don't migrate static entries, drop packets */
		if (f->state & NUD_NOARP)
			return true;

		if (net_ratelimit())
			netdev_info(dev,
				    "%pM migrated from %pIS to %pIS\n",
				    src_mac, &rdst->remote_ip.sa, &src_ip->sa);

		rdst->remote_ip = *src_ip;
		f->updated = jiffies;
		vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH);
	} else {
		/* learned new entry */
		spin_lock(&vxlan->hash_lock);

		/* close off race between vxlan_flush and incoming packets */
		if (netif_running(dev))
			vxlan_fdb_create(vxlan, src_mac, src_ip,
					 NUD_REACHABLE,
					 NLM_F_EXCL|NLM_F_CREATE,
					 vxlan->cfg.dst_port,
					 vxlan->default_dst.remote_vni,
					 0, NTF_SELF);
		spin_unlock(&vxlan->hash_lock);
	}

	return false;
}
1109 static bool vxlan_group_used(struct vxlan_net
*vn
, struct vxlan_dev
*dev
)
1111 struct vxlan_dev
*vxlan
;
1112 struct vxlan_sock
*sock4
;
1113 #if IS_ENABLED(CONFIG_IPV6)
1114 struct vxlan_sock
*sock6
;
1116 unsigned short family
= dev
->default_dst
.remote_ip
.sa
.sa_family
;
1118 sock4
= rtnl_dereference(dev
->vn4_sock
);
1120 /* The vxlan_sock is only used by dev, leaving group has
1121 * no effect on other vxlan devices.
1123 if (family
== AF_INET
&& sock4
&& atomic_read(&sock4
->refcnt
) == 1)
1125 #if IS_ENABLED(CONFIG_IPV6)
1126 sock6
= rtnl_dereference(dev
->vn6_sock
);
1127 if (family
== AF_INET6
&& sock6
&& atomic_read(&sock6
->refcnt
) == 1)
1131 list_for_each_entry(vxlan
, &vn
->vxlan_list
, next
) {
1132 if (!netif_running(vxlan
->dev
) || vxlan
== dev
)
1135 if (family
== AF_INET
&&
1136 rtnl_dereference(vxlan
->vn4_sock
) != sock4
)
1138 #if IS_ENABLED(CONFIG_IPV6)
1139 if (family
== AF_INET6
&&
1140 rtnl_dereference(vxlan
->vn6_sock
) != sock6
)
1144 if (!vxlan_addr_equal(&vxlan
->default_dst
.remote_ip
,
1145 &dev
->default_dst
.remote_ip
))
1148 if (vxlan
->default_dst
.remote_ifindex
!=
1149 dev
->default_dst
.remote_ifindex
)
1158 static bool __vxlan_sock_release_prep(struct vxlan_sock
*vs
)
1160 struct vxlan_net
*vn
;
1164 if (!atomic_dec_and_test(&vs
->refcnt
))
1167 vn
= net_generic(sock_net(vs
->sock
->sk
), vxlan_net_id
);
1168 spin_lock(&vn
->sock_lock
);
1169 hlist_del_rcu(&vs
->hlist
);
1170 udp_tunnel_notify_del_rx_port(vs
->sock
,
1171 (vs
->flags
& VXLAN_F_GPE
) ?
1172 UDP_TUNNEL_TYPE_VXLAN_GPE
:
1173 UDP_TUNNEL_TYPE_VXLAN
);
1174 spin_unlock(&vn
->sock_lock
);
1179 static void vxlan_sock_release(struct vxlan_dev
*vxlan
)
1181 struct vxlan_sock
*sock4
= rtnl_dereference(vxlan
->vn4_sock
);
1182 #if IS_ENABLED(CONFIG_IPV6)
1183 struct vxlan_sock
*sock6
= rtnl_dereference(vxlan
->vn6_sock
);
1185 rcu_assign_pointer(vxlan
->vn6_sock
, NULL
);
1188 rcu_assign_pointer(vxlan
->vn4_sock
, NULL
);
1191 if (__vxlan_sock_release_prep(sock4
)) {
1192 udp_tunnel_sock_release(sock4
->sock
);
1196 #if IS_ENABLED(CONFIG_IPV6)
1197 if (__vxlan_sock_release_prep(sock6
)) {
1198 udp_tunnel_sock_release(sock6
->sock
);
/* Update multicast group membership when first VNI on
 * multicast address is brought up
 */
static int vxlan_igmp_join(struct vxlan_dev *vxlan)
{
	struct sock *sk;
	union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
	int ifindex = vxlan->default_dst.remote_ifindex;
	int ret = -EINVAL;

	if (ip->sa.sa_family == AF_INET) {
		struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
		struct ip_mreqn mreq = {
			.imr_multiaddr.s_addr	= ip->sin.sin_addr.s_addr,
			.imr_ifindex		= ifindex,
		};

		sk = sock4->sock->sk;
		lock_sock(sk);
		ret = ip_mc_join_group(sk, &mreq);
		release_sock(sk);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);

		sk = sock6->sock->sk;
		lock_sock(sk);
		ret = ipv6_stub->ipv6_sock_mc_join(sk, ifindex,
						   &ip->sin6.sin6_addr);
		release_sock(sk);
#endif
	}

	return ret;
}

/* Inverse of vxlan_igmp_join when last VNI is brought down */
static int vxlan_igmp_leave(struct vxlan_dev *vxlan)
{
	struct sock *sk;
	union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
	int ifindex = vxlan->default_dst.remote_ifindex;
	int ret = -EINVAL;

	if (ip->sa.sa_family == AF_INET) {
		struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
		struct ip_mreqn mreq = {
			.imr_multiaddr.s_addr	= ip->sin.sin_addr.s_addr,
			.imr_ifindex		= ifindex,
		};

		sk = sock4->sock->sk;
		lock_sock(sk);
		ret = ip_mc_leave_group(sk, &mreq);
		release_sock(sk);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);

		sk = sock6->sock->sk;
		lock_sock(sk);
		ret = ipv6_stub->ipv6_sock_mc_drop(sk, ifindex,
						   &ip->sin6.sin6_addr);
		release_sock(sk);
#endif
	}

	return ret;
}
static bool vxlan_remcsum(struct vxlanhdr *unparsed,
			  struct sk_buff *skb, u32 vxflags)
{
	size_t start, offset;

	if (!(unparsed->vx_flags & VXLAN_HF_RCO) || skb->remcsum_offload)
		goto out;

	start = vxlan_rco_start(unparsed->vx_vni);
	offset = start + vxlan_rco_offset(unparsed->vx_vni);

	if (!pskb_may_pull(skb, offset + sizeof(u16)))
		return false;

	skb_remcsum_process(skb, (void *)(vxlan_hdr(skb) + 1), start, offset,
			    !!(vxflags & VXLAN_F_REMCSUM_NOPARTIAL));
out:
	unparsed->vx_flags &= ~VXLAN_HF_RCO;
	unparsed->vx_vni &= VXLAN_VNI_MASK;
	return true;
}
static void vxlan_parse_gbp_hdr(struct vxlanhdr *unparsed,
				struct sk_buff *skb, u32 vxflags,
				struct vxlan_metadata *md)
{
	struct vxlanhdr_gbp *gbp = (struct vxlanhdr_gbp *)unparsed;
	struct metadata_dst *tun_dst;

	if (!(unparsed->vx_flags & VXLAN_HF_GBP))
		goto out;

	md->gbp = ntohs(gbp->policy_id);

	tun_dst = (struct metadata_dst *)skb_dst(skb);
	if (tun_dst) {
		tun_dst->u.tun_info.key.tun_flags |= TUNNEL_VXLAN_OPT;
		tun_dst->u.tun_info.options_len = sizeof(*md);
	}
	if (gbp->dont_learn)
		md->gbp |= VXLAN_GBP_DONT_LEARN;

	if (gbp->policy_applied)
		md->gbp |= VXLAN_GBP_POLICY_APPLIED;

	/* In flow-based mode, GBP is carried in dst_metadata */
	if (!(vxflags & VXLAN_F_COLLECT_METADATA))
		skb->mark = md->gbp;
out:
	unparsed->vx_flags &= ~VXLAN_GBP_USED_BITS;
}
static bool vxlan_parse_gpe_hdr(struct vxlanhdr *unparsed,
				__be16 *protocol,
				struct sk_buff *skb, u32 vxflags)
{
	struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)unparsed;

	/* Need to have Next Protocol set for interfaces in GPE mode. */
	if (!gpe->np_applied)
		return false;
	/* "The initial version is 0. If a receiver does not support the
	 * version indicated it MUST drop the packet."
	 */
	if (gpe->version != 0)
		return false;
	/* "When the O bit is set to 1, the packet is an OAM packet and OAM
	 * processing MUST occur." However, we don't implement OAM
	 * processing, thus drop the packet.
	 */
	if (gpe->oam_flag)
		return false;

	switch (gpe->next_protocol) {
	case VXLAN_GPE_NP_IPV4:
		*protocol = htons(ETH_P_IP);
		break;
	case VXLAN_GPE_NP_IPV6:
		*protocol = htons(ETH_P_IPV6);
		break;
	case VXLAN_GPE_NP_ETHERNET:
		*protocol = htons(ETH_P_TEB);
		break;
	default:
		return false;
	}

	unparsed->vx_flags &= ~VXLAN_GPE_USED_BITS;
	return true;
}
static bool vxlan_set_mac(struct vxlan_dev *vxlan,
			  struct vxlan_sock *vs,
			  struct sk_buff *skb)
{
	union vxlan_addr saddr;

	skb_reset_mac_header(skb);
	skb->protocol = eth_type_trans(skb, vxlan->dev);
	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);

	/* Ignore packet loops (and multicast echo) */
	if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
		return false;

	/* Get address from the outer IP header */
	if (vxlan_get_sk_family(vs) == AF_INET) {
		saddr.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
		saddr.sa.sa_family = AF_INET;
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		saddr.sin6.sin6_addr = ipv6_hdr(skb)->saddr;
		saddr.sa.sa_family = AF_INET6;
#endif
	}

	if ((vxlan->flags & VXLAN_F_LEARN) &&
	    vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source))
		return false;

	return true;
}
static bool vxlan_ecn_decapsulate(struct vxlan_sock *vs, void *oiph,
				  struct sk_buff *skb)
{
	int err = 0;

	if (vxlan_get_sk_family(vs) == AF_INET)
		err = IP_ECN_decapsulate(oiph, skb);
#if IS_ENABLED(CONFIG_IPV6)
	else
		err = IP6_ECN_decapsulate(oiph, skb);
#endif

	if (unlikely(err) && log_ecn_error) {
		if (vxlan_get_sk_family(vs) == AF_INET)
			net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
					     &((struct iphdr *)oiph)->saddr,
					     ((struct iphdr *)oiph)->tos);
		else
			net_info_ratelimited("non-ECT from %pI6\n",
					     &((struct ipv6hdr *)oiph)->saddr);
	}

	return err <= 1;
}
/* Callback from net/ipv4/udp.c to receive packets */
static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct pcpu_sw_netstats *stats;
	struct vxlan_dev *vxlan;
	struct vxlan_sock *vs;
	struct vxlanhdr unparsed;
	struct vxlan_metadata _md;
	struct vxlan_metadata *md = &_md;
	__be16 protocol = htons(ETH_P_TEB);
	bool raw_proto = false;
	void *oiph;

	/* Need UDP and VXLAN header to be present */
	if (!pskb_may_pull(skb, VXLAN_HLEN))
		goto drop;

	unparsed = *vxlan_hdr(skb);
	/* VNI flag always required to be set */
	if (!(unparsed.vx_flags & VXLAN_HF_VNI)) {
		netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
			   ntohl(vxlan_hdr(skb)->vx_flags),
			   ntohl(vxlan_hdr(skb)->vx_vni));
		/* Return non vxlan pkt */
		return 1;
	}
	unparsed.vx_flags &= ~VXLAN_HF_VNI;
	unparsed.vx_vni &= ~VXLAN_VNI_MASK;

	vs = rcu_dereference_sk_user_data(sk);
	if (!vs)
		goto drop;

	vxlan = vxlan_vs_find_vni(vs, vxlan_vni(vxlan_hdr(skb)->vx_vni));
	if (!vxlan)
		goto drop;

	/* For backwards compatibility, only allow reserved fields to be
	 * used by VXLAN extensions if explicitly requested.
	 */
	if (vs->flags & VXLAN_F_GPE) {
		if (!vxlan_parse_gpe_hdr(&unparsed, &protocol, skb, vs->flags))
			goto drop;
		raw_proto = true;
	}

	if (__iptunnel_pull_header(skb, VXLAN_HLEN, protocol, raw_proto,
				   !net_eq(vxlan->net, dev_net(vxlan->dev))))
		goto drop;

	if (vxlan_collect_metadata(vs)) {
		__be32 vni = vxlan_vni(vxlan_hdr(skb)->vx_vni);
		struct metadata_dst *tun_dst;

		tun_dst = udp_tun_rx_dst(skb, vxlan_get_sk_family(vs), TUNNEL_KEY,
					 key32_to_tunnel_id(vni), sizeof(*md));
		if (!tun_dst)
			goto drop;

		md = ip_tunnel_info_opts(&tun_dst->u.tun_info);

		skb_dst_set(skb, (struct dst_entry *)tun_dst);
	} else {
		memset(md, 0, sizeof(*md));
	}

	if (vs->flags & VXLAN_F_REMCSUM_RX)
		if (!vxlan_remcsum(&unparsed, skb, vs->flags))
			goto drop;
	if (vs->flags & VXLAN_F_GBP)
		vxlan_parse_gbp_hdr(&unparsed, skb, vs->flags, md);
	/* Note that GBP and GPE can never be active together. This is
	 * ensured in vxlan_dev_configure.
	 */

	if (unparsed.vx_flags || unparsed.vx_vni) {
		/* If there are any unprocessed flags remaining treat
		 * this as a malformed packet. This behavior diverges from
		 * VXLAN RFC (RFC7348), which stipulates that bits in reserved
		 * fields are to be ignored. The approach here
		 * maintains compatibility with previous stack code, and also
		 * is more robust and provides a little more security in
		 * adding extensions to VXLAN.
		 */
		goto drop;
	}

	if (!raw_proto) {
		if (!vxlan_set_mac(vxlan, vs, skb))
			goto drop;
	} else {
		skb_reset_mac_header(skb);
		skb->dev = vxlan->dev;
		skb->pkt_type = PACKET_HOST;
	}

	oiph = skb_network_header(skb);
	skb_reset_network_header(skb);

	if (!vxlan_ecn_decapsulate(vs, oiph, skb)) {
		++vxlan->dev->stats.rx_frame_errors;
		++vxlan->dev->stats.rx_errors;
		goto drop;
	}

	stats = this_cpu_ptr(vxlan->dev->tstats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += skb->len;
	u64_stats_update_end(&stats->syncp);

	gro_cells_receive(&vxlan->gro_cells, skb);
	return 0;

drop:
	/* Consume bad packet */
	kfree_skb(skb);
	return 0;
}
*dev
, struct sk_buff
*skb
)
1544 struct vxlan_dev
*vxlan
= netdev_priv(dev
);
1545 struct arphdr
*parp
;
1548 struct neighbour
*n
;
1550 if (dev
->flags
& IFF_NOARP
)
1553 if (!pskb_may_pull(skb
, arp_hdr_len(dev
))) {
1554 dev
->stats
.tx_dropped
++;
1557 parp
= arp_hdr(skb
);
1559 if ((parp
->ar_hrd
!= htons(ARPHRD_ETHER
) &&
1560 parp
->ar_hrd
!= htons(ARPHRD_IEEE802
)) ||
1561 parp
->ar_pro
!= htons(ETH_P_IP
) ||
1562 parp
->ar_op
!= htons(ARPOP_REQUEST
) ||
1563 parp
->ar_hln
!= dev
->addr_len
||
1566 arpptr
= (u8
*)parp
+ sizeof(struct arphdr
);
1568 arpptr
+= dev
->addr_len
; /* sha */
1569 memcpy(&sip
, arpptr
, sizeof(sip
));
1570 arpptr
+= sizeof(sip
);
1571 arpptr
+= dev
->addr_len
; /* tha */
1572 memcpy(&tip
, arpptr
, sizeof(tip
));
1574 if (ipv4_is_loopback(tip
) ||
1575 ipv4_is_multicast(tip
))
1578 n
= neigh_lookup(&arp_tbl
, &tip
, dev
);
1581 struct vxlan_fdb
*f
;
1582 struct sk_buff
*reply
;
1584 if (!(n
->nud_state
& NUD_CONNECTED
)) {
1589 f
= vxlan_find_mac(vxlan
, n
->ha
);
1590 if (f
&& vxlan_addr_any(&(first_remote_rcu(f
)->remote_ip
))) {
1591 /* bridge-local neighbor */
1596 reply
= arp_create(ARPOP_REPLY
, ETH_P_ARP
, sip
, dev
, tip
, sha
,
1604 skb_reset_mac_header(reply
);
1605 __skb_pull(reply
, skb_network_offset(reply
));
1606 reply
->ip_summed
= CHECKSUM_UNNECESSARY
;
1607 reply
->pkt_type
= PACKET_HOST
;
1609 if (netif_rx_ni(reply
) == NET_RX_DROP
)
1610 dev
->stats
.rx_dropped
++;
1611 } else if (vxlan
->flags
& VXLAN_F_L3MISS
) {
1612 union vxlan_addr ipa
= {
1613 .sin
.sin_addr
.s_addr
= tip
,
1614 .sin
.sin_family
= AF_INET
,
1617 vxlan_ip_miss(dev
, &ipa
);
1621 return NETDEV_TX_OK
;
#if IS_ENABLED(CONFIG_IPV6)
static struct sk_buff *vxlan_na_create(struct sk_buff *request,
				       struct neighbour *n, bool isrouter)
{
	struct net_device *dev = request->dev;
	struct sk_buff *reply;
	struct nd_msg *ns, *na;
	struct ipv6hdr *pip6;
	u8 *daddr;
	int na_olen = 8; /* opt hdr + ETH_ALEN for target */
	int ns_olen;
	int i, len;

	if (dev == NULL)
		return NULL;

	len = LL_RESERVED_SPACE(dev) + sizeof(struct ipv6hdr) +
		sizeof(*na) + na_olen + dev->needed_tailroom;
	reply = alloc_skb(len, GFP_ATOMIC);
	if (reply == NULL)
		return NULL;

	reply->protocol = htons(ETH_P_IPV6);
	reply->dev = dev;
	skb_reserve(reply, LL_RESERVED_SPACE(request->dev));
	skb_push(reply, sizeof(struct ethhdr));
	skb_reset_mac_header(reply);

	ns = (struct nd_msg *)skb_transport_header(request);

	daddr = eth_hdr(request)->h_source;
	ns_olen = request->len - skb_transport_offset(request) - sizeof(*ns);
	for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) {
		if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) {
			daddr = ns->opt + i + sizeof(struct nd_opt_hdr);
			break;
		}
	}

	/* Ethernet header */
	ether_addr_copy(eth_hdr(reply)->h_dest, daddr);
	ether_addr_copy(eth_hdr(reply)->h_source, n->ha);
	eth_hdr(reply)->h_proto = htons(ETH_P_IPV6);
	reply->protocol = htons(ETH_P_IPV6);

	skb_pull(reply, sizeof(struct ethhdr));
	skb_reset_network_header(reply);
	skb_put(reply, sizeof(struct ipv6hdr));

	/* IPv6 header */
	pip6 = ipv6_hdr(reply);
	memset(pip6, 0, sizeof(struct ipv6hdr));
	pip6->version = 6;
	pip6->priority = ipv6_hdr(request)->priority;
	pip6->nexthdr = IPPROTO_ICMPV6;
	pip6->hop_limit = 255;
	pip6->daddr = ipv6_hdr(request)->saddr;
	pip6->saddr = *(struct in6_addr *)n->primary_key;

	skb_pull(reply, sizeof(struct ipv6hdr));
	skb_reset_transport_header(reply);

	na = (struct nd_msg *)skb_put(reply, sizeof(*na) + na_olen);

	/* Neighbor Advertisement */
	memset(na, 0, sizeof(*na)+na_olen);
	na->icmph.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
	na->icmph.icmp6_router = isrouter;
	na->icmph.icmp6_override = 1;
	na->icmph.icmp6_solicited = 1;
	na->target = ns->target;
	ether_addr_copy(&na->opt[2], n->ha);
	na->opt[0] = ND_OPT_TARGET_LL_ADDR;
	na->opt[1] = na_olen >> 3;

	na->icmph.icmp6_cksum = csum_ipv6_magic(&pip6->saddr,
		&pip6->daddr, sizeof(*na)+na_olen, IPPROTO_ICMPV6,
		csum_partial(na, sizeof(*na)+na_olen, 0));

	pip6->payload_len = htons(sizeof(*na)+na_olen);

	skb_push(reply, sizeof(struct ipv6hdr));

	reply->ip_summed = CHECKSUM_UNNECESSARY;

	return reply;
}
static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct nd_msg *msg;
	const struct ipv6hdr *iphdr;
	const struct in6_addr *saddr, *daddr;
	struct neighbour *n;
	struct inet6_dev *in6_dev;

	in6_dev = __in6_dev_get(dev);
	if (!in6_dev)
		goto out;

	iphdr = ipv6_hdr(skb);
	saddr = &iphdr->saddr;
	daddr = &iphdr->daddr;

	msg = (struct nd_msg *)skb_transport_header(skb);
	if (msg->icmph.icmp6_code != 0 ||
	    msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
		goto out;

	if (ipv6_addr_loopback(daddr) ||
	    ipv6_addr_is_multicast(&msg->target))
		goto out;

	n = neigh_lookup(ipv6_stub->nd_tbl, &msg->target, dev);

	if (n) {
		struct vxlan_fdb *f;
		struct sk_buff *reply;

		if (!(n->nud_state & NUD_CONNECTED)) {
			neigh_release(n);
			goto out;
		}

		f = vxlan_find_mac(vxlan, n->ha);
		if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
			/* bridge-local neighbor */
			neigh_release(n);
			goto out;
		}

		reply = vxlan_na_create(skb, n,
					!!(f ? f->flags & NTF_ROUTER : 0));

		neigh_release(n);

		if (reply == NULL)
			goto out;

		if (netif_rx_ni(reply) == NET_RX_DROP)
			dev->stats.rx_dropped++;

	} else if (vxlan->flags & VXLAN_F_L3MISS) {
		union vxlan_addr ipa = {
			.sin6.sin6_addr = msg->target,
			.sin6.sin6_family = AF_INET6,
		};

		vxlan_ip_miss(dev, &ipa);
	}

out:
	consume_skb(skb);
	return NETDEV_TX_OK;
}
#endif
static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct neighbour *n;

	if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
		return false;

	n = NULL;
	switch (ntohs(eth_hdr(skb)->h_proto)) {
	case ETH_P_IP:
	{
		struct iphdr *pip;

		if (!pskb_may_pull(skb, sizeof(struct iphdr)))
			return false;
		pip = ip_hdr(skb);
		n = neigh_lookup(&arp_tbl, &pip->daddr, dev);
		if (!n && (vxlan->flags & VXLAN_F_L3MISS)) {
			union vxlan_addr ipa = {
				.sin.sin_addr.s_addr = pip->daddr,
				.sin.sin_family = AF_INET,
			};

			vxlan_ip_miss(dev, &ipa);
			return false;
		}

		break;
	}
#if IS_ENABLED(CONFIG_IPV6)
	case ETH_P_IPV6:
	{
		struct ipv6hdr *pip6;

		if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
			return false;
		pip6 = ipv6_hdr(skb);
		n = neigh_lookup(ipv6_stub->nd_tbl, &pip6->daddr, dev);
		if (!n && (vxlan->flags & VXLAN_F_L3MISS)) {
			union vxlan_addr ipa = {
				.sin6.sin6_addr = pip6->daddr,
				.sin6.sin6_family = AF_INET6,
			};

			vxlan_ip_miss(dev, &ipa);
			return false;
		}

		break;
	}
#endif
	default:
		return false;
	}

	if (n) {
		bool diff;

		diff = !ether_addr_equal(eth_hdr(skb)->h_dest, n->ha);
		if (diff) {
			memcpy(eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
			       dev->addr_len);
			memcpy(eth_hdr(skb)->h_dest, n->ha, dev->addr_len);
		}
		neigh_release(n);
		return diff;
	}

	return false;
}
static void vxlan_build_gbp_hdr(struct vxlanhdr *vxh, u32 vxflags,
				struct vxlan_metadata *md)
{
	struct vxlanhdr_gbp *gbp;

	if (!md->gbp)
		return;

	gbp = (struct vxlanhdr_gbp *)vxh;
	vxh->vx_flags |= VXLAN_HF_GBP;

	if (md->gbp & VXLAN_GBP_DONT_LEARN)
		gbp->dont_learn = 1;

	if (md->gbp & VXLAN_GBP_POLICY_APPLIED)
		gbp->policy_applied = 1;

	gbp->policy_id = htons(md->gbp & VXLAN_GBP_ID_MASK);
}

static int vxlan_build_gpe_hdr(struct vxlanhdr *vxh, u32 vxflags,
			       __be16 protocol)
{
	struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)vxh;

	gpe->np_applied = 1;

	switch (protocol) {
	case htons(ETH_P_IP):
		gpe->next_protocol = VXLAN_GPE_NP_IPV4;
		return 0;
	case htons(ETH_P_IPV6):
		gpe->next_protocol = VXLAN_GPE_NP_IPV6;
		return 0;
	case htons(ETH_P_TEB):
		gpe->next_protocol = VXLAN_GPE_NP_ETHERNET;
		return 0;
	}
	return -EPFNOSUPPORT;
}
static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst,
			   int iphdr_len, __be32 vni,
			   struct vxlan_metadata *md, u32 vxflags,
			   bool udp_sum)
{
	struct vxlanhdr *vxh;
	int min_headroom;
	int err;
	int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
	__be16 inner_protocol = htons(ETH_P_TEB);

	if ((vxflags & VXLAN_F_REMCSUM_TX) &&
	    skb->ip_summed == CHECKSUM_PARTIAL) {
		int csum_start = skb_checksum_start_offset(skb);

		if (csum_start <= VXLAN_MAX_REMCSUM_START &&
		    !(csum_start & VXLAN_RCO_SHIFT_MASK) &&
		    (skb->csum_offset == offsetof(struct udphdr, check) ||
		     skb->csum_offset == offsetof(struct tcphdr, check)))
			type |= SKB_GSO_TUNNEL_REMCSUM;
	}

	min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
			+ VXLAN_HLEN + iphdr_len;

	/* Need space for new headers (invalidates iph ptr) */
	err = skb_cow_head(skb, min_headroom);
	if (unlikely(err))
		return err;

	err = iptunnel_handle_offloads(skb, type);
	if (err)
		return err;

	vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
	vxh->vx_flags = VXLAN_HF_VNI;
	vxh->vx_vni = vxlan_vni_field(vni);

	if (type & SKB_GSO_TUNNEL_REMCSUM) {
		unsigned int start;

		start = skb_checksum_start_offset(skb) - sizeof(struct vxlanhdr);
		vxh->vx_vni |= vxlan_compute_rco(start, skb->csum_offset);
		vxh->vx_flags |= VXLAN_HF_RCO;

		if (!skb_is_gso(skb)) {
			skb->ip_summed = CHECKSUM_NONE;
			skb->encapsulation = 0;
		}
	}

	if (vxflags & VXLAN_F_GBP)
		vxlan_build_gbp_hdr(vxh, vxflags, md);
	if (vxflags & VXLAN_F_GPE) {
		err = vxlan_build_gpe_hdr(vxh, vxflags, skb->protocol);
		if (err < 0)
			return err;
		inner_protocol = skb->protocol;
	}

	skb_set_inner_protocol(skb, inner_protocol);
	return 0;
}
static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, struct net_device *dev,
				      struct vxlan_sock *sock4,
				      struct sk_buff *skb, int oif, u8 tos,
				      __be32 daddr, __be32 *saddr, __be16 dport, __be16 sport,
				      struct dst_cache *dst_cache,
				      const struct ip_tunnel_info *info)
{
	bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
	struct rtable *rt = NULL;
	struct flowi4 fl4;

	if (!sock4)
		return ERR_PTR(-EIO);

	if (tos && !info)
		use_cache = false;
	if (use_cache) {
		rt = dst_cache_get_ip4(dst_cache, saddr);
		if (rt)
			return rt;
	}

	memset(&fl4, 0, sizeof(fl4));
	fl4.flowi4_oif = oif;
	fl4.flowi4_tos = RT_TOS(tos);
	fl4.flowi4_mark = skb->mark;
	fl4.flowi4_proto = IPPROTO_UDP;
	fl4.daddr = daddr;
	fl4.saddr = *saddr;
	fl4.fl4_dport = dport;
	fl4.fl4_sport = sport;

	rt = ip_route_output_key(vxlan->net, &fl4);
	if (likely(!IS_ERR(rt))) {
		if (rt->dst.dev == dev) {
			netdev_dbg(dev, "circular route to %pI4\n", &daddr);
			ip_rt_put(rt);
			return ERR_PTR(-ELOOP);
		}

		*saddr = fl4.saddr;
		if (use_cache)
			dst_cache_set_ip4(dst_cache, &rt->dst, fl4.saddr);
	} else {
		netdev_dbg(dev, "no route to %pI4\n", &daddr);
		return ERR_PTR(-ENETUNREACH);
	}
	return rt;
}
#if IS_ENABLED(CONFIG_IPV6)
static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
					  struct net_device *dev,
					  struct vxlan_sock *sock6,
					  struct sk_buff *skb, int oif, u8 tos,
					  __be32 label,
					  const struct in6_addr *daddr,
					  struct in6_addr *saddr,
					  __be16 dport, __be16 sport,
					  struct dst_cache *dst_cache,
					  const struct ip_tunnel_info *info)
{
	bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
	struct dst_entry *ndst;
	struct flowi6 fl6;
	int err;

	if (!sock6)
		return ERR_PTR(-EIO);

	if (tos && !info)
		use_cache = false;
	if (use_cache) {
		ndst = dst_cache_get_ip6(dst_cache, saddr);
		if (ndst)
			return ndst;
	}

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_oif = oif;
	fl6.daddr = *daddr;
	fl6.saddr = *saddr;
	fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tos), label);
	fl6.flowi6_mark = skb->mark;
	fl6.flowi6_proto = IPPROTO_UDP;
	fl6.fl6_dport = dport;
	fl6.fl6_sport = sport;

	err = ipv6_stub->ipv6_dst_lookup(vxlan->net,
					 sock6->sock->sk,
					 &ndst, &fl6);
	if (unlikely(err < 0)) {
		netdev_dbg(dev, "no route to %pI6\n", daddr);
		return ERR_PTR(-ENETUNREACH);
	}

	if (unlikely(ndst->dev == dev)) {
		netdev_dbg(dev, "circular route to %pI6\n", daddr);
		dst_release(ndst);
		return ERR_PTR(-ELOOP);
	}

	*saddr = fl6.saddr;
	if (use_cache)
		dst_cache_set_ip6(dst_cache, ndst, saddr);
	return ndst;
}
#endif
2070 static void vxlan_encap_bypass(struct sk_buff
*skb
, struct vxlan_dev
*src_vxlan
,
2071 struct vxlan_dev
*dst_vxlan
)
2073 struct pcpu_sw_netstats
*tx_stats
, *rx_stats
;
2074 union vxlan_addr loopback
;
2075 union vxlan_addr
*remote_ip
= &dst_vxlan
->default_dst
.remote_ip
;
2076 struct net_device
*dev
= skb
->dev
;
2079 tx_stats
= this_cpu_ptr(src_vxlan
->dev
->tstats
);
2080 rx_stats
= this_cpu_ptr(dst_vxlan
->dev
->tstats
);
2081 skb
->pkt_type
= PACKET_HOST
;
2082 skb
->encapsulation
= 0;
2083 skb
->dev
= dst_vxlan
->dev
;
2084 __skb_pull(skb
, skb_network_offset(skb
));
2086 if (remote_ip
->sa
.sa_family
== AF_INET
) {
2087 loopback
.sin
.sin_addr
.s_addr
= htonl(INADDR_LOOPBACK
);
2088 loopback
.sa
.sa_family
= AF_INET
;
2089 #if IS_ENABLED(CONFIG_IPV6)
2091 loopback
.sin6
.sin6_addr
= in6addr_loopback
;
2092 loopback
.sa
.sa_family
= AF_INET6
;
2096 if (dst_vxlan
->flags
& VXLAN_F_LEARN
)
2097 vxlan_snoop(skb
->dev
, &loopback
, eth_hdr(skb
)->h_source
);
2099 u64_stats_update_begin(&tx_stats
->syncp
);
2100 tx_stats
->tx_packets
++;
2101 tx_stats
->tx_bytes
+= len
;
2102 u64_stats_update_end(&tx_stats
->syncp
);
2104 if (netif_rx(skb
) == NET_RX_SUCCESS
) {
2105 u64_stats_update_begin(&rx_stats
->syncp
);
2106 rx_stats
->rx_packets
++;
2107 rx_stats
->rx_bytes
+= len
;
2108 u64_stats_update_end(&rx_stats
->syncp
);
2110 dev
->stats
.rx_dropped
++;
static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev,
				 struct vxlan_dev *vxlan, union vxlan_addr *daddr,
				 __be32 dst_port, __be32 vni, struct dst_entry *dst,
				 u32 rt_flags)
{
#if IS_ENABLED(CONFIG_IPV6)
	/* IPv6 rt-flags are checked against RTF_LOCAL, but the value of
	 * RTF_LOCAL is equal to RTCF_LOCAL. So to keep code simple
	 * we can use RTCF_LOCAL which works for ipv4 and ipv6 route entry.
	 */
	BUILD_BUG_ON(RTCF_LOCAL != RTF_LOCAL);
#endif
	/* Bypass encapsulation if the destination is local */
	if (rt_flags & RTCF_LOCAL &&
	    !(rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
		struct vxlan_dev *dst_vxlan;

		dst_release(dst);
		dst_vxlan = vxlan_find_vni(vxlan->net, vni,
					   daddr->sa.sa_family, dst_port,
					   vxlan->flags);
		if (!dst_vxlan) {
			dev->stats.tx_errors++;
			kfree_skb(skb);

			return -ENOENT;
		}
		vxlan_encap_bypass(skb, vxlan, dst_vxlan);
		return 1;
	}

	return 0;
}
*skb
, struct net_device
*dev
,
2149 struct vxlan_rdst
*rdst
, bool did_rsc
)
2151 struct dst_cache
*dst_cache
;
2152 struct ip_tunnel_info
*info
;
2153 struct vxlan_dev
*vxlan
= netdev_priv(dev
);
2154 const struct iphdr
*old_iph
= ip_hdr(skb
);
2155 union vxlan_addr
*dst
;
2156 union vxlan_addr remote_ip
, local_ip
;
2157 struct vxlan_metadata _md
;
2158 struct vxlan_metadata
*md
= &_md
;
2159 __be16 src_port
= 0, dst_port
;
2160 struct dst_entry
*ndst
= NULL
;
2164 u32 flags
= vxlan
->flags
;
2165 bool udp_sum
= false;
2166 bool xnet
= !net_eq(vxlan
->net
, dev_net(vxlan
->dev
));
2168 info
= skb_tunnel_info(skb
);
2171 dst
= &rdst
->remote_ip
;
2172 if (vxlan_addr_any(dst
)) {
2174 /* short-circuited back to local bridge */
2175 vxlan_encap_bypass(skb
, vxlan
, vxlan
);
2181 dst_port
= rdst
->remote_port
? rdst
->remote_port
: vxlan
->cfg
.dst_port
;
2182 vni
= rdst
->remote_vni
;
2183 local_ip
= vxlan
->cfg
.saddr
;
2184 dst_cache
= &rdst
->dst_cache
;
2185 md
->gbp
= skb
->mark
;
2186 ttl
= vxlan
->cfg
.ttl
;
2187 if (!ttl
&& vxlan_addr_multicast(dst
))
2190 tos
= vxlan
->cfg
.tos
;
2192 tos
= ip_tunnel_get_dsfield(old_iph
, skb
);
2194 if (dst
->sa
.sa_family
== AF_INET
)
2195 udp_sum
= !(flags
& VXLAN_F_UDP_ZERO_CSUM_TX
);
2197 udp_sum
= !(flags
& VXLAN_F_UDP_ZERO_CSUM6_TX
);
2198 label
= vxlan
->cfg
.label
;
2201 WARN_ONCE(1, "%s: Missing encapsulation instructions\n",
2205 remote_ip
.sa
.sa_family
= ip_tunnel_info_af(info
);
2206 if (remote_ip
.sa
.sa_family
== AF_INET
) {
2207 remote_ip
.sin
.sin_addr
.s_addr
= info
->key
.u
.ipv4
.dst
;
2208 local_ip
.sin
.sin_addr
.s_addr
= info
->key
.u
.ipv4
.src
;
2210 remote_ip
.sin6
.sin6_addr
= info
->key
.u
.ipv6
.dst
;
2211 local_ip
.sin6
.sin6_addr
= info
->key
.u
.ipv6
.src
;
2214 dst_port
= info
->key
.tp_dst
? : vxlan
->cfg
.dst_port
;
2215 vni
= tunnel_id_to_key32(info
->key
.tun_id
);
2216 dst_cache
= &info
->dst_cache
;
2217 if (info
->options_len
)
2218 md
= ip_tunnel_info_opts(info
);
2219 ttl
= info
->key
.ttl
;
2220 tos
= info
->key
.tos
;
2221 label
= info
->key
.label
;
2222 udp_sum
= !!(info
->key
.tun_flags
& TUNNEL_CSUM
);
2224 src_port
= udp_flow_src_port(dev_net(dev
), skb
, vxlan
->cfg
.port_min
,
2225 vxlan
->cfg
.port_max
, true);
2228 if (dst
->sa
.sa_family
== AF_INET
) {
2229 struct vxlan_sock
*sock4
= rcu_dereference(vxlan
->vn4_sock
);
2233 rt
= vxlan_get_route(vxlan
, dev
, sock4
, skb
,
2234 rdst
? rdst
->remote_ifindex
: 0, tos
,
2235 dst
->sin
.sin_addr
.s_addr
,
2236 &local_ip
.sin
.sin_addr
.s_addr
,
2244 if (fan_has_map(&vxlan
->fan
) && rt
->rt_flags
& RTCF_LOCAL
) {
2245 netdev_dbg(dev
, "discard fan to localhost %pI4\n",
2246 &dst
->sin
.sin_addr
.s_addr
);
2251 /* Bypass encapsulation if the destination is local */
2253 err
= encap_bypass_if_local(skb
, dev
, vxlan
, dst
,
2254 dst_port
, vni
, &rt
->dst
,
2258 } else if (info
->key
.tun_flags
& TUNNEL_DONT_FRAGMENT
) {
2263 tos
= ip_tunnel_ecn_encap(tos
, old_iph
, skb
);
2264 ttl
= ttl
? : ip4_dst_hoplimit(&rt
->dst
);
2265 err
= vxlan_build_skb(skb
, ndst
, sizeof(struct iphdr
),
2266 vni
, md
, flags
, udp_sum
);
2270 udp_tunnel_xmit_skb(rt
, sock4
->sock
->sk
, skb
, local_ip
.sin
.sin_addr
.s_addr
,
2271 dst
->sin
.sin_addr
.s_addr
, tos
, ttl
, df
,
2272 src_port
, dst_port
, xnet
, !udp_sum
);
2273 #if IS_ENABLED(CONFIG_IPV6)
2275 struct vxlan_sock
*sock6
= rcu_dereference(vxlan
->vn6_sock
);
2277 ndst
= vxlan6_get_route(vxlan
, dev
, sock6
, skb
,
2278 rdst
? rdst
->remote_ifindex
: 0, tos
,
2279 label
, &dst
->sin6
.sin6_addr
,
2280 &local_ip
.sin6
.sin6_addr
,
2284 err
= PTR_ERR(ndst
);
2290 u32 rt6i_flags
= ((struct rt6_info
*)ndst
)->rt6i_flags
;
2292 err
= encap_bypass_if_local(skb
, dev
, vxlan
, dst
,
2293 dst_port
, vni
, ndst
,
2299 tos
= ip_tunnel_ecn_encap(tos
, old_iph
, skb
);
2300 ttl
= ttl
? : ip6_dst_hoplimit(ndst
);
2301 skb_scrub_packet(skb
, xnet
);
2302 err
= vxlan_build_skb(skb
, ndst
, sizeof(struct ipv6hdr
),
2303 vni
, md
, flags
, udp_sum
);
2307 udp_tunnel6_xmit_skb(ndst
, sock6
->sock
->sk
, skb
, dev
,
2308 &local_ip
.sin6
.sin6_addr
,
2309 &dst
->sin6
.sin6_addr
, tos
, ttl
,
2310 label
, src_port
, dst_port
, !udp_sum
);
2318 dev
->stats
.tx_dropped
++;
2325 dev
->stats
.collisions
++;
2326 else if (err
== -ENETUNREACH
)
2327 dev
->stats
.tx_carrier_errors
++;
2329 dev
->stats
.tx_errors
++;
/* Transmit local packets over Vxlan
 *
 * Outer IP header inherits ECN and DF from inner header.
 * Outer UDP destination is the VXLAN assigned port.
 * Source port is based on hash of flow.
 */
static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	const struct ip_tunnel_info *info;
	struct ethhdr *eth;
	bool did_rsc = false;
	struct vxlan_rdst *rdst, *fdst = NULL;
	struct vxlan_fdb *f;

	info = skb_tunnel_info(skb);

	skb_reset_mac_header(skb);

	if (vxlan->flags & VXLAN_F_COLLECT_METADATA) {
		if (info && info->mode & IP_TUNNEL_INFO_TX)
			vxlan_xmit_one(skb, dev, NULL, false);
		else
			kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (vxlan->flags & VXLAN_F_PROXY) {
		eth = eth_hdr(skb);
		if (ntohs(eth->h_proto) == ETH_P_ARP)
			return arp_reduce(dev, skb);
#if IS_ENABLED(CONFIG_IPV6)
		else if (ntohs(eth->h_proto) == ETH_P_IPV6 &&
			 pskb_may_pull(skb, sizeof(struct ipv6hdr)
				       + sizeof(struct nd_msg)) &&
			 ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
			struct nd_msg *msg;

			msg = (struct nd_msg *)skb_transport_header(skb);
			if (msg->icmph.icmp6_code == 0 &&
			    msg->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION)
				return neigh_reduce(dev, skb);
		}
#endif
	}

	eth = eth_hdr(skb);

	if (fan_has_map(&vxlan->fan)) {
		struct vxlan_rdst fan_rdst;

		netdev_dbg(vxlan->dev, "vxlan_xmit p %x d %pM\n",
			   eth->h_proto, eth->h_dest);
		if (vxlan_fan_build_rdst(vxlan, skb, &fan_rdst)) {
			dev->stats.tx_dropped++;
			kfree_skb(skb);
			return NETDEV_TX_OK;
		}
		vxlan_xmit_one(skb, dev, &fan_rdst, 0);
		return NETDEV_TX_OK;
	}

	f = vxlan_find_mac(vxlan, eth->h_dest);
	did_rsc = false;

	if (f && (f->flags & NTF_ROUTER) && (vxlan->flags & VXLAN_F_RSC) &&
	    (ntohs(eth->h_proto) == ETH_P_IP ||
	     ntohs(eth->h_proto) == ETH_P_IPV6)) {
		did_rsc = route_shortcircuit(dev, skb);
		if (did_rsc)
			f = vxlan_find_mac(vxlan, eth->h_dest);
	}

	if (f == NULL) {
		f = vxlan_find_mac(vxlan, all_zeros_mac);
		if (f == NULL) {
			if ((vxlan->flags & VXLAN_F_L2MISS) &&
			    !is_multicast_ether_addr(eth->h_dest))
				vxlan_fdb_miss(vxlan, eth->h_dest);

			dev->stats.tx_dropped++;
			kfree_skb(skb);
			return NETDEV_TX_OK;
		}
	}

	list_for_each_entry_rcu(rdst, &f->remotes, list) {
		struct sk_buff *skb1;

		if (!fdst) {
			fdst = rdst;
			continue;
		}
		skb1 = skb_clone(skb, GFP_ATOMIC);
		if (skb1)
			vxlan_xmit_one(skb1, dev, rdst, did_rsc);
	}

	if (fdst)
		vxlan_xmit_one(skb, dev, fdst, did_rsc);
	else
		kfree_skb(skb);
	return NETDEV_TX_OK;
}
/* Walk the forwarding table and purge stale entries */
static void vxlan_cleanup(unsigned long arg)
{
	struct vxlan_dev *vxlan = (struct vxlan_dev *) arg;
	unsigned long next_timer = jiffies + FDB_AGE_INTERVAL;
	unsigned int h;

	if (!netif_running(vxlan->dev))
		return;

	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct hlist_node *p, *n;

		spin_lock_bh(&vxlan->hash_lock);
		hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
			struct vxlan_fdb *f
				= container_of(p, struct vxlan_fdb, hlist);
			unsigned long timeout;

			if (f->state & (NUD_PERMANENT | NUD_NOARP))
				continue;

			timeout = f->used + vxlan->cfg.age_interval * HZ;
			if (time_before_eq(timeout, jiffies)) {
				netdev_dbg(vxlan->dev,
					   "garbage collect %pM\n",
					   f->eth_addr);
				f->state = NUD_STALE;
				vxlan_fdb_destroy(vxlan, f);
			} else if (time_before(timeout, next_timer))
				next_timer = timeout;
		}
		spin_unlock_bh(&vxlan->hash_lock);
	}

	mod_timer(&vxlan->age_timer, next_timer);
}
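/* Hash the device into the per-socket VNI table so that received frames
 * carrying its default VNI can be steered to this device.
 */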
static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan)
{
	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
	__be32 vni = vxlan->default_dst.remote_vni;

	spin_lock(&vn->sock_lock);
	hlist_add_head_rcu(&vxlan->hlist, vni_head(vs, vni));
	spin_unlock(&vn->sock_lock);
}
/* Setup stats when device is created */
static int vxlan_init(struct net_device *dev)
{
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	return 0;
}
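/* Remove the all-zeros MAC entry that vxlan_dev_configure() may have
 * created for the default remote destination.
 */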
static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan)
{
	struct vxlan_fdb *f;

	spin_lock_bh(&vxlan->hash_lock);
	f = __vxlan_find_mac(vxlan, all_zeros_mac);
	if (f)
		vxlan_fdb_destroy(vxlan, f);
	spin_unlock_bh(&vxlan->hash_lock);
}
static void vxlan_uninit(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);

	vxlan_fdb_delete_default(vxlan);

	free_percpu(dev->tstats);
}
/* Start ageing timer and join group when device is brought up */
static int vxlan_open(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	int ret;

	ret = vxlan_sock_add(vxlan);
	if (ret < 0)
		return ret;

	if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) {
		ret = vxlan_igmp_join(vxlan);
		if (ret == -EADDRINUSE)
			ret = 0;
		if (ret) {
			vxlan_sock_release(vxlan);
			return ret;
		}
	}

	if (vxlan->cfg.age_interval)
		mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL);

	return ret;
}
/* Purge the forwarding table */
static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all)
{
	unsigned int h;

	spin_lock_bh(&vxlan->hash_lock);
	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct hlist_node *p, *n;

		hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
			struct vxlan_fdb *f
				= container_of(p, struct vxlan_fdb, hlist);

			if (!do_all && (f->state & (NUD_PERMANENT | NUD_NOARP)))
				continue;
			/* the all_zeros_mac entry is deleted at vxlan_uninit */
			if (!is_zero_ether_addr(f->eth_addr))
				vxlan_fdb_destroy(vxlan, f);
		}
	}
	spin_unlock_bh(&vxlan->hash_lock);
}
/* Cleanup timer and forwarding table on shutdown */
static int vxlan_stop(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
	int ret = 0;

	if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
	    !vxlan_group_used(vn, vxlan))
		ret = vxlan_igmp_leave(vxlan);

	del_timer_sync(&vxlan->age_timer);

	vxlan_flush(vxlan, false);
	vxlan_sock_release(vxlan);

	return ret;
}
/* Stub, nothing needs to be done. */
static void vxlan_set_multicast_list(struct net_device *dev)
{
}
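/* Validate a new MTU against the lower device (when one is configured),
 * accounting for the VXLAN encapsulation overhead.
 */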
static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_rdst *dst = &vxlan->default_dst;
	struct net_device *lowerdev = __dev_get_by_index(vxlan->net,
							 dst->remote_ifindex);
	bool use_ipv6 = false;

	if (dst->remote_ip.sa.sa_family == AF_INET6)
		use_ipv6 = true;

	/* This check differs from dev->max_mtu: it looks at the current
	 * lowerdev->mtu rather than the static dev->max_mtu.
	 */
	if (lowerdev) {
		int max_mtu = lowerdev->mtu -
			      (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);

		if (new_mtu > max_mtu)
			return -EINVAL;
	}

	dev->mtu = new_mtu;
	return 0;
}
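/* ndo_fill_metadata_dst: perform the same route lookup that transmission
 * would, and record the resulting source address and UDP ports in the
 * skb's tunnel metadata for the caller.
 */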
static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct ip_tunnel_info *info = skb_tunnel_info(skb);
	__be16 sport, dport;

	sport = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
				  vxlan->cfg.port_max, true);
	dport = info->key.tp_dst ? : vxlan->cfg.dst_port;

	if (ip_tunnel_info_af(info) == AF_INET) {
		struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock);
		struct rtable *rt;

		rt = vxlan_get_route(vxlan, dev, sock4, skb, 0, info->key.tos,
				     info->key.u.ipv4.dst,
				     &info->key.u.ipv4.src, dport, sport,
				     &info->dst_cache, info);
		if (IS_ERR(rt))
			return PTR_ERR(rt);
		ip_rt_put(rt);
	} else {
#if IS_ENABLED(CONFIG_IPV6)
		struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock);
		struct dst_entry *ndst;

		ndst = vxlan6_get_route(vxlan, dev, sock6, skb, 0, info->key.tos,
					info->key.label, &info->key.u.ipv6.dst,
					&info->key.u.ipv6.src, dport, sport,
					&info->dst_cache, info);
		if (IS_ERR(ndst))
			return PTR_ERR(ndst);
		dst_release(ndst);
#else /* !CONFIG_IPV6 */
		return -EPFNOSUPPORT;
#endif
	}
	info->key.tp_src = sport;
	info->key.tp_dst = dport;
	return 0;
}
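/* netdev_ops for the normal Ethernet-mode device.  The "raw" variant
 * below is used for VXLAN-GPE, which carries no inner Ethernet header and
 * therefore exposes no FDB or MAC-address operations.
 */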
static const struct net_device_ops vxlan_netdev_ether_ops = {
	.ndo_init		= vxlan_init,
	.ndo_uninit		= vxlan_uninit,
	.ndo_open		= vxlan_open,
	.ndo_stop		= vxlan_stop,
	.ndo_start_xmit		= vxlan_xmit,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_set_rx_mode	= vxlan_set_multicast_list,
	.ndo_change_mtu		= vxlan_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_fdb_add		= vxlan_fdb_add,
	.ndo_fdb_del		= vxlan_fdb_delete,
	.ndo_fdb_dump		= vxlan_fdb_dump,
	.ndo_fill_metadata_dst	= vxlan_fill_metadata_dst,
};
static const struct net_device_ops vxlan_netdev_raw_ops = {
	.ndo_init		= vxlan_init,
	.ndo_uninit		= vxlan_uninit,
	.ndo_open		= vxlan_open,
	.ndo_stop		= vxlan_stop,
	.ndo_start_xmit		= vxlan_xmit,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_change_mtu		= vxlan_change_mtu,
	.ndo_fill_metadata_dst	= vxlan_fill_metadata_dst,
};
/* Info for udev, that this is a virtual tunnel endpoint */
static struct device_type vxlan_type = {
	.name = "vxlan",
};
/* Calls the given device's ndo_udp_tunnel_add callback to supply the
 * listening VXLAN UDP ports.  Callers are expected to implement
 * ndo_udp_tunnel_add.
 */
static void vxlan_push_rx_ports(struct net_device *dev)
{
	struct vxlan_sock *vs;
	struct net *net = dev_net(dev);
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	unsigned int i;

	spin_lock(&vn->sock_lock);
	for (i = 0; i < PORT_HASH_SIZE; ++i) {
		hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist)
			udp_tunnel_push_rx_port(dev, vs->sock,
						(vs->flags & VXLAN_F_GPE) ?
						UDP_TUNNEL_TYPE_VXLAN_GPE :
						UDP_TUNNEL_TYPE_VXLAN);
	}
	spin_unlock(&vn->sock_lock);
}
/* Initialize the device structure. */
static void vxlan_setup(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	unsigned int h;

	eth_hw_addr_random(dev);

	dev->destructor = free_netdev;
	SET_NETDEV_DEVTYPE(dev, &vxlan_type);

	dev->features	|= NETIF_F_LLTX;
	dev->features	|= NETIF_F_SG | NETIF_F_HW_CSUM;
	dev->features	|= NETIF_F_RXCSUM;
	dev->features	|= NETIF_F_GSO_SOFTWARE;

	dev->vlan_features = dev->features;
	dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
	dev->hw_features |= NETIF_F_GSO_SOFTWARE;
	netif_keep_dst(dev);
	dev->priv_flags |= IFF_NO_QUEUE;

	INIT_LIST_HEAD(&vxlan->next);
	spin_lock_init(&vxlan->hash_lock);

	init_timer_deferrable(&vxlan->age_timer);
	vxlan->age_timer.function = vxlan_cleanup;
	vxlan->age_timer.data = (unsigned long) vxlan;

	vxlan->cfg.dst_port = htons(vxlan_port);

	gro_cells_init(&vxlan->gro_cells, dev);

	for (h = 0; h < FDB_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vxlan->fdb_head[h]);

	INIT_LIST_HEAD(&vxlan->fan.fan_maps);
}
static void vxlan_ether_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	dev->netdev_ops = &vxlan_netdev_ether_ops;
}
static void vxlan_raw_setup(struct net_device *dev)
{
	dev->header_ops = NULL;
	dev->type = ARPHRD_NONE;
	dev->hard_header_len = 0;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
	dev->netdev_ops = &vxlan_netdev_raw_ops;
}
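/* Netlink policy for the IFLA_VXLAN_* attributes parsed by vxlan_newlink()
 * below.  For reference, these are the attributes iproute2 encodes for a
 * command such as (illustrative example, not taken from this file):
 *
 *   ip link add vxlan0 type vxlan id 42 group 239.1.1.1 dev eth0 dstport 4789
 *
 * which sets IFLA_VXLAN_ID, IFLA_VXLAN_GROUP, IFLA_VXLAN_LINK and
 * IFLA_VXLAN_PORT respectively.
 */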
static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
	[IFLA_VXLAN_ID]		= { .type = NLA_U32 },
	[IFLA_VXLAN_GROUP]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_VXLAN_GROUP6]	= { .len = sizeof(struct in6_addr) },
	[IFLA_VXLAN_LINK]	= { .type = NLA_U32 },
	[IFLA_VXLAN_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
	[IFLA_VXLAN_LOCAL6]	= { .len = sizeof(struct in6_addr) },
	[IFLA_VXLAN_TOS]	= { .type = NLA_U8 },
	[IFLA_VXLAN_TTL]	= { .type = NLA_U8 },
	[IFLA_VXLAN_LABEL]	= { .type = NLA_U32 },
	[IFLA_VXLAN_LEARNING]	= { .type = NLA_U8 },
	[IFLA_VXLAN_AGEING]	= { .type = NLA_U32 },
	[IFLA_VXLAN_LIMIT]	= { .type = NLA_U32 },
	[IFLA_VXLAN_PORT_RANGE]	= { .len = sizeof(struct ifla_vxlan_port_range) },
	[IFLA_VXLAN_PROXY]	= { .type = NLA_U8 },
	[IFLA_VXLAN_RSC]	= { .type = NLA_U8 },
	[IFLA_VXLAN_L2MISS]	= { .type = NLA_U8 },
	[IFLA_VXLAN_L3MISS]	= { .type = NLA_U8 },
	[IFLA_VXLAN_COLLECT_METADATA]	= { .type = NLA_U8 },
	[IFLA_VXLAN_PORT]	= { .type = NLA_U16 },
	[IFLA_VXLAN_UDP_CSUM]	= { .type = NLA_U8 },
	[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]	= { .type = NLA_U8 },
	[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]	= { .type = NLA_U8 },
	[IFLA_VXLAN_REMCSUM_TX]	= { .type = NLA_U8 },
	[IFLA_VXLAN_REMCSUM_RX]	= { .type = NLA_U8 },
	[IFLA_VXLAN_GBP]	= { .type = NLA_FLAG, },
	[IFLA_VXLAN_GPE]	= { .type = NLA_FLAG, },
	[IFLA_VXLAN_REMCSUM_NOPARTIAL]	= { .type = NLA_FLAG },
};
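/* Sanity-check the link-layer address and VXLAN attributes before any
 * device is created.
 */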
static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
			pr_debug("invalid link address (not ethernet)\n");
			return -EINVAL;
		}

		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
			pr_debug("invalid all zero ethernet address\n");
			return -EADDRNOTAVAIL;
		}
	}

	if (data[IFLA_VXLAN_ID]) {
		__u32 id = nla_get_u32(data[IFLA_VXLAN_ID]);

		if (id >= VXLAN_N_VID)
			return -ERANGE;
	}

	if (data[IFLA_VXLAN_PORT_RANGE]) {
		const struct ifla_vxlan_port_range *p
			= nla_data(data[IFLA_VXLAN_PORT_RANGE]);

		if (ntohs(p->high) < ntohs(p->low)) {
			pr_debug("port range %u .. %u not valid\n",
				 ntohs(p->low), ntohs(p->high));
			return -EINVAL;
		}
	}

	return 0;
}
static void vxlan_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->version, VXLAN_VERSION, sizeof(drvinfo->version));
	strlcpy(drvinfo->driver, "vxlan", sizeof(drvinfo->driver));
}

static const struct ethtool_ops vxlan_ethtool_ops = {
	.get_drvinfo	= vxlan_get_drvinfo,
	.get_link	= ethtool_op_get_link,
};
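/* Open and bind the kernel UDP socket that will receive encapsulated
 * frames for the given address family and port.
 */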
static struct socket *vxlan_create_sock(struct net *net, bool ipv6,
					__be16 port, u32 flags)
{
	struct socket *sock;
	struct udp_port_cfg udp_conf;
	int err;

	memset(&udp_conf, 0, sizeof(udp_conf));

	if (ipv6) {
		udp_conf.family = AF_INET6;
		udp_conf.use_udp6_rx_checksums =
		    !(flags & VXLAN_F_UDP_ZERO_CSUM6_RX);
		udp_conf.ipv6_v6only = 1;
	} else {
		udp_conf.family = AF_INET;
	}

	udp_conf.local_udp_port = port;

	/* Open UDP socket */
	err = udp_sock_create(net, &udp_conf, &sock);
	if (err < 0)
		return ERR_PTR(err);

	return sock;
}
/* Create new listen socket if needed */
static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6,
					      __be16 port, u32 flags)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	struct vxlan_sock *vs;
	struct socket *sock;
	unsigned int h;
	struct udp_tunnel_sock_cfg tunnel_cfg;

	vs = kzalloc(sizeof(*vs), GFP_KERNEL);
	if (!vs)
		return ERR_PTR(-ENOMEM);

	for (h = 0; h < VNI_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vs->vni_list[h]);

	sock = vxlan_create_sock(net, ipv6, port, flags);
	if (IS_ERR(sock)) {
		pr_info("Cannot bind port %d, err=%ld\n", ntohs(port),
			PTR_ERR(sock));
		kfree(vs);
		return ERR_CAST(sock);
	}

	vs->sock = sock;
	atomic_set(&vs->refcnt, 1);
	vs->flags = (flags & VXLAN_F_RCV_FLAGS);

	spin_lock(&vn->sock_lock);
	hlist_add_head_rcu(&vs->hlist, vs_head(net, port));
	udp_tunnel_notify_add_rx_port(sock,
				      (vs->flags & VXLAN_F_GPE) ?
				      UDP_TUNNEL_TYPE_VXLAN_GPE :
				      UDP_TUNNEL_TYPE_VXLAN);
	spin_unlock(&vn->sock_lock);

	/* Mark socket as an encapsulation socket. */
	memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
	tunnel_cfg.sk_user_data = vs;
	tunnel_cfg.encap_type = 1;
	tunnel_cfg.encap_rcv = vxlan_rcv;
	tunnel_cfg.encap_destroy = NULL;
	tunnel_cfg.gro_receive = vxlan_gro_receive;
	tunnel_cfg.gro_complete = vxlan_gro_complete;

	setup_udp_tunnel_sock(net, sock, &tunnel_cfg);

	return vs;
}
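/* Attach the device to a UDP socket for one address family, reusing an
 * existing compatible socket (same port and receive flags) unless socket
 * sharing was disabled for this device.
 */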
static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6)
{
	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
	struct vxlan_sock *vs = NULL;

	if (!vxlan->cfg.no_share) {
		spin_lock(&vn->sock_lock);
		vs = vxlan_find_sock(vxlan->net, ipv6 ? AF_INET6 : AF_INET,
				     vxlan->cfg.dst_port, vxlan->flags);
		if (vs && !atomic_add_unless(&vs->refcnt, 1, 0)) {
			spin_unlock(&vn->sock_lock);
			return -EBUSY;
		}
		spin_unlock(&vn->sock_lock);
	}
	if (!vs)
		vs = vxlan_socket_create(vxlan->net, ipv6,
					 vxlan->cfg.dst_port, vxlan->flags);
	if (IS_ERR(vs))
		return PTR_ERR(vs);
#if IS_ENABLED(CONFIG_IPV6)
	if (ipv6)
		rcu_assign_pointer(vxlan->vn6_sock, vs);
	else
#endif
		rcu_assign_pointer(vxlan->vn4_sock, vs);
	vxlan_vs_add_dev(vs, vxlan);
	return 0;
}
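/* Set up the device's UDP socket(s): IPv6 and/or IPv4 depending on the
 * configured family, and both families in COLLECT_METADATA mode.
 */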
static int vxlan_sock_add(struct vxlan_dev *vxlan)
{
	bool ipv6 = vxlan->flags & VXLAN_F_IPV6;
	bool metadata = vxlan->flags & VXLAN_F_COLLECT_METADATA;
	int ret = 0;

	RCU_INIT_POINTER(vxlan->vn4_sock, NULL);
#if IS_ENABLED(CONFIG_IPV6)
	RCU_INIT_POINTER(vxlan->vn6_sock, NULL);
	if (ipv6 || metadata)
		ret = __vxlan_sock_add(vxlan, true);
#endif
	if (!ret && (!ipv6 || metadata))
		ret = __vxlan_sock_add(vxlan, false);
	if (ret < 0)
		vxlan_sock_release(vxlan);
	return ret;
}
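/* Apply a vxlan_config to a freshly allocated device: choose Ethernet or
 * GPE mode, validate the lower device and MTU, reject duplicate VNIs,
 * create the default-destination FDB entry and register the netdevice.
 */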
static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
			       struct vxlan_config *conf)
{
	struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
	struct vxlan_dev *vxlan = netdev_priv(dev), *tmp;
	struct vxlan_rdst *dst = &vxlan->default_dst;
	unsigned short needed_headroom = ETH_HLEN;
	int err;
	bool use_ipv6 = false;
	__be16 default_port = vxlan->cfg.dst_port;
	struct net_device *lowerdev = NULL;

	if (conf->flags & VXLAN_F_GPE) {
		/* For now, allow GPE only together with COLLECT_METADATA.
		 * This can be relaxed later; in such case, the other side
		 * of the PtP link will have to be provided.
		 */
		if ((conf->flags & ~VXLAN_F_ALLOWED_GPE) ||
		    !(conf->flags & VXLAN_F_COLLECT_METADATA)) {
			pr_info("unsupported combination of extensions\n");
			return -EINVAL;
		}

		vxlan_raw_setup(dev);
	} else {
		vxlan_ether_setup(dev);
	}

	/* MTU range: 68 - 65535 */
	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = ETH_MAX_MTU;

	vxlan->net = src_net;

	dst->remote_vni = conf->vni;

	memcpy(&dst->remote_ip, &conf->remote_ip, sizeof(conf->remote_ip));

	/* Unless IPv6 is explicitly requested, assume IPv4 */
	if (!dst->remote_ip.sa.sa_family)
		dst->remote_ip.sa.sa_family = AF_INET;

	if (dst->remote_ip.sa.sa_family == AF_INET6 ||
	    vxlan->cfg.saddr.sa.sa_family == AF_INET6) {
		if (!IS_ENABLED(CONFIG_IPV6))
			return -EPFNOSUPPORT;
		use_ipv6 = true;
		vxlan->flags |= VXLAN_F_IPV6;
	}

	if (conf->label && !use_ipv6) {
		pr_info("label only supported in use with IPv6\n");
		return -EINVAL;
	}

	if (conf->remote_ifindex) {
		lowerdev = __dev_get_by_index(src_net, conf->remote_ifindex);
		dst->remote_ifindex = conf->remote_ifindex;

		if (!lowerdev) {
			pr_info("ifindex %d does not exist\n", dst->remote_ifindex);
			return -ENODEV;
		}

#if IS_ENABLED(CONFIG_IPV6)
		if (use_ipv6) {
			struct inet6_dev *idev = __in6_dev_get(lowerdev);

			if (idev && idev->cnf.disable_ipv6) {
				pr_info("IPv6 is disabled via sysctl\n");
				return -EPERM;
			}
		}
#endif

		if (!conf->mtu)
			dev->mtu = lowerdev->mtu -
				   (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);

		needed_headroom = lowerdev->hard_header_len;
	} else if (vxlan_addr_multicast(&dst->remote_ip)) {
		pr_info("multicast destination requires interface to be specified\n");
		return -EINVAL;
	}

	if (conf->mtu) {
		int max_mtu = ETH_MAX_MTU;

		if (lowerdev)
			max_mtu = lowerdev->mtu;

		max_mtu -= (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);

		if (conf->mtu < dev->min_mtu || conf->mtu > dev->max_mtu)
			return -EINVAL;

		dev->mtu = conf->mtu;

		if (conf->mtu > max_mtu)
			dev->mtu = max_mtu;
	}

	if (use_ipv6 || conf->flags & VXLAN_F_COLLECT_METADATA)
		needed_headroom += VXLAN6_HEADROOM;
	else
		needed_headroom += VXLAN_HEADROOM;
	dev->needed_headroom = needed_headroom;

	memcpy(&vxlan->cfg, conf, sizeof(*conf));
	if (!vxlan->cfg.dst_port) {
		if (conf->flags & VXLAN_F_GPE)
			vxlan->cfg.dst_port = htons(4790); /* IANA VXLAN-GPE port */
		else
			vxlan->cfg.dst_port = default_port;
	}
	vxlan->flags |= conf->flags;

	if (!vxlan->cfg.age_interval)
		vxlan->cfg.age_interval = FDB_AGE_DEFAULT;

	list_for_each_entry(tmp, &vn->vxlan_list, next) {
		if (tmp->cfg.vni == conf->vni &&
		    (tmp->default_dst.remote_ip.sa.sa_family == AF_INET6 ||
		     tmp->cfg.saddr.sa.sa_family == AF_INET6) == use_ipv6 &&
		    tmp->cfg.dst_port == vxlan->cfg.dst_port &&
		    (tmp->flags & VXLAN_F_RCV_FLAGS) ==
		    (vxlan->flags & VXLAN_F_RCV_FLAGS)) {
			pr_info("duplicate VNI %u\n", be32_to_cpu(conf->vni));
			return -EEXIST;
		}
	}

	dev->ethtool_ops = &vxlan_ethtool_ops;

	/* create an fdb entry for a valid default destination */
	if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
		err = vxlan_fdb_create(vxlan, all_zeros_mac,
				       &vxlan->default_dst.remote_ip,
				       NUD_REACHABLE|NUD_PERMANENT,
				       NLM_F_EXCL|NLM_F_CREATE,
				       vxlan->cfg.dst_port,
				       vxlan->default_dst.remote_vni,
				       vxlan->default_dst.remote_ifindex,
				       NTF_SELF);
		if (err)
			return err;
	}

	err = register_netdevice(dev);
	if (err) {
		vxlan_fdb_delete_default(vxlan);
		return err;
	}

	list_add(&vxlan->next, &vn->vxlan_list);

	return 0;
}
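/* rtnl newlink handler: translate IFLA_VXLAN_* attributes into a
 * vxlan_config and hand it to vxlan_dev_configure().
 */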
static int vxlan_newlink(struct net *src_net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[])
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_config conf;
	int err;

	memset(&conf, 0, sizeof(conf));

	if (data[IFLA_VXLAN_ID])
		conf.vni = cpu_to_be32(nla_get_u32(data[IFLA_VXLAN_ID]));

	if (data[IFLA_VXLAN_GROUP]) {
		conf.remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]);
	} else if (data[IFLA_VXLAN_GROUP6]) {
		if (!IS_ENABLED(CONFIG_IPV6))
			return -EPFNOSUPPORT;

		conf.remote_ip.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_GROUP6]);
		conf.remote_ip.sa.sa_family = AF_INET6;
	}

	if (data[IFLA_VXLAN_FAN_MAP]) {
		err = vxlan_parse_fan_map(data, vxlan);
		if (err)
			return err;
	}

	if (data[IFLA_VXLAN_LOCAL]) {
		conf.saddr.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_LOCAL]);
		conf.saddr.sa.sa_family = AF_INET;
	} else if (data[IFLA_VXLAN_LOCAL6]) {
		if (!IS_ENABLED(CONFIG_IPV6))
			return -EPFNOSUPPORT;

		/* TODO: respect scope id */
		conf.saddr.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_LOCAL6]);
		conf.saddr.sa.sa_family = AF_INET6;
	}

	if (data[IFLA_VXLAN_LINK])
		conf.remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]);

	if (data[IFLA_VXLAN_TOS])
		conf.tos = nla_get_u8(data[IFLA_VXLAN_TOS]);

	if (data[IFLA_VXLAN_TTL])
		conf.ttl = nla_get_u8(data[IFLA_VXLAN_TTL]);

	if (data[IFLA_VXLAN_LABEL])
		conf.label = nla_get_be32(data[IFLA_VXLAN_LABEL]) &
			     IPV6_FLOWLABEL_MASK;

	if (!data[IFLA_VXLAN_LEARNING] || nla_get_u8(data[IFLA_VXLAN_LEARNING]))
		conf.flags |= VXLAN_F_LEARN;

	if (data[IFLA_VXLAN_AGEING])
		conf.age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);

	if (data[IFLA_VXLAN_PROXY] && nla_get_u8(data[IFLA_VXLAN_PROXY]))
		conf.flags |= VXLAN_F_PROXY;

	if (data[IFLA_VXLAN_RSC] && nla_get_u8(data[IFLA_VXLAN_RSC]))
		conf.flags |= VXLAN_F_RSC;

	if (data[IFLA_VXLAN_L2MISS] && nla_get_u8(data[IFLA_VXLAN_L2MISS]))
		conf.flags |= VXLAN_F_L2MISS;

	if (data[IFLA_VXLAN_L3MISS] && nla_get_u8(data[IFLA_VXLAN_L3MISS]))
		conf.flags |= VXLAN_F_L3MISS;

	if (data[IFLA_VXLAN_LIMIT])
		conf.addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);

	if (data[IFLA_VXLAN_COLLECT_METADATA] &&
	    nla_get_u8(data[IFLA_VXLAN_COLLECT_METADATA]))
		conf.flags |= VXLAN_F_COLLECT_METADATA;

	if (data[IFLA_VXLAN_PORT_RANGE]) {
		const struct ifla_vxlan_port_range *p
			= nla_data(data[IFLA_VXLAN_PORT_RANGE]);
		conf.port_min = ntohs(p->low);
		conf.port_max = ntohs(p->high);
	}

	if (data[IFLA_VXLAN_PORT])
		conf.dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]);

	if (data[IFLA_VXLAN_UDP_CSUM] &&
	    !nla_get_u8(data[IFLA_VXLAN_UDP_CSUM]))
		conf.flags |= VXLAN_F_UDP_ZERO_CSUM_TX;

	if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX] &&
	    nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]))
		conf.flags |= VXLAN_F_UDP_ZERO_CSUM6_TX;

	if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX] &&
	    nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]))
		conf.flags |= VXLAN_F_UDP_ZERO_CSUM6_RX;

	if (data[IFLA_VXLAN_REMCSUM_TX] &&
	    nla_get_u8(data[IFLA_VXLAN_REMCSUM_TX]))
		conf.flags |= VXLAN_F_REMCSUM_TX;

	if (data[IFLA_VXLAN_REMCSUM_RX] &&
	    nla_get_u8(data[IFLA_VXLAN_REMCSUM_RX]))
		conf.flags |= VXLAN_F_REMCSUM_RX;

	if (data[IFLA_VXLAN_GBP])
		conf.flags |= VXLAN_F_GBP;

	if (data[IFLA_VXLAN_GPE])
		conf.flags |= VXLAN_F_GPE;

	if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL])
		conf.flags |= VXLAN_F_REMCSUM_NOPARTIAL;

	if (tb[IFLA_MTU])
		conf.mtu = nla_get_u32(tb[IFLA_MTU]);

	return vxlan_dev_configure(src_net, dev, &conf);
}
static void vxlan_dellink(struct net_device *dev, struct list_head *head)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);

	vxlan_flush(vxlan, true);

	spin_lock(&vn->sock_lock);
	if (!hlist_unhashed(&vxlan->hlist))
		hlist_del_rcu(&vxlan->hlist);
	spin_unlock(&vn->sock_lock);

	gro_cells_destroy(&vxlan->gro_cells);
	list_del(&vxlan->next);
	unregister_netdevice_queue(dev, head);
}
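/* Upper bound on the netlink attribute space consumed by vxlan_fill_info(). */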
static size_t vxlan_get_size(const struct net_device *dev)
{
	return nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_ID */
		nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_GROUP{6} */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_LINK */
		nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_LOCAL{6} */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_TTL */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_TOS */
		nla_total_size(sizeof(__be32)) + /* IFLA_VXLAN_LABEL */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_LEARNING */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_PROXY */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_RSC */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_L2MISS */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_L3MISS */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_COLLECT_METADATA */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_AGEING */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_LIMIT */
		nla_total_size(sizeof(struct ifla_vxlan_port_range)) +
		nla_total_size(sizeof(__be16)) + /* IFLA_VXLAN_PORT */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_UDP_CSUM */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_UDP_ZERO_CSUM6_TX */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_UDP_ZERO_CSUM6_RX */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_REMCSUM_TX */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_REMCSUM_RX */
		nla_total_size(sizeof(struct ip_fan_map) * 256) +
		0;
}
static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	const struct vxlan_dev *vxlan = netdev_priv(dev);
	const struct vxlan_rdst *dst = &vxlan->default_dst;
	struct ifla_vxlan_port_range ports = {
		.low =  htons(vxlan->cfg.port_min),
		.high = htons(vxlan->cfg.port_max),
	};

	if (nla_put_u32(skb, IFLA_VXLAN_ID, be32_to_cpu(dst->remote_vni)))
		goto nla_put_failure;

	if (!vxlan_addr_any(&dst->remote_ip)) {
		if (dst->remote_ip.sa.sa_family == AF_INET) {
			if (nla_put_in_addr(skb, IFLA_VXLAN_GROUP,
					    dst->remote_ip.sin.sin_addr.s_addr))
				goto nla_put_failure;
#if IS_ENABLED(CONFIG_IPV6)
		} else {
			if (nla_put_in6_addr(skb, IFLA_VXLAN_GROUP6,
					     &dst->remote_ip.sin6.sin6_addr))
				goto nla_put_failure;
#endif
		}
	}

	if (dst->remote_ifindex && nla_put_u32(skb, IFLA_VXLAN_LINK, dst->remote_ifindex))
		goto nla_put_failure;

	if (!vxlan_addr_any(&vxlan->cfg.saddr)) {
		if (vxlan->cfg.saddr.sa.sa_family == AF_INET) {
			if (nla_put_in_addr(skb, IFLA_VXLAN_LOCAL,
					    vxlan->cfg.saddr.sin.sin_addr.s_addr))
				goto nla_put_failure;
#if IS_ENABLED(CONFIG_IPV6)
		} else {
			if (nla_put_in6_addr(skb, IFLA_VXLAN_LOCAL6,
					     &vxlan->cfg.saddr.sin6.sin6_addr))
				goto nla_put_failure;
#endif
		}
	}

	if (fan_has_map(&vxlan->fan)) {
		struct nlattr *fan_nest;
		struct ip_fan_map *fan_map;

		fan_nest = nla_nest_start(skb, IFLA_VXLAN_FAN_MAP);
		if (!fan_nest)
			goto nla_put_failure;
		list_for_each_entry_rcu(fan_map, &vxlan->fan.fan_maps, list) {
			struct ifla_fan_map map;

			map.underlay = fan_map->underlay;
			map.underlay_prefix = fan_map->underlay_prefix;
			map.overlay = fan_map->overlay;
			map.overlay_prefix = fan_map->overlay_prefix;
			if (nla_put(skb, IFLA_FAN_MAPPING, sizeof(map), &map))
				goto nla_put_failure;
		}
		nla_nest_end(skb, fan_nest);
	}

	if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->cfg.ttl) ||
	    nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) ||
	    nla_put_be32(skb, IFLA_VXLAN_LABEL, vxlan->cfg.label) ||
	    nla_put_u8(skb, IFLA_VXLAN_LEARNING,
		       !!(vxlan->flags & VXLAN_F_LEARN)) ||
	    nla_put_u8(skb, IFLA_VXLAN_PROXY,
		       !!(vxlan->flags & VXLAN_F_PROXY)) ||
	    nla_put_u8(skb, IFLA_VXLAN_RSC, !!(vxlan->flags & VXLAN_F_RSC)) ||
	    nla_put_u8(skb, IFLA_VXLAN_L2MISS,
		       !!(vxlan->flags & VXLAN_F_L2MISS)) ||
	    nla_put_u8(skb, IFLA_VXLAN_L3MISS,
		       !!(vxlan->flags & VXLAN_F_L3MISS)) ||
	    nla_put_u8(skb, IFLA_VXLAN_COLLECT_METADATA,
		       !!(vxlan->flags & VXLAN_F_COLLECT_METADATA)) ||
	    nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->cfg.age_interval) ||
	    nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->cfg.addrmax) ||
	    nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->cfg.dst_port) ||
	    nla_put_u8(skb, IFLA_VXLAN_UDP_CSUM,
		       !(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM_TX)) ||
	    nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_TX,
		       !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) ||
	    nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_RX,
		       !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_RX)) ||
	    nla_put_u8(skb, IFLA_VXLAN_REMCSUM_TX,
		       !!(vxlan->flags & VXLAN_F_REMCSUM_TX)) ||
	    nla_put_u8(skb, IFLA_VXLAN_REMCSUM_RX,
		       !!(vxlan->flags & VXLAN_F_REMCSUM_RX)))
		goto nla_put_failure;

	if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports))
		goto nla_put_failure;

	if (vxlan->flags & VXLAN_F_GBP &&
	    nla_put_flag(skb, IFLA_VXLAN_GBP))
		goto nla_put_failure;

	if (vxlan->flags & VXLAN_F_GPE &&
	    nla_put_flag(skb, IFLA_VXLAN_GPE))
		goto nla_put_failure;

	if (vxlan->flags & VXLAN_F_REMCSUM_NOPARTIAL &&
	    nla_put_flag(skb, IFLA_VXLAN_REMCSUM_NOPARTIAL))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static struct net *vxlan_get_link_net(const struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);

	return vxlan->net;
}
static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
	.kind		= "vxlan",
	.maxtype	= IFLA_VXLAN_MAX,
	.policy		= vxlan_policy,
	.priv_size	= sizeof(struct vxlan_dev),
	.setup		= vxlan_setup,
	.validate	= vxlan_validate,
	.newlink	= vxlan_newlink,
	.dellink	= vxlan_dellink,
	.get_size	= vxlan_get_size,
	.fill_info	= vxlan_fill_info,
	.get_link_net	= vxlan_get_link_net,
};
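/* Create a VXLAN device on behalf of an in-kernel caller (openvswitch,
 * for example), bypassing the rtnl newlink path but reusing the same
 * configuration logic.
 */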
struct net_device *vxlan_dev_create(struct net *net, const char *name,
				    u8 name_assign_type,
				    struct vxlan_config *conf)
{
	struct nlattr *tb[IFLA_MAX + 1];
	struct net_device *dev;
	int err;

	memset(&tb, 0, sizeof(tb));

	dev = rtnl_create_link(net, name, name_assign_type,
			       &vxlan_link_ops, tb);
	if (IS_ERR(dev))
		return dev;

	err = vxlan_dev_configure(net, dev, conf);
	if (err < 0) {
		free_netdev(dev);
		return ERR_PTR(err);
	}

	err = rtnl_configure_link(dev, NULL);
	if (err < 0) {
		LIST_HEAD(list_kill);

		vxlan_dellink(dev, &list_kill);
		unregister_netdevice_many(&list_kill);
		return ERR_PTR(err);
	}

	return dev;
}
EXPORT_SYMBOL_GPL(vxlan_dev_create);
static void vxlan_handle_lowerdev_unregister(struct vxlan_net *vn,
					     struct net_device *dev)
{
	struct vxlan_dev *vxlan, *next;
	LIST_HEAD(list_kill);

	list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
		struct vxlan_rdst *dst = &vxlan->default_dst;

		/* In case we created a vxlan device with carrier
		 * and we lose the carrier due to module unload
		 * we also need to remove the vxlan device. In other
		 * cases, it's not necessary and remote_ifindex
		 * is 0 here, so no matches.
		 */
		if (dst->remote_ifindex == dev->ifindex)
			vxlan_dellink(vxlan->dev, &list_kill);
	}

	unregister_netdevice_many(&list_kill);
}
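/* Netdevice notifier: clean up tunnels whose lower device disappears and
 * replay the listening UDP ports when a driver asks for them.
 */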
static int vxlan_netdevice_event(struct notifier_block *unused,
				 unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);

	if (event == NETDEV_UNREGISTER)
		vxlan_handle_lowerdev_unregister(vn, dev);
	else if (event == NETDEV_UDP_TUNNEL_PUSH_INFO)
		vxlan_push_rx_ports(dev);

	return NOTIFY_DONE;
}
static struct notifier_block vxlan_notifier_block __read_mostly = {
	.notifier_call = vxlan_netdevice_event,
};
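/* Per-netns initialization: empty device list and socket hash table. */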
static __net_init int vxlan_init_net(struct net *net)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	unsigned int h;

	INIT_LIST_HEAD(&vn->vxlan_list);
	spin_lock_init(&vn->sock_lock);

	for (h = 0; h < PORT_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vn->sock_list[h]);

	return 0;
}
#ifdef CONFIG_SYSCTL
static struct ctl_table_header *vxlan_fan_header;
static unsigned int vxlan_fan_version = 4;

static struct ctl_table vxlan_fan_sysctls[] = {
	{
		.procname	= "vxlan",
		.data		= &vxlan_fan_version,
		.maxlen		= sizeof(vxlan_fan_version),
		.proc_handler	= proc_dointvec,
	},
	{},
};
#endif /* CONFIG_SYSCTL */
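/* Per-netns teardown: unregister all VXLAN devices that were created in
 * this netns, including those whose net_device has since been moved to
 * another namespace.
 */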
static void __net_exit vxlan_exit_net(struct net *net)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	struct vxlan_dev *vxlan, *next;
	struct net_device *dev, *aux;
	LIST_HEAD(list);

	rtnl_lock();
	for_each_netdev_safe(net, dev, aux)
		if (dev->rtnl_link_ops == &vxlan_link_ops)
			unregister_netdevice_queue(dev, &list);

	list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
		/* If vxlan->dev is in the same netns, it has already been added
		 * to the list by the previous loop.
		 */
		if (!net_eq(dev_net(vxlan->dev), net)) {
			gro_cells_destroy(&vxlan->gro_cells);
			unregister_netdevice_queue(vxlan->dev, &list);
		}
	}

	unregister_netdevice_many(&list);
	rtnl_unlock();
}
static struct pernet_operations vxlan_net_ops = {
	.init = vxlan_init_net,
	.exit = vxlan_exit_net,
	.id   = &vxlan_net_id,
	.size = sizeof(struct vxlan_net),
};
static int __init vxlan_init_module(void)
{
	int rc;

	get_random_bytes(&vxlan_salt, sizeof(vxlan_salt));

	rc = register_pernet_subsys(&vxlan_net_ops);
	if (rc)
		goto out1;

	rc = register_netdevice_notifier(&vxlan_notifier_block);
	if (rc)
		goto out2;

	rc = rtnl_link_register(&vxlan_link_ops);
	if (rc)
		goto out3;

#ifdef CONFIG_SYSCTL
	vxlan_fan_header = register_net_sysctl(&init_net, "net/fan",
					       vxlan_fan_sysctls);
	if (!vxlan_fan_header) {
		rc = -ENOMEM;
		goto out4;
	}
#endif /* CONFIG_SYSCTL */

	return 0;

#ifdef CONFIG_SYSCTL
out4:
	rtnl_link_unregister(&vxlan_link_ops);
#endif /* CONFIG_SYSCTL */
out3:
	unregister_netdevice_notifier(&vxlan_notifier_block);
out2:
	unregister_pernet_subsys(&vxlan_net_ops);
out1:
	return rc;
}
late_initcall(vxlan_init_module);
static void __exit vxlan_cleanup_module(void)
{
#ifdef CONFIG_SYSCTL
	unregister_net_sysctl_table(vxlan_fan_header);
#endif /* CONFIG_SYSCTL */
	rtnl_link_unregister(&vxlan_link_ops);
	unregister_netdevice_notifier(&vxlan_notifier_block);
	unregister_pernet_subsys(&vxlan_net_ops);
	/* rcu_barrier() is called by netns */
}
module_exit(vxlan_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_VERSION(VXLAN_VERSION);
MODULE_AUTHOR("Stephen Hemminger <stephen@networkplumber.org>");
MODULE_DESCRIPTION("Driver for VXLAN encapsulated traffic");
MODULE_ALIAS_RTNL_LINK("vxlan");