drivers/net/vxlan.c (mirror_ubuntu-bionic-kernel.git, blob at commit "vxlan: Don't call gro_cells_destroy() before device is unregistered")

/*
 * VXLAN: Virtual eXtensible Local Area Network
 *
 * Copyright (c) 2012-2013 Vyatta Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/udp.h>
#include <linux/igmp.h>
#include <linux/inetdevice.h>
#include <linux/if_ether.h>
#include <linux/ethtool.h>
#include <net/arp.h>
#include <net/ndisc.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/rtnetlink.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/tun_proto.h>
#include <net/vxlan.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ip6_tunnel.h>
#include <net/ip6_checksum.h>
#endif

#define VXLAN_VERSION "0.1"

#define PORT_HASH_BITS   8
#define PORT_HASH_SIZE   (1<<PORT_HASH_BITS)
#define FDB_AGE_DEFAULT  300 /* 5 min */
#define FDB_AGE_INTERVAL (10 * HZ) /* rescan interval */

/* UDP port for VXLAN traffic.
 * The IANA assigned port is 4789, but the Linux default is 8472
 * for compatibility with early adopters.
 */
static unsigned short vxlan_port __read_mostly = 8472;
module_param_named(udp_port, vxlan_port, ushort, 0444);
MODULE_PARM_DESC(udp_port, "Destination UDP port");
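
/* Illustrative usage, not part of the driver: the parameter is read-only
 * at runtime (mode 0444), so the legacy default above can only be
 * overridden at module load time, e.g. to use the IANA-assigned port:
 *
 *   modprobe vxlan udp_port=4789
 */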

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

static unsigned int vxlan_net_id;
static struct rtnl_link_ops vxlan_link_ops;

static const u8 all_zeros_mac[ETH_ALEN + 2];

static int vxlan_sock_add(struct vxlan_dev *vxlan);

static void vxlan_vs_del_dev(struct vxlan_dev *vxlan);

/* per-network namespace private data for this module */
struct vxlan_net {
        struct list_head  vxlan_list;
        struct hlist_head sock_list[PORT_HASH_SIZE];
        spinlock_t        sock_lock;
};

/* Forwarding table entry */
struct vxlan_fdb {
        struct hlist_node hlist;   /* linked list of entries */
        struct rcu_head   rcu;
        unsigned long     updated; /* jiffies */
        unsigned long     used;
        struct list_head  remotes;
        u8                eth_addr[ETH_ALEN];
        u16               state;   /* see ndm_state */
        __be32            vni;
        u8                flags;   /* see ndm_flags */
};

/* salt for hash table */
static u32 vxlan_salt __read_mostly;

static inline bool vxlan_collect_metadata(struct vxlan_sock *vs)
{
        return vs->flags & VXLAN_F_COLLECT_METADATA ||
               ip_tunnel_collect_metadata();
}
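
/* Illustrative note, not part of the driver: collect-metadata ("flow
 * based") mode is what iproute2 enables with the "external" keyword, e.g.:
 *
 *   ip link add vxlan0 type vxlan dstport 4789 external
 *
 * In this mode a single device can handle any VNI; per-packet tunnel
 * parameters come from the dst_metadata attached to the skb (for example
 * by Open vSwitch or a tc tunnel_key action) rather than from the device
 * configuration.
 */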

static struct ip_fan_map *vxlan_fan_find_map(struct vxlan_dev *vxlan, __be32 daddr)
{
        struct ip_fan_map *fan_map;

        rcu_read_lock();
        list_for_each_entry_rcu(fan_map, &vxlan->fan.fan_maps, list) {
                if (fan_map->overlay ==
                    (daddr & inet_make_mask(fan_map->overlay_prefix))) {
                        rcu_read_unlock();
                        return fan_map;
                }
        }
        rcu_read_unlock();

        return NULL;
}

static void vxlan_fan_flush_map(struct vxlan_dev *vxlan)
{
        struct ip_fan_map *fan_map;

        list_for_each_entry_rcu(fan_map, &vxlan->fan.fan_maps, list) {
                list_del_rcu(&fan_map->list);
                kfree_rcu(fan_map, rcu);
        }
}

static int vxlan_fan_del_map(struct vxlan_dev *vxlan, __be32 overlay)
{
        struct ip_fan_map *fan_map;

        fan_map = vxlan_fan_find_map(vxlan, overlay);
        if (!fan_map)
                return -ENOENT;

        list_del_rcu(&fan_map->list);
        kfree_rcu(fan_map, rcu);

        return 0;
}

static int vxlan_fan_add_map(struct vxlan_dev *vxlan, struct ifla_fan_map *map)
{
        __be32 overlay_mask, underlay_mask;
        struct ip_fan_map *fan_map;

        overlay_mask = inet_make_mask(map->overlay_prefix);
        underlay_mask = inet_make_mask(map->underlay_prefix);

        netdev_dbg(vxlan->dev, "vfam: map: o %x/%d u %x/%d om %x um %x\n",
                   map->overlay, map->overlay_prefix,
                   map->underlay, map->underlay_prefix,
                   overlay_mask, underlay_mask);

        if ((map->overlay & ~overlay_mask) || (map->underlay & ~underlay_mask))
                return -EINVAL;

        if (!(map->overlay & overlay_mask) && (map->underlay & underlay_mask))
                return -EINVAL;

        /* Special case: overlay 0 and underlay 0: flush all mappings */
        if (!map->overlay && !map->underlay) {
                vxlan_fan_flush_map(vxlan);
                return 0;
        }

        /* Special case: overlay set and underlay 0: clear map for overlay */
        if (!map->underlay)
                return vxlan_fan_del_map(vxlan, map->overlay);

        if (vxlan_fan_find_map(vxlan, map->overlay))
                return -EEXIST;

        fan_map = kmalloc(sizeof(*fan_map), GFP_KERNEL);
        if (!fan_map)
                return -ENOMEM;
        fan_map->underlay = map->underlay;
        fan_map->overlay = map->overlay;
        fan_map->underlay_prefix = map->underlay_prefix;
        fan_map->overlay_mask = ntohl(overlay_mask);
        fan_map->overlay_prefix = map->overlay_prefix;

        list_add_tail_rcu(&fan_map->list, &vxlan->fan.fan_maps);

        return 0;
}

static int vxlan_parse_fan_map(struct nlattr *data[], struct vxlan_dev *vxlan)
{
        struct ifla_fan_map *map;
        struct nlattr *attr;
        int rem, rv;

        nla_for_each_nested(attr, data[IFLA_IPTUN_FAN_MAP], rem) {
                map = nla_data(attr);
                rv = vxlan_fan_add_map(vxlan, map);
                if (rv)
                        return rv;
        }

        return 0;
}

static int vxlan_fan_build_rdst(struct vxlan_dev *vxlan, struct sk_buff *skb,
                                struct vxlan_rdst *fan_rdst)
{
        struct ip_fan_map *f_map;
        union vxlan_addr *va;
        u32 daddr, underlay;
        struct arphdr *arp;
        void *arp_ptr;
        struct ethhdr *eth;
        struct iphdr *iph;

        eth = eth_hdr(skb);
        switch (eth->h_proto) {
        case htons(ETH_P_IP):
                iph = ip_hdr(skb);
                if (!iph)
                        return -EINVAL;
                daddr = iph->daddr;
                break;
        case htons(ETH_P_ARP):
                arp = arp_hdr(skb);
                if (!arp)
                        return -EINVAL;
                arp_ptr = arp + 1;
                netdev_dbg(vxlan->dev,
                           "vfbr: arp sha %pM sip %pI4 tha %pM tip %pI4\n",
                           arp_ptr, arp_ptr + skb->dev->addr_len,
                           arp_ptr + skb->dev->addr_len + 4,
                           arp_ptr + (skb->dev->addr_len * 2) + 4);
                arp_ptr += (skb->dev->addr_len * 2) + 4;
                memcpy(&daddr, arp_ptr, 4);
                break;
        default:
                netdev_dbg(vxlan->dev, "vfbr: unknown eth p %x\n", eth->h_proto);
                return -EINVAL;
        }

        f_map = vxlan_fan_find_map(vxlan, daddr);
        if (!f_map)
                return -EINVAL;

        daddr = ntohl(daddr);
        underlay = ntohl(f_map->underlay);
        if (!underlay)
                return -EINVAL;

        memset(fan_rdst, 0, sizeof(*fan_rdst));
        va = &fan_rdst->remote_ip;
        va->sa.sa_family = AF_INET;
        fan_rdst->remote_vni = vxlan->default_dst.remote_vni;
        va->sin.sin_addr.s_addr = htonl(underlay |
                                        ((daddr & ~f_map->overlay_mask) >>
                                         (32 - f_map->overlay_prefix -
                                          (32 - f_map->underlay_prefix))));
        netdev_dbg(vxlan->dev, "vfbr: daddr %x ul %x dst %x\n",
                   daddr, underlay, va->sin.sin_addr.s_addr);

        return 0;
}
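
/* Worked example (illustrative, not part of the driver): with a FAN map of
 * overlay 10.0.0.0/8 onto underlay 192.168.0.0/16, an inner destination of
 * 10.3.4.5 (0x0a030405) gives:
 *
 *   daddr & ~overlay_mask        = 0x00030405
 *   shift = 32 - 8 - (32 - 16)   = 8
 *   underlay | (0x00030405 >> 8) = 0xc0a80304, i.e. 192.168.3.4
 *
 * so the frame is tunnelled straight to the underlay host that owns that
 * slice of the overlay space, with no per-destination FDB state needed.
 */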

#if IS_ENABLED(CONFIG_IPV6)
static inline
bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
{
        if (a->sa.sa_family != b->sa.sa_family)
                return false;
        if (a->sa.sa_family == AF_INET6)
                return ipv6_addr_equal(&a->sin6.sin6_addr, &b->sin6.sin6_addr);
        else
                return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
}

static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
{
        if (ipa->sa.sa_family == AF_INET6)
                return ipv6_addr_any(&ipa->sin6.sin6_addr);
        else
                return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
}

static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
{
        if (ipa->sa.sa_family == AF_INET6)
                return ipv6_addr_is_multicast(&ipa->sin6.sin6_addr);
        else
                return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
}

static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
{
        if (nla_len(nla) >= sizeof(struct in6_addr)) {
                ip->sin6.sin6_addr = nla_get_in6_addr(nla);
                ip->sa.sa_family = AF_INET6;
                return 0;
        } else if (nla_len(nla) >= sizeof(__be32)) {
                ip->sin.sin_addr.s_addr = nla_get_in_addr(nla);
                ip->sa.sa_family = AF_INET;
                return 0;
        } else {
                return -EAFNOSUPPORT;
        }
}

static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
                              const union vxlan_addr *ip)
{
        if (ip->sa.sa_family == AF_INET6)
                return nla_put_in6_addr(skb, attr, &ip->sin6.sin6_addr);
        else
                return nla_put_in_addr(skb, attr, ip->sin.sin_addr.s_addr);
}

#else /* !CONFIG_IPV6 */

static inline
bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
{
        return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
}

static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
{
        return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
}

static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
{
        return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
}

static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
{
        if (nla_len(nla) >= sizeof(struct in6_addr)) {
                return -EAFNOSUPPORT;
        } else if (nla_len(nla) >= sizeof(__be32)) {
                ip->sin.sin_addr.s_addr = nla_get_in_addr(nla);
                ip->sa.sa_family = AF_INET;
                return 0;
        } else {
                return -EAFNOSUPPORT;
        }
}

static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
                              const union vxlan_addr *ip)
{
        return nla_put_in_addr(skb, attr, ip->sin.sin_addr.s_addr);
}
#endif

/* Virtual Network hash table head */
static inline struct hlist_head *vni_head(struct vxlan_sock *vs, __be32 vni)
{
        return &vs->vni_list[hash_32((__force u32)vni, VNI_HASH_BITS)];
}

/* Socket hash table head */
static inline struct hlist_head *vs_head(struct net *net, __be16 port)
{
        struct vxlan_net *vn = net_generic(net, vxlan_net_id);

        return &vn->sock_list[hash_32(ntohs(port), PORT_HASH_BITS)];
}

/* First remote destination for a forwarding entry.
 * Guaranteed to be non-NULL because remotes are never deleted.
 */
static inline struct vxlan_rdst *first_remote_rcu(struct vxlan_fdb *fdb)
{
        return list_entry_rcu(fdb->remotes.next, struct vxlan_rdst, list);
}

static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb)
{
        return list_first_entry(&fdb->remotes, struct vxlan_rdst, list);
}

/* Find VXLAN socket based on network namespace, address family and UDP port
 * and enabled unshareable flags.
 */
static struct vxlan_sock *vxlan_find_sock(struct net *net, sa_family_t family,
                                          __be16 port, u32 flags)
{
        struct vxlan_sock *vs;

        flags &= VXLAN_F_RCV_FLAGS;

        hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) {
                if (inet_sk(vs->sock->sk)->inet_sport == port &&
                    vxlan_get_sk_family(vs) == family &&
                    vs->flags == flags)
                        return vs;
        }
        return NULL;
}

static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, int ifindex,
                                           __be32 vni)
{
        struct vxlan_dev_node *node;

        /* For flow based devices, map all packets to VNI 0 */
        if (vs->flags & VXLAN_F_COLLECT_METADATA)
                vni = 0;

        hlist_for_each_entry_rcu(node, vni_head(vs, vni), hlist) {
                if (node->vxlan->default_dst.remote_vni != vni)
                        continue;

                if (IS_ENABLED(CONFIG_IPV6)) {
                        const struct vxlan_config *cfg = &node->vxlan->cfg;

                        if ((cfg->flags & VXLAN_F_IPV6_LINKLOCAL) &&
                            cfg->remote_ifindex != ifindex)
                                continue;
                }

                return node->vxlan;
        }

        return NULL;
}

/* Look up VNI in a per net namespace table */
static struct vxlan_dev *vxlan_find_vni(struct net *net, int ifindex,
                                        __be32 vni, sa_family_t family,
                                        __be16 port, u32 flags)
{
        struct vxlan_sock *vs;

        vs = vxlan_find_sock(net, family, port, flags);
        if (!vs)
                return NULL;

        return vxlan_vs_find_vni(vs, ifindex, vni);
}

/* Fill in neighbour message in skbuff. */
static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
                          const struct vxlan_fdb *fdb,
                          u32 portid, u32 seq, int type, unsigned int flags,
                          const struct vxlan_rdst *rdst)
{
        unsigned long now = jiffies;
        struct nda_cacheinfo ci;
        struct nlmsghdr *nlh;
        struct ndmsg *ndm;
        bool send_ip, send_eth;

        nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
        if (nlh == NULL)
                return -EMSGSIZE;

        ndm = nlmsg_data(nlh);
        memset(ndm, 0, sizeof(*ndm));

        send_eth = send_ip = true;

        if (type == RTM_GETNEIGH) {
                send_ip = !vxlan_addr_any(&rdst->remote_ip);
                send_eth = !is_zero_ether_addr(fdb->eth_addr);
                ndm->ndm_family = send_ip ? rdst->remote_ip.sa.sa_family : AF_INET;
        } else
                ndm->ndm_family = AF_BRIDGE;
        ndm->ndm_state = fdb->state;
        ndm->ndm_ifindex = vxlan->dev->ifindex;
        ndm->ndm_flags = fdb->flags;
        ndm->ndm_type = RTN_UNICAST;

        if (!net_eq(dev_net(vxlan->dev), vxlan->net) &&
            nla_put_s32(skb, NDA_LINK_NETNSID,
                        peernet2id(dev_net(vxlan->dev), vxlan->net)))
                goto nla_put_failure;

        if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
                goto nla_put_failure;

        if (send_ip && vxlan_nla_put_addr(skb, NDA_DST, &rdst->remote_ip))
                goto nla_put_failure;

        if (rdst->remote_port && rdst->remote_port != vxlan->cfg.dst_port &&
            nla_put_be16(skb, NDA_PORT, rdst->remote_port))
                goto nla_put_failure;
        if (rdst->remote_vni != vxlan->default_dst.remote_vni &&
            nla_put_u32(skb, NDA_VNI, be32_to_cpu(rdst->remote_vni)))
                goto nla_put_failure;
        if ((vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) && fdb->vni &&
            nla_put_u32(skb, NDA_SRC_VNI,
                        be32_to_cpu(fdb->vni)))
                goto nla_put_failure;
        if (rdst->remote_ifindex &&
            nla_put_u32(skb, NDA_IFINDEX, rdst->remote_ifindex))
                goto nla_put_failure;

        ci.ndm_used      = jiffies_to_clock_t(now - fdb->used);
        ci.ndm_confirmed = 0;
        ci.ndm_updated   = jiffies_to_clock_t(now - fdb->updated);
        ci.ndm_refcnt    = 0;

        if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
                goto nla_put_failure;

        nlmsg_end(skb, nlh);
        return 0;

nla_put_failure:
        nlmsg_cancel(skb, nlh);
        return -EMSGSIZE;
}

static inline size_t vxlan_nlmsg_size(void)
{
        return NLMSG_ALIGN(sizeof(struct ndmsg))
                + nla_total_size(ETH_ALEN) /* NDA_LLADDR */
                + nla_total_size(sizeof(struct in6_addr)) /* NDA_DST */
                + nla_total_size(sizeof(__be16)) /* NDA_PORT */
                + nla_total_size(sizeof(__be32)) /* NDA_VNI */
                + nla_total_size(sizeof(__u32)) /* NDA_IFINDEX */
                + nla_total_size(sizeof(__s32)) /* NDA_LINK_NETNSID */
                + nla_total_size(sizeof(struct nda_cacheinfo));
}

static void vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb,
                             struct vxlan_rdst *rd, int type)
{
        struct net *net = dev_net(vxlan->dev);
        struct sk_buff *skb;
        int err = -ENOBUFS;

        skb = nlmsg_new(vxlan_nlmsg_size(), GFP_ATOMIC);
        if (skb == NULL)
                goto errout;

        err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, rd);
        if (err < 0) {
                /* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
                WARN_ON(err == -EMSGSIZE);
                kfree_skb(skb);
                goto errout;
        }

        rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
        return;
errout:
        if (err < 0)
                rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}

static void vxlan_ip_miss(struct net_device *dev, union vxlan_addr *ipa)
{
        struct vxlan_dev *vxlan = netdev_priv(dev);
        struct vxlan_fdb f = {
                .state = NUD_STALE,
        };
        struct vxlan_rdst remote = {
                .remote_ip = *ipa, /* goes to NDA_DST */
                .remote_vni = cpu_to_be32(VXLAN_N_VID),
        };

        vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH);
}

static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
{
        struct vxlan_fdb f = {
                .state = NUD_STALE,
        };
        struct vxlan_rdst remote = { };

        memcpy(f.eth_addr, eth_addr, ETH_ALEN);

        vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH);
}

/* Hash Ethernet address */
static u32 eth_hash(const unsigned char *addr)
{
        u64 value = get_unaligned((u64 *)addr);

        /* only want 6 bytes */
#ifdef __BIG_ENDIAN
        value >>= 16;
#else
        value <<= 16;
#endif
        return hash_64(value, FDB_HASH_BITS);
}

static u32 eth_vni_hash(const unsigned char *addr, __be32 vni)
{
        /* use 1 byte of OUI and 3 bytes of NIC */
        u32 key = get_unaligned((u32 *)(addr + 2));

        return jhash_2words(key, vni, vxlan_salt) & (FDB_HASH_SIZE - 1);
}

/* Hash chain to use given mac address */
static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
                                                const u8 *mac, __be32 vni)
{
        if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA)
                return &vxlan->fdb_head[eth_vni_hash(mac, vni)];
        else
                return &vxlan->fdb_head[eth_hash(mac)];
}

/* Look up Ethernet address in forwarding table */
static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan,
                                          const u8 *mac, __be32 vni)
{
        struct hlist_head *head = vxlan_fdb_head(vxlan, mac, vni);
        struct vxlan_fdb *f;

        hlist_for_each_entry_rcu(f, head, hlist) {
                if (ether_addr_equal(mac, f->eth_addr)) {
                        if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) {
                                if (vni == f->vni)
                                        return f;
                        } else {
                                return f;
                        }
                }
        }

        return NULL;
}

static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
                                        const u8 *mac, __be32 vni)
{
        struct vxlan_fdb *f;

        f = __vxlan_find_mac(vxlan, mac, vni);
        if (f)
                f->used = jiffies;

        return f;
}

/* caller should hold vxlan->hash_lock */
static struct vxlan_rdst *vxlan_fdb_find_rdst(struct vxlan_fdb *f,
                                              union vxlan_addr *ip, __be16 port,
                                              __be32 vni, __u32 ifindex)
{
        struct vxlan_rdst *rd;

        list_for_each_entry(rd, &f->remotes, list) {
                if (vxlan_addr_equal(&rd->remote_ip, ip) &&
                    rd->remote_port == port &&
                    rd->remote_vni == vni &&
                    rd->remote_ifindex == ifindex)
                        return rd;
        }

        return NULL;
}

/* Replace destination of unicast mac */
static int vxlan_fdb_replace(struct vxlan_fdb *f,
                             union vxlan_addr *ip, __be16 port, __be32 vni,
                             __u32 ifindex)
{
        struct vxlan_rdst *rd;

        rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
        if (rd)
                return 0;

        rd = list_first_entry_or_null(&f->remotes, struct vxlan_rdst, list);
        if (!rd)
                return 0;

        dst_cache_reset(&rd->dst_cache);
        rd->remote_ip = *ip;
        rd->remote_port = port;
        rd->remote_vni = vni;
        rd->remote_ifindex = ifindex;
        return 1;
}

/* Add/update destinations for multicast */
static int vxlan_fdb_append(struct vxlan_fdb *f,
                            union vxlan_addr *ip, __be16 port, __be32 vni,
                            __u32 ifindex, struct vxlan_rdst **rdp)
{
        struct vxlan_rdst *rd;

        rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
        if (rd)
                return 0;

        rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
        if (rd == NULL)
                return -ENOBUFS;

        if (dst_cache_init(&rd->dst_cache, GFP_ATOMIC)) {
                kfree(rd);
                return -ENOBUFS;
        }

        rd->remote_ip = *ip;
        rd->remote_port = port;
        rd->remote_vni = vni;
        rd->remote_ifindex = ifindex;

        list_add_tail_rcu(&rd->list, &f->remotes);

        *rdp = rd;
        return 1;
}

static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb,
                                          unsigned int off,
                                          struct vxlanhdr *vh, size_t hdrlen,
                                          __be32 vni_field,
                                          struct gro_remcsum *grc,
                                          bool nopartial)
{
        size_t start, offset;

        if (skb->remcsum_offload)
                return vh;

        if (!NAPI_GRO_CB(skb)->csum_valid)
                return NULL;

        start = vxlan_rco_start(vni_field);
        offset = start + vxlan_rco_offset(vni_field);

        vh = skb_gro_remcsum_process(skb, (void *)vh, off, hdrlen,
                                     start, offset, grc, nopartial);

        skb->remcsum_offload = 1;

        return vh;
}

static struct sk_buff **vxlan_gro_receive(struct sock *sk,
                                          struct sk_buff **head,
                                          struct sk_buff *skb)
{
        struct sk_buff *p, **pp = NULL;
        struct vxlanhdr *vh, *vh2;
        unsigned int hlen, off_vx;
        int flush = 1;
        struct vxlan_sock *vs = rcu_dereference_sk_user_data(sk);
        __be32 flags;
        struct gro_remcsum grc;

        skb_gro_remcsum_init(&grc);

        off_vx = skb_gro_offset(skb);
        hlen = off_vx + sizeof(*vh);
        vh = skb_gro_header_fast(skb, off_vx);
        if (skb_gro_header_hard(skb, hlen)) {
                vh = skb_gro_header_slow(skb, hlen, off_vx);
                if (unlikely(!vh))
                        goto out;
        }

        skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr));

        flags = vh->vx_flags;

        if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) {
                vh = vxlan_gro_remcsum(skb, off_vx, vh, sizeof(struct vxlanhdr),
                                       vh->vx_vni, &grc,
                                       !!(vs->flags &
                                          VXLAN_F_REMCSUM_NOPARTIAL));

                if (!vh)
                        goto out;
        }

        skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */

        for (p = *head; p; p = p->next) {
                if (!NAPI_GRO_CB(p)->same_flow)
                        continue;

                vh2 = (struct vxlanhdr *)(p->data + off_vx);
                if (vh->vx_flags != vh2->vx_flags ||
                    vh->vx_vni != vh2->vx_vni) {
                        NAPI_GRO_CB(p)->same_flow = 0;
                        continue;
                }
        }

        pp = call_gro_receive(eth_gro_receive, head, skb);
        flush = 0;

out:
        skb_gro_flush_final_remcsum(skb, pp, flush, &grc);

        return pp;
}

static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
{
        /* Sets 'skb->inner_mac_header' since we are always called with
         * 'skb->encapsulation' set.
         */
        return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr));
}

static struct vxlan_fdb *vxlan_fdb_alloc(struct vxlan_dev *vxlan,
                                         const u8 *mac, __u16 state,
                                         __be32 src_vni, __u8 ndm_flags)
{
        struct vxlan_fdb *f;

        f = kmalloc(sizeof(*f), GFP_ATOMIC);
        if (!f)
                return NULL;
        f->state = state;
        f->flags = ndm_flags;
        f->updated = f->used = jiffies;
        f->vni = src_vni;
        INIT_LIST_HEAD(&f->remotes);
        memcpy(f->eth_addr, mac, ETH_ALEN);

        return f;
}

static int vxlan_fdb_create(struct vxlan_dev *vxlan,
                            const u8 *mac, union vxlan_addr *ip,
                            __u16 state, __be16 port, __be32 src_vni,
                            __be32 vni, __u32 ifindex, __u8 ndm_flags,
                            struct vxlan_fdb **fdb)
{
        struct vxlan_rdst *rd = NULL;
        struct vxlan_fdb *f;
        int rc;

        if (vxlan->cfg.addrmax &&
            vxlan->addrcnt >= vxlan->cfg.addrmax)
                return -ENOSPC;

        netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
        f = vxlan_fdb_alloc(vxlan, mac, state, src_vni, ndm_flags);
        if (!f)
                return -ENOMEM;

        rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
        if (rc < 0) {
                kfree(f);
                return rc;
        }

        ++vxlan->addrcnt;
        hlist_add_head_rcu(&f->hlist,
                           vxlan_fdb_head(vxlan, mac, src_vni));

        *fdb = f;

        return 0;
}

/* Add new entry to forwarding table -- assumes lock held */
static int vxlan_fdb_update(struct vxlan_dev *vxlan,
                            const u8 *mac, union vxlan_addr *ip,
                            __u16 state, __u16 flags,
                            __be16 port, __be32 src_vni, __be32 vni,
                            __u32 ifindex, __u8 ndm_flags)
{
        struct vxlan_rdst *rd = NULL;
        struct vxlan_fdb *f;
        int notify = 0;
        int rc;

        f = __vxlan_find_mac(vxlan, mac, src_vni);
        if (f) {
                if (flags & NLM_F_EXCL) {
                        netdev_dbg(vxlan->dev,
                                   "lost race to create %pM\n", mac);
                        return -EEXIST;
                }
                if (f->state != state) {
                        f->state = state;
                        f->updated = jiffies;
                        notify = 1;
                }
                if (f->flags != ndm_flags) {
                        f->flags = ndm_flags;
                        f->updated = jiffies;
                        notify = 1;
                }
                if ((flags & NLM_F_REPLACE)) {
                        /* Only change unicasts */
                        if (!(is_multicast_ether_addr(f->eth_addr) ||
                              is_zero_ether_addr(f->eth_addr))) {
                                notify |= vxlan_fdb_replace(f, ip, port, vni,
                                                            ifindex);
                        } else
                                return -EOPNOTSUPP;
                }
                if ((flags & NLM_F_APPEND) &&
                    (is_multicast_ether_addr(f->eth_addr) ||
                     is_zero_ether_addr(f->eth_addr))) {
                        rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);

                        if (rc < 0)
                                return rc;
                        notify |= rc;
                }
        } else {
                if (!(flags & NLM_F_CREATE))
                        return -ENOENT;

                /* Disallow replace to add a multicast entry */
                if ((flags & NLM_F_REPLACE) &&
                    (is_multicast_ether_addr(mac) || is_zero_ether_addr(mac)))
                        return -EOPNOTSUPP;

                netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
                rc = vxlan_fdb_create(vxlan, mac, ip, state, port, src_vni,
                                      vni, ifindex, ndm_flags, &f);
                if (rc < 0)
                        return rc;
                notify = 1;
        }

        if (notify) {
                if (rd == NULL)
                        rd = first_remote_rtnl(f);
                vxlan_fdb_notify(vxlan, f, rd, RTM_NEWNEIGH);
        }

        return 0;
}
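
/* Illustrative usage, not part of the driver: these FDB paths are driven
 * from userspace over rtnetlink, e.g. with iproute2:
 *
 *   bridge fdb add 00:17:42:8a:b4:05 dev vxlan0 dst 192.19.0.2
 *   bridge fdb append 00:00:00:00:00:00 dev vxlan0 dst 192.19.0.3
 *
 * "add" maps to NLM_F_CREATE|NLM_F_EXCL, "append" to NLM_F_APPEND (used to
 * add further flood destinations to the all-zeros entry), and "replace"
 * to NLM_F_REPLACE.
 */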

static void vxlan_fdb_free(struct rcu_head *head)
{
        struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu);
        struct vxlan_rdst *rd, *nd;

        list_for_each_entry_safe(rd, nd, &f->remotes, list) {
                dst_cache_destroy(&rd->dst_cache);
                kfree(rd);
        }
        kfree(f);
}

static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
                              bool do_notify)
{
        netdev_dbg(vxlan->dev,
                   "delete %pM\n", f->eth_addr);

        --vxlan->addrcnt;
        if (do_notify)
                vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH);

        hlist_del_rcu(&f->hlist);
        call_rcu(&f->rcu, vxlan_fdb_free);
}

static void vxlan_dst_free(struct rcu_head *head)
{
        struct vxlan_rdst *rd = container_of(head, struct vxlan_rdst, rcu);

        dst_cache_destroy(&rd->dst_cache);
        kfree(rd);
}

static void vxlan_fdb_dst_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
                                  struct vxlan_rdst *rd)
{
        list_del_rcu(&rd->list);
        vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH);
        call_rcu(&rd->rcu, vxlan_dst_free);
}

static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
                           union vxlan_addr *ip, __be16 *port, __be32 *src_vni,
                           __be32 *vni, u32 *ifindex)
{
        struct net *net = dev_net(vxlan->dev);
        int err;

        if (tb[NDA_DST]) {
                err = vxlan_nla_get_addr(ip, tb[NDA_DST]);
                if (err)
                        return err;
        } else {
                union vxlan_addr *remote = &vxlan->default_dst.remote_ip;
                if (remote->sa.sa_family == AF_INET) {
                        ip->sin.sin_addr.s_addr = htonl(INADDR_ANY);
                        ip->sa.sa_family = AF_INET;
#if IS_ENABLED(CONFIG_IPV6)
                } else {
                        ip->sin6.sin6_addr = in6addr_any;
                        ip->sa.sa_family = AF_INET6;
#endif
                }
        }

        if (tb[NDA_PORT]) {
                if (nla_len(tb[NDA_PORT]) != sizeof(__be16))
                        return -EINVAL;
                *port = nla_get_be16(tb[NDA_PORT]);
        } else {
                *port = vxlan->cfg.dst_port;
        }

        if (tb[NDA_VNI]) {
                if (nla_len(tb[NDA_VNI]) != sizeof(u32))
                        return -EINVAL;
                *vni = cpu_to_be32(nla_get_u32(tb[NDA_VNI]));
        } else {
                *vni = vxlan->default_dst.remote_vni;
        }

        if (tb[NDA_SRC_VNI]) {
                if (nla_len(tb[NDA_SRC_VNI]) != sizeof(u32))
                        return -EINVAL;
                *src_vni = cpu_to_be32(nla_get_u32(tb[NDA_SRC_VNI]));
        } else {
                *src_vni = vxlan->default_dst.remote_vni;
        }

        if (tb[NDA_IFINDEX]) {
                struct net_device *tdev;

                if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
                        return -EINVAL;
                *ifindex = nla_get_u32(tb[NDA_IFINDEX]);
                tdev = __dev_get_by_index(net, *ifindex);
                if (!tdev)
                        return -EADDRNOTAVAIL;
        } else {
                *ifindex = 0;
        }

        return 0;
}

/* Add static entry (via netlink) */
static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
                         struct net_device *dev,
                         const unsigned char *addr, u16 vid, u16 flags)
{
        struct vxlan_dev *vxlan = netdev_priv(dev);
        /* struct net *net = dev_net(vxlan->dev); */
        union vxlan_addr ip;
        __be16 port;
        __be32 src_vni, vni;
        u32 ifindex;
        int err;

        if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) {
                pr_info("RTM_NEWNEIGH with invalid state %#x\n",
                        ndm->ndm_state);
                return -EINVAL;
        }

        if (tb[NDA_DST] == NULL)
                return -EINVAL;

        err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &src_vni, &vni, &ifindex);
        if (err)
                return err;

        if (vxlan->default_dst.remote_ip.sa.sa_family != ip.sa.sa_family)
                return -EAFNOSUPPORT;

        spin_lock_bh(&vxlan->hash_lock);
        err = vxlan_fdb_update(vxlan, addr, &ip, ndm->ndm_state, flags,
                               port, src_vni, vni, ifindex, ndm->ndm_flags);
        spin_unlock_bh(&vxlan->hash_lock);

        return err;
}

static int __vxlan_fdb_delete(struct vxlan_dev *vxlan,
                              const unsigned char *addr, union vxlan_addr ip,
                              __be16 port, __be32 src_vni, __be32 vni,
                              u32 ifindex, u16 vid)
{
        struct vxlan_fdb *f;
        struct vxlan_rdst *rd = NULL;
        int err = -ENOENT;

        f = vxlan_find_mac(vxlan, addr, src_vni);
        if (!f)
                return err;

        if (!vxlan_addr_any(&ip)) {
                rd = vxlan_fdb_find_rdst(f, &ip, port, vni, ifindex);
                if (!rd)
                        goto out;
        }

        /* remove a destination if it's not the only one on the list,
         * otherwise destroy the fdb entry
         */
        if (rd && !list_is_singular(&f->remotes)) {
                vxlan_fdb_dst_destroy(vxlan, f, rd);
                goto out;
        }

        vxlan_fdb_destroy(vxlan, f, true);

out:
        return 0;
}

/* Delete entry (via netlink) */
static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
                            struct net_device *dev,
                            const unsigned char *addr, u16 vid)
{
        struct vxlan_dev *vxlan = netdev_priv(dev);
        union vxlan_addr ip;
        __be32 src_vni, vni;
        __be16 port;
        u32 ifindex;
        int err;

        err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &src_vni, &vni, &ifindex);
        if (err)
                return err;

        spin_lock_bh(&vxlan->hash_lock);
        err = __vxlan_fdb_delete(vxlan, addr, ip, port, src_vni, vni, ifindex,
                                 vid);
        spin_unlock_bh(&vxlan->hash_lock);

        return err;
}

/* Dump forwarding table */
static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
                          struct net_device *dev,
                          struct net_device *filter_dev, int *idx)
{
        struct vxlan_dev *vxlan = netdev_priv(dev);
        unsigned int h;
        int err = 0;

        for (h = 0; h < FDB_HASH_SIZE; ++h) {
                struct vxlan_fdb *f;

                hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
                        struct vxlan_rdst *rd;

                        list_for_each_entry_rcu(rd, &f->remotes, list) {
                                if (*idx < cb->args[2])
                                        goto skip;

                                err = vxlan_fdb_info(skb, vxlan, f,
                                                     NETLINK_CB(cb->skb).portid,
                                                     cb->nlh->nlmsg_seq,
                                                     RTM_NEWNEIGH,
                                                     NLM_F_MULTI, rd);
                                if (err < 0)
                                        goto out;
skip:
                                *idx += 1;
                        }
                }
        }
out:
        return err;
}

/* Watch incoming packets to learn mapping between Ethernet address
 * and Tunnel endpoint.
 * Return true if packet is bogus and should be dropped.
 */
static bool vxlan_snoop(struct net_device *dev,
                        union vxlan_addr *src_ip, const u8 *src_mac,
                        u32 src_ifindex, __be32 vni)
{
        struct vxlan_dev *vxlan = netdev_priv(dev);
        struct vxlan_fdb *f;
        u32 ifindex = 0;

#if IS_ENABLED(CONFIG_IPV6)
        if (src_ip->sa.sa_family == AF_INET6 &&
            (ipv6_addr_type(&src_ip->sin6.sin6_addr) & IPV6_ADDR_LINKLOCAL))
                ifindex = src_ifindex;
#endif

        f = vxlan_find_mac(vxlan, src_mac, vni);
        if (likely(f)) {
                struct vxlan_rdst *rdst = first_remote_rcu(f);

                if (likely(vxlan_addr_equal(&rdst->remote_ip, src_ip) &&
                           rdst->remote_ifindex == ifindex))
                        return false;

                /* Don't migrate static entries, drop packets */
                if (f->state & (NUD_PERMANENT | NUD_NOARP))
                        return true;

                if (net_ratelimit())
                        netdev_info(dev,
                                    "%pM migrated from %pIS to %pIS\n",
                                    src_mac, &rdst->remote_ip.sa, &src_ip->sa);

                rdst->remote_ip = *src_ip;
                f->updated = jiffies;
                vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH);
        } else {
                /* learned new entry */
                spin_lock(&vxlan->hash_lock);

                /* close off race between vxlan_flush and incoming packets */
                if (netif_running(dev))
                        vxlan_fdb_update(vxlan, src_mac, src_ip,
                                         NUD_REACHABLE,
                                         NLM_F_EXCL|NLM_F_CREATE,
                                         vxlan->cfg.dst_port,
                                         vni,
                                         vxlan->default_dst.remote_vni,
                                         ifindex, NTF_SELF);
                spin_unlock(&vxlan->hash_lock);
        }

        return false;
}
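
/* Illustrative note, not part of the driver: source-MAC learning is
 * controlled per device at creation time, e.g. with iproute2:
 *
 *   ip link add vxlan0 type vxlan id 42 dev eth0 learning
 *   ip link add vxlan1 type vxlan id 43 dev eth0 nolearning
 *
 * With "nolearning", VXLAN_F_LEARN is clear and vxlan_snoop() is never
 * called; forwarding then relies entirely on static FDB entries or an
 * external control plane.
 */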

/* See if multicast group is already in use by other ID */
static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
{
        struct vxlan_dev *vxlan;
        struct vxlan_sock *sock4;
#if IS_ENABLED(CONFIG_IPV6)
        struct vxlan_sock *sock6;
#endif
        unsigned short family = dev->default_dst.remote_ip.sa.sa_family;

        sock4 = rtnl_dereference(dev->vn4_sock);

        /* The vxlan_sock is only used by dev, leaving group has
         * no effect on other vxlan devices.
         */
        if (family == AF_INET && sock4 && refcount_read(&sock4->refcnt) == 1)
                return false;
#if IS_ENABLED(CONFIG_IPV6)
        sock6 = rtnl_dereference(dev->vn6_sock);
        if (family == AF_INET6 && sock6 && refcount_read(&sock6->refcnt) == 1)
                return false;
#endif

        list_for_each_entry(vxlan, &vn->vxlan_list, next) {
                if (!netif_running(vxlan->dev) || vxlan == dev)
                        continue;

                if (family == AF_INET &&
                    rtnl_dereference(vxlan->vn4_sock) != sock4)
                        continue;
#if IS_ENABLED(CONFIG_IPV6)
                if (family == AF_INET6 &&
                    rtnl_dereference(vxlan->vn6_sock) != sock6)
                        continue;
#endif

                if (!vxlan_addr_equal(&vxlan->default_dst.remote_ip,
                                      &dev->default_dst.remote_ip))
                        continue;

                if (vxlan->default_dst.remote_ifindex !=
                    dev->default_dst.remote_ifindex)
                        continue;

                return true;
        }

        return false;
}

static bool __vxlan_sock_release_prep(struct vxlan_sock *vs)
{
        struct vxlan_net *vn;

        if (!vs)
                return false;
        if (!refcount_dec_and_test(&vs->refcnt))
                return false;

        vn = net_generic(sock_net(vs->sock->sk), vxlan_net_id);
        spin_lock(&vn->sock_lock);
        hlist_del_rcu(&vs->hlist);
        udp_tunnel_notify_del_rx_port(vs->sock,
                                      (vs->flags & VXLAN_F_GPE) ?
                                      UDP_TUNNEL_TYPE_VXLAN_GPE :
                                      UDP_TUNNEL_TYPE_VXLAN);
        spin_unlock(&vn->sock_lock);

        return true;
}

static void vxlan_sock_release(struct vxlan_dev *vxlan)
{
        struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
#if IS_ENABLED(CONFIG_IPV6)
        struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);

        RCU_INIT_POINTER(vxlan->vn6_sock, NULL);
#endif

        RCU_INIT_POINTER(vxlan->vn4_sock, NULL);
        synchronize_net();

        vxlan_vs_del_dev(vxlan);

        if (__vxlan_sock_release_prep(sock4)) {
                udp_tunnel_sock_release(sock4->sock);
                kfree(sock4);
        }

#if IS_ENABLED(CONFIG_IPV6)
        if (__vxlan_sock_release_prep(sock6)) {
                udp_tunnel_sock_release(sock6->sock);
                kfree(sock6);
        }
#endif
}

/* Update multicast group membership when first VNI on
 * multicast address is brought up
 */
static int vxlan_igmp_join(struct vxlan_dev *vxlan)
{
        struct sock *sk;
        union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
        int ifindex = vxlan->default_dst.remote_ifindex;
        int ret = -EINVAL;

        if (ip->sa.sa_family == AF_INET) {
                struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
                struct ip_mreqn mreq = {
                        .imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr,
                        .imr_ifindex = ifindex,
                };

                sk = sock4->sock->sk;
                lock_sock(sk);
                ret = ip_mc_join_group(sk, &mreq);
                release_sock(sk);
#if IS_ENABLED(CONFIG_IPV6)
        } else {
                struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);

                sk = sock6->sock->sk;
                lock_sock(sk);
                ret = ipv6_stub->ipv6_sock_mc_join(sk, ifindex,
                                                   &ip->sin6.sin6_addr);
                release_sock(sk);
#endif
        }

        return ret;
}
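
/* Illustrative usage, not part of the driver: the multicast group joined
 * here comes from the device's default remote, e.g.:
 *
 *   ip link add vxlan0 type vxlan id 42 group 239.1.1.1 dev eth0 \
 *           dstport 4789
 *
 * Unknown-destination and broadcast frames are then flooded to that group,
 * and the join/leave pair keeps the underlay NIC subscribed for as long as
 * any VNI using the group is up.
 */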

/* Inverse of vxlan_igmp_join when last VNI is brought down */
static int vxlan_igmp_leave(struct vxlan_dev *vxlan)
{
        struct sock *sk;
        union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
        int ifindex = vxlan->default_dst.remote_ifindex;
        int ret = -EINVAL;

        if (ip->sa.sa_family == AF_INET) {
                struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
                struct ip_mreqn mreq = {
                        .imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr,
                        .imr_ifindex = ifindex,
                };

                sk = sock4->sock->sk;
                lock_sock(sk);
                ret = ip_mc_leave_group(sk, &mreq);
                release_sock(sk);
#if IS_ENABLED(CONFIG_IPV6)
        } else {
                struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);

                sk = sock6->sock->sk;
                lock_sock(sk);
                ret = ipv6_stub->ipv6_sock_mc_drop(sk, ifindex,
                                                   &ip->sin6.sin6_addr);
                release_sock(sk);
#endif
        }

        return ret;
}

static bool vxlan_remcsum(struct vxlanhdr *unparsed,
                          struct sk_buff *skb, u32 vxflags)
{
        size_t start, offset;

        if (!(unparsed->vx_flags & VXLAN_HF_RCO) || skb->remcsum_offload)
                goto out;

        start = vxlan_rco_start(unparsed->vx_vni);
        offset = start + vxlan_rco_offset(unparsed->vx_vni);

        if (!pskb_may_pull(skb, offset + sizeof(u16)))
                return false;

        skb_remcsum_process(skb, (void *)(vxlan_hdr(skb) + 1), start, offset,
                            !!(vxflags & VXLAN_F_REMCSUM_NOPARTIAL));
out:
        unparsed->vx_flags &= ~VXLAN_HF_RCO;
        unparsed->vx_vni &= VXLAN_VNI_MASK;
        return true;
}

static void vxlan_parse_gbp_hdr(struct vxlanhdr *unparsed,
                                struct sk_buff *skb, u32 vxflags,
                                struct vxlan_metadata *md)
{
        struct vxlanhdr_gbp *gbp = (struct vxlanhdr_gbp *)unparsed;
        struct metadata_dst *tun_dst;

        if (!(unparsed->vx_flags & VXLAN_HF_GBP))
                goto out;

        md->gbp = ntohs(gbp->policy_id);

        tun_dst = (struct metadata_dst *)skb_dst(skb);
        if (tun_dst) {
                tun_dst->u.tun_info.key.tun_flags |= TUNNEL_VXLAN_OPT;
                tun_dst->u.tun_info.options_len = sizeof(*md);
        }
        if (gbp->dont_learn)
                md->gbp |= VXLAN_GBP_DONT_LEARN;

        if (gbp->policy_applied)
                md->gbp |= VXLAN_GBP_POLICY_APPLIED;

        /* In flow-based mode, GBP is carried in dst_metadata */
        if (!(vxflags & VXLAN_F_COLLECT_METADATA))
                skb->mark = md->gbp;
out:
        unparsed->vx_flags &= ~VXLAN_GBP_USED_BITS;
}
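
/* Illustrative note, not part of the driver: Group Based Policy extends
 * the VXLAN header with a 16-bit policy ID plus D (don't learn) and A
 * (policy applied) bits. It is opt-in per device, e.g.:
 *
 *   ip link add vxlan0 type vxlan id 42 remote 10.0.0.2 dstport 4789 gbp
 *
 * In non-metadata mode the policy ID is surfaced as skb->mark so that
 * iptables/nftables or tc rules can match on it.
 */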

static bool vxlan_parse_gpe_hdr(struct vxlanhdr *unparsed,
                                __be16 *protocol,
                                struct sk_buff *skb, u32 vxflags)
{
        struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)unparsed;

        /* Need to have Next Protocol set for interfaces in GPE mode. */
        if (!gpe->np_applied)
                return false;
        /* "The initial version is 0. If a receiver does not support the
         * version indicated it MUST drop the packet."
         */
        if (gpe->version != 0)
                return false;
        /* "When the O bit is set to 1, the packet is an OAM packet and OAM
         * processing MUST occur." However, we don't implement OAM
         * processing, thus drop the packet.
         */
        if (gpe->oam_flag)
                return false;

        *protocol = tun_p_to_eth_p(gpe->next_protocol);
        if (!*protocol)
                return false;

        unparsed->vx_flags &= ~VXLAN_GPE_USED_BITS;
        return true;
}
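
/* Illustrative note, not part of the driver: VXLAN-GPE replaces the
 * implicit inner-Ethernet assumption with an explicit Next Protocol
 * field, so raw IPv4/IPv6 payloads can be carried. It is only usable
 * together with collect-metadata mode, e.g.:
 *
 *   ip link add vxlan0 type vxlan dstport 4790 external gpe
 */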

static bool vxlan_set_mac(struct vxlan_dev *vxlan,
                          struct vxlan_sock *vs,
                          struct sk_buff *skb, __be32 vni)
{
        union vxlan_addr saddr;
        u32 ifindex = skb->dev->ifindex;

        skb_reset_mac_header(skb);
        skb->protocol = eth_type_trans(skb, vxlan->dev);
        skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);

        /* Ignore packet loops (and multicast echo) */
        if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
                return false;

        /* Get address from the outer IP header */
        if (vxlan_get_sk_family(vs) == AF_INET) {
                saddr.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
                saddr.sa.sa_family = AF_INET;
#if IS_ENABLED(CONFIG_IPV6)
        } else {
                saddr.sin6.sin6_addr = ipv6_hdr(skb)->saddr;
                saddr.sa.sa_family = AF_INET6;
#endif
        }

        if ((vxlan->cfg.flags & VXLAN_F_LEARN) &&
            vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source, ifindex, vni))
                return false;

        return true;
}

static bool vxlan_ecn_decapsulate(struct vxlan_sock *vs, void *oiph,
                                  struct sk_buff *skb)
{
        int err = 0;

        if (vxlan_get_sk_family(vs) == AF_INET)
                err = IP_ECN_decapsulate(oiph, skb);
#if IS_ENABLED(CONFIG_IPV6)
        else
                err = IP6_ECN_decapsulate(oiph, skb);
#endif

        if (unlikely(err) && log_ecn_error) {
                if (vxlan_get_sk_family(vs) == AF_INET)
                        net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
                                             &((struct iphdr *)oiph)->saddr,
                                             ((struct iphdr *)oiph)->tos);
                else
                        net_info_ratelimited("non-ECT from %pI6\n",
                                             &((struct ipv6hdr *)oiph)->saddr);
        }
        return err <= 1;
}

/* Callback from net/ipv4/udp.c to receive packets */
static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
{
        struct pcpu_sw_netstats *stats;
        struct vxlan_dev *vxlan;
        struct vxlan_sock *vs;
        struct vxlanhdr unparsed;
        struct vxlan_metadata _md;
        struct vxlan_metadata *md = &_md;
        __be16 protocol = htons(ETH_P_TEB);
        bool raw_proto = false;
        void *oiph;
        __be32 vni = 0;

        /* Need UDP and VXLAN header to be present */
        if (!pskb_may_pull(skb, VXLAN_HLEN))
                goto drop;

        unparsed = *vxlan_hdr(skb);
        /* VNI flag always required to be set */
        if (!(unparsed.vx_flags & VXLAN_HF_VNI)) {
                netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
                           ntohl(vxlan_hdr(skb)->vx_flags),
                           ntohl(vxlan_hdr(skb)->vx_vni));
                /* Return non vxlan pkt */
                goto drop;
        }
        unparsed.vx_flags &= ~VXLAN_HF_VNI;
        unparsed.vx_vni &= ~VXLAN_VNI_MASK;

        vs = rcu_dereference_sk_user_data(sk);
        if (!vs)
                goto drop;

        vni = vxlan_vni(vxlan_hdr(skb)->vx_vni);

        vxlan = vxlan_vs_find_vni(vs, skb->dev->ifindex, vni);
        if (!vxlan)
                goto drop;

        /* For backwards compatibility, only allow reserved fields to be
         * used by VXLAN extensions if explicitly requested.
         */
        if (vs->flags & VXLAN_F_GPE) {
                if (!vxlan_parse_gpe_hdr(&unparsed, &protocol, skb, vs->flags))
                        goto drop;
                raw_proto = true;
        }

        if (__iptunnel_pull_header(skb, VXLAN_HLEN, protocol, raw_proto,
                                   !net_eq(vxlan->net, dev_net(vxlan->dev))))
                goto drop;

        if (vxlan_collect_metadata(vs)) {
                struct metadata_dst *tun_dst;

                tun_dst = udp_tun_rx_dst(skb, vxlan_get_sk_family(vs), TUNNEL_KEY,
                                         key32_to_tunnel_id(vni), sizeof(*md));

                if (!tun_dst)
                        goto drop;

                md = ip_tunnel_info_opts(&tun_dst->u.tun_info);

                skb_dst_set(skb, (struct dst_entry *)tun_dst);
        } else {
                memset(md, 0, sizeof(*md));
        }

        if (vs->flags & VXLAN_F_REMCSUM_RX)
                if (!vxlan_remcsum(&unparsed, skb, vs->flags))
                        goto drop;
        if (vs->flags & VXLAN_F_GBP)
                vxlan_parse_gbp_hdr(&unparsed, skb, vs->flags, md);
        /* Note that GBP and GPE can never be active together. This is
         * ensured in vxlan_dev_configure.
         */
1602
1603 if (unparsed.vx_flags || unparsed.vx_vni) {
1604 /* If there are any unprocessed flags remaining treat
1605 * this as a malformed packet. This behavior diverges from
1606 * VXLAN RFC (RFC7348) which stipulates that bits in reserved
1607 * in reserved fields are to be ignored. The approach here
1608 * maintains compatibility with previous stack code, and also
1609 * is more robust and provides a little more security in
1610 * adding extensions to VXLAN.
1611 */
1612 goto drop;
1613 }

        if (!raw_proto) {
                if (!vxlan_set_mac(vxlan, vs, skb, vni))
                        goto drop;
        } else {
                skb_reset_mac_header(skb);
                skb->dev = vxlan->dev;
                skb->pkt_type = PACKET_HOST;
        }

        oiph = skb_network_header(skb);
        skb_reset_network_header(skb);

        if (!vxlan_ecn_decapsulate(vs, oiph, skb)) {
                ++vxlan->dev->stats.rx_frame_errors;
                ++vxlan->dev->stats.rx_errors;
                goto drop;
        }

        rcu_read_lock();

        if (unlikely(!(vxlan->dev->flags & IFF_UP))) {
                rcu_read_unlock();
                atomic_long_inc(&vxlan->dev->rx_dropped);
                goto drop;
        }

        stats = this_cpu_ptr(vxlan->dev->tstats);
        u64_stats_update_begin(&stats->syncp);
        stats->rx_packets++;
        stats->rx_bytes += skb->len;
        u64_stats_update_end(&stats->syncp);

        gro_cells_receive(&vxlan->gro_cells, skb);

        rcu_read_unlock();

        return 0;

drop:
        /* Consume bad packet */
        kfree_skb(skb);
        return 0;
}

static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
{
        struct vxlan_dev *vxlan = netdev_priv(dev);
        struct arphdr *parp;
        u8 *arpptr, *sha;
        __be32 sip, tip;
        struct neighbour *n;

        if (dev->flags & IFF_NOARP)
                goto out;

        if (!pskb_may_pull(skb, arp_hdr_len(dev))) {
                dev->stats.tx_dropped++;
                goto out;
        }
        parp = arp_hdr(skb);

        if ((parp->ar_hrd != htons(ARPHRD_ETHER) &&
             parp->ar_hrd != htons(ARPHRD_IEEE802)) ||
            parp->ar_pro != htons(ETH_P_IP) ||
            parp->ar_op != htons(ARPOP_REQUEST) ||
            parp->ar_hln != dev->addr_len ||
            parp->ar_pln != 4)
                goto out;
        arpptr = (u8 *)parp + sizeof(struct arphdr);
        sha = arpptr;
        arpptr += dev->addr_len;        /* sha */
        memcpy(&sip, arpptr, sizeof(sip));
        arpptr += sizeof(sip);
        arpptr += dev->addr_len;        /* tha */
        memcpy(&tip, arpptr, sizeof(tip));

        if (ipv4_is_loopback(tip) ||
            ipv4_is_multicast(tip))
                goto out;

        n = neigh_lookup(&arp_tbl, &tip, dev);

        if (n) {
                struct vxlan_fdb *f;
                struct sk_buff *reply;

                if (!(n->nud_state & NUD_CONNECTED)) {
                        neigh_release(n);
                        goto out;
                }

                f = vxlan_find_mac(vxlan, n->ha, vni);
                if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
                        /* bridge-local neighbor */
                        neigh_release(n);
                        goto out;
                }

                reply = arp_create(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
                                   n->ha, sha);

                neigh_release(n);

                if (reply == NULL)
                        goto out;

                skb_reset_mac_header(reply);
                __skb_pull(reply, skb_network_offset(reply));
                reply->ip_summed = CHECKSUM_UNNECESSARY;
                reply->pkt_type = PACKET_HOST;

                if (netif_rx_ni(reply) == NET_RX_DROP)
                        dev->stats.rx_dropped++;
        } else if (vxlan->cfg.flags & VXLAN_F_L3MISS) {
                union vxlan_addr ipa = {
                        .sin.sin_addr.s_addr = tip,
                        .sin.sin_family = AF_INET,
                };

                vxlan_ip_miss(dev, &ipa);
        }
out:
        consume_skb(skb);
        return NETDEV_TX_OK;
}
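
/* Illustrative note, not part of the driver: ARP reduction only runs when
 * the device was created with proxy mode, and the miss notifications above
 * depend on the l3miss flag, e.g.:
 *
 *   ip link add vxlan0 type vxlan id 42 dev eth0 proxy l2miss l3miss
 *
 * A controller can then listen for RTM_GETNEIGH events and install the
 * missing neighbour/FDB entries instead of flooding ARP into the overlay.
 */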

#if IS_ENABLED(CONFIG_IPV6)
static struct sk_buff *vxlan_na_create(struct sk_buff *request,
                                       struct neighbour *n, bool isrouter)
{
        struct net_device *dev = request->dev;
        struct sk_buff *reply;
        struct nd_msg *ns, *na;
        struct ipv6hdr *pip6;
        u8 *daddr;
        int na_olen = 8; /* opt hdr + ETH_ALEN for target */
        int ns_olen;
        int i, len;

        if (dev == NULL || !pskb_may_pull(request, request->len))
                return NULL;

        len = LL_RESERVED_SPACE(dev) + sizeof(struct ipv6hdr) +
                sizeof(*na) + na_olen + dev->needed_tailroom;
        reply = alloc_skb(len, GFP_ATOMIC);
        if (reply == NULL)
                return NULL;

        reply->protocol = htons(ETH_P_IPV6);
        reply->dev = dev;
        skb_reserve(reply, LL_RESERVED_SPACE(request->dev));
        skb_push(reply, sizeof(struct ethhdr));
        skb_reset_mac_header(reply);

        ns = (struct nd_msg *)(ipv6_hdr(request) + 1);

        daddr = eth_hdr(request)->h_source;
        ns_olen = request->len - skb_network_offset(request) -
                sizeof(struct ipv6hdr) - sizeof(*ns);
        for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) {
                if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) {
                        daddr = ns->opt + i + sizeof(struct nd_opt_hdr);
                        break;
                }
        }

        /* Ethernet header */
        ether_addr_copy(eth_hdr(reply)->h_dest, daddr);
        ether_addr_copy(eth_hdr(reply)->h_source, n->ha);
        eth_hdr(reply)->h_proto = htons(ETH_P_IPV6);
        reply->protocol = htons(ETH_P_IPV6);

        skb_pull(reply, sizeof(struct ethhdr));
        skb_reset_network_header(reply);
        skb_put(reply, sizeof(struct ipv6hdr));

        /* IPv6 header */

        pip6 = ipv6_hdr(reply);
        memset(pip6, 0, sizeof(struct ipv6hdr));
        pip6->version = 6;
        pip6->priority = ipv6_hdr(request)->priority;
        pip6->nexthdr = IPPROTO_ICMPV6;
        pip6->hop_limit = 255;
        pip6->daddr = ipv6_hdr(request)->saddr;
        pip6->saddr = *(struct in6_addr *)n->primary_key;

        skb_pull(reply, sizeof(struct ipv6hdr));
        skb_reset_transport_header(reply);

        /* Neighbor Advertisement */
        na = skb_put_zero(reply, sizeof(*na) + na_olen);
        na->icmph.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
        na->icmph.icmp6_router = isrouter;
        na->icmph.icmp6_override = 1;
        na->icmph.icmp6_solicited = 1;
        na->target = ns->target;
        ether_addr_copy(&na->opt[2], n->ha);
        na->opt[0] = ND_OPT_TARGET_LL_ADDR;
        na->opt[1] = na_olen >> 3;

        na->icmph.icmp6_cksum = csum_ipv6_magic(&pip6->saddr,
                &pip6->daddr, sizeof(*na)+na_olen, IPPROTO_ICMPV6,
                csum_partial(na, sizeof(*na)+na_olen, 0));

        pip6->payload_len = htons(sizeof(*na)+na_olen);

        skb_push(reply, sizeof(struct ipv6hdr));

        reply->ip_summed = CHECKSUM_UNNECESSARY;

        return reply;
}

static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
{
        struct vxlan_dev *vxlan = netdev_priv(dev);
        const struct in6_addr *daddr;
        const struct ipv6hdr *iphdr;
        struct inet6_dev *in6_dev;
        struct neighbour *n;
        struct nd_msg *msg;

        in6_dev = __in6_dev_get(dev);
        if (!in6_dev)
                goto out;

        iphdr = ipv6_hdr(skb);
        daddr = &iphdr->daddr;
        msg = (struct nd_msg *)(iphdr + 1);

        if (ipv6_addr_loopback(daddr) ||
            ipv6_addr_is_multicast(&msg->target))
                goto out;

        n = neigh_lookup(ipv6_stub->nd_tbl, &msg->target, dev);

        if (n) {
                struct vxlan_fdb *f;
                struct sk_buff *reply;

                if (!(n->nud_state & NUD_CONNECTED)) {
                        neigh_release(n);
                        goto out;
                }

                f = vxlan_find_mac(vxlan, n->ha, vni);
                if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
                        /* bridge-local neighbor */
                        neigh_release(n);
                        goto out;
                }

                reply = vxlan_na_create(skb, n,
                                        !!(f ? f->flags & NTF_ROUTER : 0));

                neigh_release(n);

                if (reply == NULL)
                        goto out;

                if (netif_rx_ni(reply) == NET_RX_DROP)
                        dev->stats.rx_dropped++;

        } else if (vxlan->cfg.flags & VXLAN_F_L3MISS) {
                union vxlan_addr ipa = {
                        .sin6.sin6_addr = msg->target,
                        .sin6.sin6_family = AF_INET6,
                };

                vxlan_ip_miss(dev, &ipa);
        }

out:
        consume_skb(skb);
        return NETDEV_TX_OK;
}
#endif

static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
{
        struct vxlan_dev *vxlan = netdev_priv(dev);
        struct neighbour *n;

        if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
                return false;

        n = NULL;
        switch (ntohs(eth_hdr(skb)->h_proto)) {
        case ETH_P_IP:
        {
                struct iphdr *pip;

                if (!pskb_may_pull(skb, sizeof(struct iphdr)))
                        return false;
                pip = ip_hdr(skb);
                n = neigh_lookup(&arp_tbl, &pip->daddr, dev);
                if (!n && (vxlan->cfg.flags & VXLAN_F_L3MISS)) {
                        union vxlan_addr ipa = {
                                .sin.sin_addr.s_addr = pip->daddr,
                                .sin.sin_family = AF_INET,
                        };

                        vxlan_ip_miss(dev, &ipa);
                        return false;
                }

                break;
        }
#if IS_ENABLED(CONFIG_IPV6)
        case ETH_P_IPV6:
        {
                struct ipv6hdr *pip6;

                if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
                        return false;
                pip6 = ipv6_hdr(skb);
                n = neigh_lookup(ipv6_stub->nd_tbl, &pip6->daddr, dev);
                if (!n && (vxlan->cfg.flags & VXLAN_F_L3MISS)) {
                        union vxlan_addr ipa = {
                                .sin6.sin6_addr = pip6->daddr,
                                .sin6.sin6_family = AF_INET6,
                        };

                        vxlan_ip_miss(dev, &ipa);
                        return false;
                }

                break;
        }
#endif
        default:
                return false;
        }

        if (n) {
                bool diff;

                diff = !ether_addr_equal(eth_hdr(skb)->h_dest, n->ha);
                if (diff) {
                        memcpy(eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
                               dev->addr_len);
                        memcpy(eth_hdr(skb)->h_dest, n->ha, dev->addr_len);
                }
                neigh_release(n);
                return diff;
        }

        return false;
}
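
/* Illustrative note, not part of the driver: this implements the "rsc"
 * (route short circuit) option. When two overlay subnets terminate on the
 * same gateway, rewriting the inner MAC here lets a frame go straight to
 * its final endpoint instead of hairpinning through the router, e.g.:
 *
 *   ip link add vxlan0 type vxlan id 42 dev eth0 rsc
 */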
1965
1966 static void vxlan_build_gbp_hdr(struct vxlanhdr *vxh, u32 vxflags,
1967 struct vxlan_metadata *md)
1968 {
1969 struct vxlanhdr_gbp *gbp;
1970
1971 if (!md->gbp)
1972 return;
1973
1974 gbp = (struct vxlanhdr_gbp *)vxh;
1975 vxh->vx_flags |= VXLAN_HF_GBP;
1976
1977 if (md->gbp & VXLAN_GBP_DONT_LEARN)
1978 gbp->dont_learn = 1;
1979
1980 if (md->gbp & VXLAN_GBP_POLICY_APPLIED)
1981 gbp->policy_applied = 1;
1982
1983 gbp->policy_id = htons(md->gbp & VXLAN_GBP_ID_MASK);
1984 }
1985
1986 static int vxlan_build_gpe_hdr(struct vxlanhdr *vxh, u32 vxflags,
1987 __be16 protocol)
1988 {
1989 struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)vxh;
1990
1991 gpe->np_applied = 1;
1992 gpe->next_protocol = tun_p_from_eth_p(protocol);
1993 if (!gpe->next_protocol)
1994 return -EPFNOSUPPORT;
1995 return 0;
1996 }
1997
1998 static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst,
1999 int iphdr_len, __be32 vni,
2000 struct vxlan_metadata *md, u32 vxflags,
2001 bool udp_sum)
2002 {
2003 struct vxlanhdr *vxh;
2004 int min_headroom;
2005 int err;
2006 int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
2007 __be16 inner_protocol = htons(ETH_P_TEB);
2008
2009 if ((vxflags & VXLAN_F_REMCSUM_TX) &&
2010 skb->ip_summed == CHECKSUM_PARTIAL) {
2011 int csum_start = skb_checksum_start_offset(skb);
2012
2013 if (csum_start <= VXLAN_MAX_REMCSUM_START &&
2014 !(csum_start & VXLAN_RCO_SHIFT_MASK) &&
2015 (skb->csum_offset == offsetof(struct udphdr, check) ||
2016 skb->csum_offset == offsetof(struct tcphdr, check)))
2017 type |= SKB_GSO_TUNNEL_REMCSUM;
2018 }
2019
2020 min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
2021 + VXLAN_HLEN + iphdr_len;
2022
2023 /* Need space for new headers (invalidates iph ptr) */
2024 err = skb_cow_head(skb, min_headroom);
2025 if (unlikely(err))
2026 return err;
2027
2028 err = iptunnel_handle_offloads(skb, type);
2029 if (err)
2030 return err;
2031
2032 vxh = __skb_push(skb, sizeof(*vxh));
2033 vxh->vx_flags = VXLAN_HF_VNI;
2034 vxh->vx_vni = vxlan_vni_field(vni);
2035
2036 if (type & SKB_GSO_TUNNEL_REMCSUM) {
2037 unsigned int start;
2038
2039 start = skb_checksum_start_offset(skb) - sizeof(struct vxlanhdr);
2040 vxh->vx_vni |= vxlan_compute_rco(start, skb->csum_offset);
2041 vxh->vx_flags |= VXLAN_HF_RCO;
2042
2043 if (!skb_is_gso(skb)) {
2044 skb->ip_summed = CHECKSUM_NONE;
2045 skb->encapsulation = 0;
2046 }
2047 }
2048
2049 if (vxflags & VXLAN_F_GBP)
2050 vxlan_build_gbp_hdr(vxh, vxflags, md);
2051 if (vxflags & VXLAN_F_GPE) {
2052 err = vxlan_build_gpe_hdr(vxh, vxflags, skb->protocol);
2053 if (err < 0)
2054 return err;
2055 inner_protocol = skb->protocol;
2056 }
2057
2058 skb_set_inner_protocol(skb, inner_protocol);
2059 return 0;
2060 }
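
/* After vxlan_build_skb() the frame is laid out as
 *
 *   outer L2 | outer IPv4/IPv6 | outer UDP | VXLAN | inner Ethernet frame
 *
 * which is where the fixed headroom constants come from:
 *   VXLAN_HEADROOM  = 20 (IPv4) + 8 (UDP) + 8 (VXLAN) + 14 (Ethernet) = 50
 *   VXLAN6_HEADROOM = 40 (IPv6) + 8 (UDP) + 8 (VXLAN) + 14 (Ethernet) = 70
 */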
2061
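/* Resolve the IPv4 route for the outer header. @saddr is both input and
 * output: the chosen source address is written back so the caller can use
 * it in the outer IP header. On success the route is stored in @dst_cache
 * unless caching had to be ruled out (e.g. a tos inherited per inner
 * packet makes the cached route invalid for other flows).
 */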
2062 static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, struct net_device *dev,
2063 struct vxlan_sock *sock4,
2064 struct sk_buff *skb, int oif, u8 tos,
2065 __be32 daddr, __be32 *saddr, __be16 dport, __be16 sport,
2066 struct dst_cache *dst_cache,
2067 const struct ip_tunnel_info *info)
2068 {
2069 bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
2070 struct rtable *rt = NULL;
2071 struct flowi4 fl4;
2072
2073 if (!sock4)
2074 return ERR_PTR(-EIO);
2075
2076 if (tos && !info)
2077 use_cache = false;
2078 if (use_cache) {
2079 rt = dst_cache_get_ip4(dst_cache, saddr);
2080 if (rt)
2081 return rt;
2082 }
2083
2084 memset(&fl4, 0, sizeof(fl4));
2085 fl4.flowi4_oif = oif;
2086 fl4.flowi4_tos = RT_TOS(tos);
2087 fl4.flowi4_mark = skb->mark;
2088 fl4.flowi4_proto = IPPROTO_UDP;
2089 fl4.daddr = daddr;
2090 fl4.saddr = *saddr;
2091 fl4.fl4_dport = dport;
2092 fl4.fl4_sport = sport;
2093
2094 rt = ip_route_output_key(vxlan->net, &fl4);
2095 if (likely(!IS_ERR(rt))) {
2096 if (rt->dst.dev == dev) {
2097 netdev_dbg(dev, "circular route to %pI4\n", &daddr);
2098 ip_rt_put(rt);
2099 return ERR_PTR(-ELOOP);
2100 }
2101
2102 *saddr = fl4.saddr;
2103 if (use_cache)
2104 dst_cache_set_ip4(dst_cache, &rt->dst, fl4.saddr);
2105 } else {
2106 netdev_dbg(dev, "no route to %pI4\n", &daddr);
2107 return ERR_PTR(-ENETUNREACH);
2108 }
2109 return rt;
2110 }
2111
2112 #if IS_ENABLED(CONFIG_IPV6)
2113 static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
2114 struct net_device *dev,
2115 struct vxlan_sock *sock6,
2116 struct sk_buff *skb, int oif, u8 tos,
2117 __be32 label,
2118 const struct in6_addr *daddr,
2119 struct in6_addr *saddr,
2120 __be16 dport, __be16 sport,
2121 struct dst_cache *dst_cache,
2122 const struct ip_tunnel_info *info)
2123 {
2124 bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
2125 struct dst_entry *ndst;
2126 struct flowi6 fl6;
2127 int err;
2128
2129 if (!sock6)
2130 return ERR_PTR(-EIO);
2131
2132 if (tos && !info)
2133 use_cache = false;
2134 if (use_cache) {
2135 ndst = dst_cache_get_ip6(dst_cache, saddr);
2136 if (ndst)
2137 return ndst;
2138 }
2139
2140 memset(&fl6, 0, sizeof(fl6));
2141 fl6.flowi6_oif = oif;
2142 fl6.daddr = *daddr;
2143 fl6.saddr = *saddr;
2144 fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tos), label);
2145 fl6.flowi6_mark = skb->mark;
2146 fl6.flowi6_proto = IPPROTO_UDP;
2147 fl6.fl6_dport = dport;
2148 fl6.fl6_sport = sport;
2149
2150 err = ipv6_stub->ipv6_dst_lookup(vxlan->net,
2151 sock6->sock->sk,
2152 &ndst, &fl6);
2153 if (unlikely(err < 0)) {
2154 netdev_dbg(dev, "no route to %pI6\n", daddr);
2155 return ERR_PTR(-ENETUNREACH);
2156 }
2157
2158 if (unlikely(ndst->dev == dev)) {
2159 netdev_dbg(dev, "circular route to %pI6\n", daddr);
2160 dst_release(ndst);
2161 return ERR_PTR(-ELOOP);
2162 }
2163
2164 *saddr = fl6.saddr;
2165 if (use_cache)
2166 dst_cache_set_ip6(dst_cache, ndst, saddr);
2167 return ndst;
2168 }
2169 #endif
2170
2171 /* Bypass encapsulation if the destination is local */
2172 static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
2173 struct vxlan_dev *dst_vxlan, __be32 vni)
2174 {
2175 struct pcpu_sw_netstats *tx_stats, *rx_stats;
2176 union vxlan_addr loopback;
2177 union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip;
2178 struct net_device *dev;
2179 int len = skb->len;
2180
2181 tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
2182 rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats);
2183 skb->pkt_type = PACKET_HOST;
2184 skb->encapsulation = 0;
2185 skb->dev = dst_vxlan->dev;
2186 __skb_pull(skb, skb_network_offset(skb));
2187
2188 if (remote_ip->sa.sa_family == AF_INET) {
2189 loopback.sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
2190 loopback.sa.sa_family = AF_INET;
2191 #if IS_ENABLED(CONFIG_IPV6)
2192 } else {
2193 loopback.sin6.sin6_addr = in6addr_loopback;
2194 loopback.sa.sa_family = AF_INET6;
2195 #endif
2196 }
2197
2198 rcu_read_lock();
2199 dev = skb->dev;
2200 if (unlikely(!(dev->flags & IFF_UP))) {
2201 kfree_skb(skb);
2202 goto drop;
2203 }
2204
2205 if (dst_vxlan->cfg.flags & VXLAN_F_LEARN)
2206 vxlan_snoop(dev, &loopback, eth_hdr(skb)->h_source, 0, vni);
2207
2208 u64_stats_update_begin(&tx_stats->syncp);
2209 tx_stats->tx_packets++;
2210 tx_stats->tx_bytes += len;
2211 u64_stats_update_end(&tx_stats->syncp);
2212
2213 if (netif_rx(skb) == NET_RX_SUCCESS) {
2214 u64_stats_update_begin(&rx_stats->syncp);
2215 rx_stats->rx_packets++;
2216 rx_stats->rx_bytes += len;
2217 u64_stats_update_end(&rx_stats->syncp);
2218 } else {
2219 drop:
2220 dev->stats.rx_dropped++;
2221 }
2222 rcu_read_unlock();
2223 }
2224
2225 static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev,
2226 struct vxlan_dev *vxlan,
2227 union vxlan_addr *daddr,
2228 __be16 dst_port, int dst_ifindex, __be32 vni,
2229 struct dst_entry *dst,
2230 u32 rt_flags)
2231 {
2232 #if IS_ENABLED(CONFIG_IPV6)
2233 	/* IPv6 rt-flags are checked against RTF_LOCAL, but the value of
2234 	 * RTF_LOCAL is equal to RTCF_LOCAL. So, to keep the code simple,
2235 	 * we use RTCF_LOCAL, which works for both IPv4 and IPv6 route entries.
2236 	 */
2237 BUILD_BUG_ON(RTCF_LOCAL != RTF_LOCAL);
2238 #endif
2239 /* Bypass encapsulation if the destination is local */
2240 if (rt_flags & RTCF_LOCAL &&
2241 !(rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
2242 struct vxlan_dev *dst_vxlan;
2243
2244 dst_release(dst);
2245 dst_vxlan = vxlan_find_vni(vxlan->net, dst_ifindex, vni,
2246 daddr->sa.sa_family, dst_port,
2247 vxlan->cfg.flags);
2248 if (!dst_vxlan) {
2249 dev->stats.tx_errors++;
2250 kfree_skb(skb);
2251
2252 return -ENOENT;
2253 }
2254 vxlan_encap_bypass(skb, vxlan, dst_vxlan, vni);
2255 return 1;
2256 }
2257
2258 return 0;
2259 }
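
/* Typical case for the bypass above: two VXLAN devices with different
 * VNIs terminate on the same host and a guest behind one sends to a
 * guest behind the other. The route lookup returns a local route, so the
 * skb is handed directly to the peer vxlan device instead of being UDP
 * encapsulated towards ourselves.
 */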
2260
2261 static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
2262 __be32 default_vni, struct vxlan_rdst *rdst,
2263 bool did_rsc)
2264 {
2265 struct dst_cache *dst_cache;
2266 struct ip_tunnel_info *info;
2267 struct vxlan_dev *vxlan = netdev_priv(dev);
2268 const struct iphdr *old_iph = ip_hdr(skb);
2269 union vxlan_addr *dst;
2270 union vxlan_addr remote_ip, local_ip;
2271 struct vxlan_metadata _md;
2272 struct vxlan_metadata *md = &_md;
2273 __be16 src_port = 0, dst_port;
2274 struct dst_entry *ndst = NULL;
2275 __be32 vni, label;
2276 __u8 tos, ttl;
2277 int ifindex;
2278 int err;
2279 u32 flags = vxlan->cfg.flags;
2280 bool udp_sum = false;
2281 bool xnet = !net_eq(vxlan->net, dev_net(vxlan->dev));
2282
2283 info = skb_tunnel_info(skb);
2284
2285 if (rdst) {
2286 dst = &rdst->remote_ip;
2287 if (vxlan_addr_any(dst)) {
2288 if (did_rsc) {
2289 /* short-circuited back to local bridge */
2290 vxlan_encap_bypass(skb, vxlan, vxlan, default_vni);
2291 return;
2292 }
2293 goto drop;
2294 }
2295
2296 dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port;
2297 vni = (rdst->remote_vni) ? : default_vni;
2298 ifindex = rdst->remote_ifindex;
2299 local_ip = vxlan->cfg.saddr;
2300 dst_cache = &rdst->dst_cache;
2301 md->gbp = skb->mark;
2302 ttl = vxlan->cfg.ttl;
2303 if (!ttl && vxlan_addr_multicast(dst))
2304 ttl = 1;
2305
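		/* A tos of 1 conventionally means "inherit": iproute2
		 * requests inheritance from the inner header by setting
		 * tos to 1, hence the dsfield copy below.
		 */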
2306 tos = vxlan->cfg.tos;
2307 if (tos == 1)
2308 tos = ip_tunnel_get_dsfield(old_iph, skb);
2309
2310 if (dst->sa.sa_family == AF_INET)
2311 udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM_TX);
2312 else
2313 udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM6_TX);
2314 label = vxlan->cfg.label;
2315 } else {
2316 if (!info) {
2317 WARN_ONCE(1, "%s: Missing encapsulation instructions\n",
2318 dev->name);
2319 goto drop;
2320 }
2321 remote_ip.sa.sa_family = ip_tunnel_info_af(info);
2322 if (remote_ip.sa.sa_family == AF_INET) {
2323 remote_ip.sin.sin_addr.s_addr = info->key.u.ipv4.dst;
2324 local_ip.sin.sin_addr.s_addr = info->key.u.ipv4.src;
2325 } else {
2326 remote_ip.sin6.sin6_addr = info->key.u.ipv6.dst;
2327 local_ip.sin6.sin6_addr = info->key.u.ipv6.src;
2328 }
2329 dst = &remote_ip;
2330 dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port;
2331 vni = tunnel_id_to_key32(info->key.tun_id);
2332 ifindex = 0;
2333 dst_cache = &info->dst_cache;
2334 if (info->options_len)
2335 md = ip_tunnel_info_opts(info);
2336 ttl = info->key.ttl;
2337 tos = info->key.tos;
2338 label = info->key.label;
2339 udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
2340 }
2341 src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
2342 vxlan->cfg.port_max, true);
2343
2344 rcu_read_lock();
2345 if (dst->sa.sa_family == AF_INET) {
2346 struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock);
2347 struct rtable *rt;
2348 __be16 df = 0;
2349
2350 rt = vxlan_get_route(vxlan, dev, sock4, skb, ifindex, tos,
2351 dst->sin.sin_addr.s_addr,
2352 &local_ip.sin.sin_addr.s_addr,
2353 dst_port, src_port,
2354 dst_cache, info);
2355 if (IS_ERR(rt)) {
2356 err = PTR_ERR(rt);
2357 goto tx_error;
2358 }
2359
2360 if (fan_has_map(&vxlan->fan) && rt->rt_flags & RTCF_LOCAL) {
2361 netdev_dbg(dev, "discard fan to localhost %pI4\n",
2362 &dst->sin.sin_addr.s_addr);
2363 ip_rt_put(rt);
2364 goto tx_free;
2365 }
2366
2367 /* Bypass encapsulation if the destination is local */
2368 if (!info) {
2369 err = encap_bypass_if_local(skb, dev, vxlan, dst,
2370 dst_port, ifindex, vni,
2371 &rt->dst, rt->rt_flags);
2372 if (err)
2373 goto out_unlock;
2374 } else if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT) {
2375 df = htons(IP_DF);
2376 }
2377
2378 ndst = &rt->dst;
2379 if (skb_dst(skb)) {
2380 int mtu = dst_mtu(ndst) - VXLAN_HEADROOM;
2381
2382 skb_dst_update_pmtu(skb, mtu);
2383 }
2384
2385 tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
2386 ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
2387 err = vxlan_build_skb(skb, ndst, sizeof(struct iphdr),
2388 vni, md, flags, udp_sum);
2389 if (err < 0)
2390 goto tx_error;
2391
2392 udp_tunnel_xmit_skb(rt, sock4->sock->sk, skb, local_ip.sin.sin_addr.s_addr,
2393 dst->sin.sin_addr.s_addr, tos, ttl, df,
2394 src_port, dst_port, xnet, !udp_sum);
2395 #if IS_ENABLED(CONFIG_IPV6)
2396 } else {
2397 struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock);
2398
2399 ndst = vxlan6_get_route(vxlan, dev, sock6, skb, ifindex, tos,
2400 label, &dst->sin6.sin6_addr,
2401 &local_ip.sin6.sin6_addr,
2402 dst_port, src_port,
2403 dst_cache, info);
2404 if (IS_ERR(ndst)) {
2405 err = PTR_ERR(ndst);
2406 ndst = NULL;
2407 goto tx_error;
2408 }
2409
2410 if (!info) {
2411 u32 rt6i_flags = ((struct rt6_info *)ndst)->rt6i_flags;
2412
2413 err = encap_bypass_if_local(skb, dev, vxlan, dst,
2414 dst_port, ifindex, vni,
2415 ndst, rt6i_flags);
2416 if (err)
2417 goto out_unlock;
2418 }
2419
2420 if (skb_dst(skb)) {
2421 int mtu = dst_mtu(ndst) - VXLAN6_HEADROOM;
2422
2423 skb_dst_update_pmtu(skb, mtu);
2424 }
2425
2426 tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
2427 ttl = ttl ? : ip6_dst_hoplimit(ndst);
2428 skb_scrub_packet(skb, xnet);
2429 err = vxlan_build_skb(skb, ndst, sizeof(struct ipv6hdr),
2430 vni, md, flags, udp_sum);
2431 if (err < 0)
2432 goto tx_error;
2433
2434 udp_tunnel6_xmit_skb(ndst, sock6->sock->sk, skb, dev,
2435 &local_ip.sin6.sin6_addr,
2436 &dst->sin6.sin6_addr, tos, ttl,
2437 label, src_port, dst_port, !udp_sum);
2438 #endif
2439 }
2440 out_unlock:
2441 rcu_read_unlock();
2442 return;
2443
2444 drop:
2445 dev->stats.tx_dropped++;
2446 dev_kfree_skb(skb);
2447 return;
2448
2449 tx_error:
2450 rcu_read_unlock();
2451 if (err == -ELOOP)
2452 dev->stats.collisions++;
2453 else if (err == -ENETUNREACH)
2454 dev->stats.tx_carrier_errors++;
2455 dst_release(ndst);
2456 dev->stats.tx_errors++;
2457 tx_free:
2458 kfree_skb(skb);
2459 }
2460
2461 /* Transmit local packets over VXLAN
2462  *
2463  * Outer IP header inherits ECN and DF from the inner header.
2464  * Outer UDP destination is the VXLAN assigned port.
2465  * Source port is based on a hash of the flow.
2466  */
2467 static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
2468 {
2469 struct vxlan_dev *vxlan = netdev_priv(dev);
2470 struct vxlan_rdst *rdst, *fdst = NULL;
2471 const struct ip_tunnel_info *info;
2472 bool did_rsc = false;
2473 struct vxlan_fdb *f;
2474 struct ethhdr *eth;
2475 __be32 vni = 0;
2476
2477 info = skb_tunnel_info(skb);
2478
2479 skb_reset_mac_header(skb);
2480
2481 if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) {
2482 if (info && info->mode & IP_TUNNEL_INFO_BRIDGE &&
2483 info->mode & IP_TUNNEL_INFO_TX) {
2484 vni = tunnel_id_to_key32(info->key.tun_id);
2485 } else {
2486 if (info && info->mode & IP_TUNNEL_INFO_TX)
2487 vxlan_xmit_one(skb, dev, vni, NULL, false);
2488 else
2489 kfree_skb(skb);
2490 return NETDEV_TX_OK;
2491 }
2492 }
2493
2494 if (vxlan->cfg.flags & VXLAN_F_PROXY) {
2495 eth = eth_hdr(skb);
2496 if (ntohs(eth->h_proto) == ETH_P_ARP)
2497 return arp_reduce(dev, skb, vni);
2498 #if IS_ENABLED(CONFIG_IPV6)
2499 else if (ntohs(eth->h_proto) == ETH_P_IPV6 &&
2500 pskb_may_pull(skb, sizeof(struct ipv6hdr) +
2501 sizeof(struct nd_msg)) &&
2502 ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
2503 struct nd_msg *m = (struct nd_msg *)(ipv6_hdr(skb) + 1);
2504
2505 if (m->icmph.icmp6_code == 0 &&
2506 m->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION)
2507 return neigh_reduce(dev, skb, vni);
2508 }
2509 #endif
2510 }
2511
2512 	if (fan_has_map(&vxlan->fan)) {
2513 		struct vxlan_rdst fan_rdst;
2514 
		eth = eth_hdr(skb); /* eth is only set above when VXLAN_F_PROXY is on */
2515 		netdev_dbg(vxlan->dev, "vxlan_xmit p %x d %pM\n",
2516 			   eth->h_proto, eth->h_dest);
2517 if (vxlan_fan_build_rdst(vxlan, skb, &fan_rdst)) {
2518 dev->stats.tx_dropped++;
2519 kfree_skb(skb);
2520 return NETDEV_TX_OK;
2521 }
2522 		vxlan_xmit_one(skb, dev, vni, &fan_rdst, false);
2523 return NETDEV_TX_OK;
2524 }
2525
2526 eth = eth_hdr(skb);
2527 f = vxlan_find_mac(vxlan, eth->h_dest, vni);
2528 did_rsc = false;
2529
2530 if (f && (f->flags & NTF_ROUTER) && (vxlan->cfg.flags & VXLAN_F_RSC) &&
2531 (ntohs(eth->h_proto) == ETH_P_IP ||
2532 ntohs(eth->h_proto) == ETH_P_IPV6)) {
2533 did_rsc = route_shortcircuit(dev, skb);
2534 if (did_rsc)
2535 f = vxlan_find_mac(vxlan, eth->h_dest, vni);
2536 }
2537
2538 if (f == NULL) {
2539 f = vxlan_find_mac(vxlan, all_zeros_mac, vni);
2540 if (f == NULL) {
2541 if ((vxlan->cfg.flags & VXLAN_F_L2MISS) &&
2542 !is_multicast_ether_addr(eth->h_dest))
2543 vxlan_fdb_miss(vxlan, eth->h_dest);
2544
2545 dev->stats.tx_dropped++;
2546 kfree_skb(skb);
2547 return NETDEV_TX_OK;
2548 }
2549 }
2550
2551 list_for_each_entry_rcu(rdst, &f->remotes, list) {
2552 struct sk_buff *skb1;
2553
2554 if (!fdst) {
2555 fdst = rdst;
2556 continue;
2557 }
2558 skb1 = skb_clone(skb, GFP_ATOMIC);
2559 if (skb1)
2560 vxlan_xmit_one(skb1, dev, vni, rdst, did_rsc);
2561 }
2562
2563 if (fdst)
2564 vxlan_xmit_one(skb, dev, vni, fdst, did_rsc);
2565 else
2566 kfree_skb(skb);
2567 return NETDEV_TX_OK;
2568 }
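
/* Illustrative iproute2 usage exercising this transmit path (example
 * only, not part of the driver):
 *
 *   # multicast-flooded VXLAN on VNI 42 over eth0:
 *   ip link add vxlan0 type vxlan id 42 group 239.1.1.1 dev eth0 \
 *           dstport 4789
 *   # static unicast entry, found by vxlan_find_mac() above:
 *   bridge fdb append 00:11:22:33:44:55 dev vxlan0 dst 192.0.2.10
 */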
2569
2570 /* Walk the forwarding table and purge stale entries */
2571 static void vxlan_cleanup(struct timer_list *t)
2572 {
2573 struct vxlan_dev *vxlan = from_timer(vxlan, t, age_timer);
2574 unsigned long next_timer = jiffies + FDB_AGE_INTERVAL;
2575 unsigned int h;
2576
2577 if (!netif_running(vxlan->dev))
2578 return;
2579
2580 for (h = 0; h < FDB_HASH_SIZE; ++h) {
2581 struct hlist_node *p, *n;
2582
2583 spin_lock_bh(&vxlan->hash_lock);
2584 hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
2585 struct vxlan_fdb *f
2586 = container_of(p, struct vxlan_fdb, hlist);
2587 unsigned long timeout;
2588
2589 if (f->state & (NUD_PERMANENT | NUD_NOARP))
2590 continue;
2591
2592 if (f->flags & NTF_EXT_LEARNED)
2593 continue;
2594
2595 timeout = f->used + vxlan->cfg.age_interval * HZ;
2596 if (time_before_eq(timeout, jiffies)) {
2597 netdev_dbg(vxlan->dev,
2598 "garbage collect %pM\n",
2599 f->eth_addr);
2600 f->state = NUD_STALE;
2601 vxlan_fdb_destroy(vxlan, f, true);
2602 } else if (time_before(timeout, next_timer))
2603 next_timer = timeout;
2604 }
2605 spin_unlock_bh(&vxlan->hash_lock);
2606 }
2607
2608 mod_timer(&vxlan->age_timer, next_timer);
2609 }
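
/* Worked example of the ageing arithmetic above: with the default
 * age_interval of 300 s, an entry last used at jiffies J expires at
 * J + 300 * HZ. The timer re-arms at the earliest pending expiry and in
 * any case no later than FDB_AGE_INTERVAL (10 s) from now.
 */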
2610
2611 static void vxlan_vs_del_dev(struct vxlan_dev *vxlan)
2612 {
2613 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
2614
2615 spin_lock(&vn->sock_lock);
2616 hlist_del_init_rcu(&vxlan->hlist4.hlist);
2617 #if IS_ENABLED(CONFIG_IPV6)
2618 hlist_del_init_rcu(&vxlan->hlist6.hlist);
2619 #endif
2620 spin_unlock(&vn->sock_lock);
2621 }
2622
2623 static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan,
2624 struct vxlan_dev_node *node)
2625 {
2626 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
2627 __be32 vni = vxlan->default_dst.remote_vni;
2628
2629 node->vxlan = vxlan;
2630 spin_lock(&vn->sock_lock);
2631 hlist_add_head_rcu(&node->hlist, vni_head(vs, vni));
2632 spin_unlock(&vn->sock_lock);
2633 }
2634
2635 /* Set up stats when the device is created */
2636 static int vxlan_init(struct net_device *dev)
2637 {
2638 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
2639 if (!dev->tstats)
2640 return -ENOMEM;
2641
2642 return 0;
2643 }
2644
2645 static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan, __be32 vni)
2646 {
2647 struct vxlan_fdb *f;
2648
2649 spin_lock_bh(&vxlan->hash_lock);
2650 f = __vxlan_find_mac(vxlan, all_zeros_mac, vni);
2651 if (f)
2652 vxlan_fdb_destroy(vxlan, f, true);
2653 spin_unlock_bh(&vxlan->hash_lock);
2654 }
2655
2656 static void vxlan_uninit(struct net_device *dev)
2657 {
2658 struct vxlan_dev *vxlan = netdev_priv(dev);
2659
2660 gro_cells_destroy(&vxlan->gro_cells);
2661
2662 vxlan_fdb_delete_default(vxlan, vxlan->cfg.vni);
2663
2664 free_percpu(dev->tstats);
2665 }
2666
2667 /* Start ageing timer and join group when device is brought up */
2668 static int vxlan_open(struct net_device *dev)
2669 {
2670 struct vxlan_dev *vxlan = netdev_priv(dev);
2671 int ret;
2672
2673 ret = vxlan_sock_add(vxlan);
2674 if (ret < 0)
2675 return ret;
2676
2677 if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) {
2678 ret = vxlan_igmp_join(vxlan);
2679 if (ret == -EADDRINUSE)
2680 ret = 0;
2681 if (ret) {
2682 vxlan_sock_release(vxlan);
2683 return ret;
2684 }
2685 }
2686
2687 if (vxlan->cfg.age_interval)
2688 mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL);
2689
2690 return ret;
2691 }
2692
2693 /* Purge the forwarding table */
2694 static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all)
2695 {
2696 unsigned int h;
2697
2698 spin_lock_bh(&vxlan->hash_lock);
2699 for (h = 0; h < FDB_HASH_SIZE; ++h) {
2700 struct hlist_node *p, *n;
2701 hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
2702 struct vxlan_fdb *f
2703 = container_of(p, struct vxlan_fdb, hlist);
2704 if (!do_all && (f->state & (NUD_PERMANENT | NUD_NOARP)))
2705 continue;
2706 /* the all_zeros_mac entry is deleted at vxlan_uninit */
2707 if (!is_zero_ether_addr(f->eth_addr))
2708 vxlan_fdb_destroy(vxlan, f, true);
2709 }
2710 }
2711 spin_unlock_bh(&vxlan->hash_lock);
2712 }
2713
2714 /* Cleanup timer and forwarding table on shutdown */
2715 static int vxlan_stop(struct net_device *dev)
2716 {
2717 struct vxlan_dev *vxlan = netdev_priv(dev);
2718 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
2719 int ret = 0;
2720
2721 if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
2722 !vxlan_group_used(vn, vxlan))
2723 ret = vxlan_igmp_leave(vxlan);
2724
2725 del_timer_sync(&vxlan->age_timer);
2726
2727 vxlan_flush(vxlan, false);
2728 vxlan_sock_release(vxlan);
2729
2730 return ret;
2731 }
2732
2733 /* Stub, nothing needs to be done. */
2734 static void vxlan_set_multicast_list(struct net_device *dev)
2735 {
2736 }
2737
2738 static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
2739 {
2740 struct vxlan_dev *vxlan = netdev_priv(dev);
2741 struct vxlan_rdst *dst = &vxlan->default_dst;
2742 struct net_device *lowerdev = __dev_get_by_index(vxlan->net,
2743 dst->remote_ifindex);
2744 bool use_ipv6 = !!(vxlan->cfg.flags & VXLAN_F_IPV6);
2745
2746 	/* This check differs from the dev->max_mtu check because it uses
2747 	 * the current lowerdev->mtu rather than the static dev->max_mtu.
2748 	 */
2749 if (lowerdev) {
2750 int max_mtu = lowerdev->mtu -
2751 (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
2752 if (new_mtu > max_mtu)
2753 return -EINVAL;
2754 }
2755
2756 dev->mtu = new_mtu;
2757 return 0;
2758 }
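
/* E.g. with an IPv4 underlay whose lowerdev has the standard 1500 byte
 * MTU, the largest value accepted here is 1500 - VXLAN_HEADROOM (50)
 * = 1450; over IPv6 it is 1500 - VXLAN6_HEADROOM (70) = 1430.
 */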
2759
2760 static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
2761 {
2762 struct vxlan_dev *vxlan = netdev_priv(dev);
2763 struct ip_tunnel_info *info = skb_tunnel_info(skb);
2764 __be16 sport, dport;
2765
2766 sport = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
2767 vxlan->cfg.port_max, true);
2768 dport = info->key.tp_dst ? : vxlan->cfg.dst_port;
2769
2770 if (ip_tunnel_info_af(info) == AF_INET) {
2771 struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock);
2772 struct rtable *rt;
2773
2774 rt = vxlan_get_route(vxlan, dev, sock4, skb, 0, info->key.tos,
2775 info->key.u.ipv4.dst,
2776 &info->key.u.ipv4.src, dport, sport,
2777 &info->dst_cache, info);
2778 if (IS_ERR(rt))
2779 return PTR_ERR(rt);
2780 ip_rt_put(rt);
2781 } else {
2782 #if IS_ENABLED(CONFIG_IPV6)
2783 struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock);
2784 struct dst_entry *ndst;
2785
2786 ndst = vxlan6_get_route(vxlan, dev, sock6, skb, 0, info->key.tos,
2787 info->key.label, &info->key.u.ipv6.dst,
2788 &info->key.u.ipv6.src, dport, sport,
2789 &info->dst_cache, info);
2790 if (IS_ERR(ndst))
2791 return PTR_ERR(ndst);
2792 dst_release(ndst);
2793 #else /* !CONFIG_IPV6 */
2794 return -EPFNOSUPPORT;
2795 #endif
2796 }
2797 info->key.tp_src = sport;
2798 info->key.tp_dst = dport;
2799 return 0;
2800 }
2801
2802 static const struct net_device_ops vxlan_netdev_ether_ops = {
2803 .ndo_init = vxlan_init,
2804 .ndo_uninit = vxlan_uninit,
2805 .ndo_open = vxlan_open,
2806 .ndo_stop = vxlan_stop,
2807 .ndo_start_xmit = vxlan_xmit,
2808 .ndo_get_stats64 = ip_tunnel_get_stats64,
2809 .ndo_set_rx_mode = vxlan_set_multicast_list,
2810 .ndo_change_mtu = vxlan_change_mtu,
2811 .ndo_validate_addr = eth_validate_addr,
2812 .ndo_set_mac_address = eth_mac_addr,
2813 .ndo_fdb_add = vxlan_fdb_add,
2814 .ndo_fdb_del = vxlan_fdb_delete,
2815 .ndo_fdb_dump = vxlan_fdb_dump,
2816 .ndo_fill_metadata_dst = vxlan_fill_metadata_dst,
2817 };
2818
2819 static const struct net_device_ops vxlan_netdev_raw_ops = {
2820 .ndo_init = vxlan_init,
2821 .ndo_uninit = vxlan_uninit,
2822 .ndo_open = vxlan_open,
2823 .ndo_stop = vxlan_stop,
2824 .ndo_start_xmit = vxlan_xmit,
2825 .ndo_get_stats64 = ip_tunnel_get_stats64,
2826 .ndo_change_mtu = vxlan_change_mtu,
2827 .ndo_fill_metadata_dst = vxlan_fill_metadata_dst,
2828 };
2829
2830 /* Info for udev: this device is a virtual tunnel endpoint */
2831 static struct device_type vxlan_type = {
2832 .name = "vxlan",
2833 };
2834
2835 /* Calls the ndo_udp_tunnel_add or ndo_udp_tunnel_del of the given
2836  * device to push or drop the listening VXLAN UDP ports, depending on
2837  * @push. Callers are expected to implement those ndo hooks.
2838  */
2839 static void vxlan_offload_rx_ports(struct net_device *dev, bool push)
2840 {
2841 struct vxlan_sock *vs;
2842 struct net *net = dev_net(dev);
2843 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
2844 unsigned int i;
2845
2846 spin_lock(&vn->sock_lock);
2847 for (i = 0; i < PORT_HASH_SIZE; ++i) {
2848 hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) {
2849 unsigned short type;
2850
2851 if (vs->flags & VXLAN_F_GPE)
2852 type = UDP_TUNNEL_TYPE_VXLAN_GPE;
2853 else
2854 type = UDP_TUNNEL_TYPE_VXLAN;
2855
2856 if (push)
2857 udp_tunnel_push_rx_port(dev, vs->sock, type);
2858 else
2859 udp_tunnel_drop_rx_port(dev, vs->sock, type);
2860 }
2861 }
2862 spin_unlock(&vn->sock_lock);
2863 }
2864
2865 /* Initialize the device structure. */
2866 static void vxlan_setup(struct net_device *dev)
2867 {
2868 struct vxlan_dev *vxlan = netdev_priv(dev);
2869 unsigned int h;
2870
2871 eth_hw_addr_random(dev);
2872 ether_setup(dev);
2873
2874 dev->needs_free_netdev = true;
2875 SET_NETDEV_DEVTYPE(dev, &vxlan_type);
2876
2877 dev->features |= NETIF_F_LLTX;
2878 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
2879 dev->features |= NETIF_F_RXCSUM;
2880 dev->features |= NETIF_F_GSO_SOFTWARE;
2881
2882 dev->vlan_features = dev->features;
2883 dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
2884 dev->hw_features |= NETIF_F_GSO_SOFTWARE;
2885 netif_keep_dst(dev);
2886 dev->priv_flags |= IFF_NO_QUEUE;
2887
2888 /* MTU range: 68 - 65535 */
2889 dev->min_mtu = ETH_MIN_MTU;
2890 dev->max_mtu = ETH_MAX_MTU;
2891
2892 INIT_LIST_HEAD(&vxlan->next);
2893 spin_lock_init(&vxlan->hash_lock);
2894
2895 timer_setup(&vxlan->age_timer, vxlan_cleanup, TIMER_DEFERRABLE);
2896
2897 vxlan->dev = dev;
2898
2899 gro_cells_init(&vxlan->gro_cells, dev);
2900
2901 for (h = 0; h < FDB_HASH_SIZE; ++h)
2902 INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
2903
2904 INIT_LIST_HEAD(&vxlan->fan.fan_maps);
2905 }
2906
2907 static void vxlan_ether_setup(struct net_device *dev)
2908 {
2909 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
2910 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
2911 dev->netdev_ops = &vxlan_netdev_ether_ops;
2912 }
2913
2914 static void vxlan_raw_setup(struct net_device *dev)
2915 {
2916 dev->header_ops = NULL;
2917 dev->type = ARPHRD_NONE;
2918 dev->hard_header_len = 0;
2919 dev->addr_len = 0;
2920 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
2921 dev->netdev_ops = &vxlan_netdev_raw_ops;
2922 }
2923
2924 static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
2925 [IFLA_VXLAN_ID] = { .type = NLA_U32 },
2926 [IFLA_VXLAN_GROUP] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
2927 [IFLA_VXLAN_GROUP6] = { .len = sizeof(struct in6_addr) },
2928 [IFLA_VXLAN_LINK] = { .type = NLA_U32 },
2929 [IFLA_VXLAN_LOCAL] = { .len = FIELD_SIZEOF(struct iphdr, saddr) },
2930 [IFLA_VXLAN_LOCAL6] = { .len = sizeof(struct in6_addr) },
2931 [IFLA_VXLAN_TOS] = { .type = NLA_U8 },
2932 [IFLA_VXLAN_TTL] = { .type = NLA_U8 },
2933 [IFLA_VXLAN_LABEL] = { .type = NLA_U32 },
2934 [IFLA_VXLAN_LEARNING] = { .type = NLA_U8 },
2935 [IFLA_VXLAN_AGEING] = { .type = NLA_U32 },
2936 [IFLA_VXLAN_LIMIT] = { .type = NLA_U32 },
2937 [IFLA_VXLAN_PORT_RANGE] = { .len = sizeof(struct ifla_vxlan_port_range) },
2938 [IFLA_VXLAN_PROXY] = { .type = NLA_U8 },
2939 [IFLA_VXLAN_RSC] = { .type = NLA_U8 },
2940 [IFLA_VXLAN_L2MISS] = { .type = NLA_U8 },
2941 [IFLA_VXLAN_L3MISS] = { .type = NLA_U8 },
2942 [IFLA_VXLAN_COLLECT_METADATA] = { .type = NLA_U8 },
2943 [IFLA_VXLAN_PORT] = { .type = NLA_U16 },
2944 [IFLA_VXLAN_UDP_CSUM] = { .type = NLA_U8 },
2945 [IFLA_VXLAN_UDP_ZERO_CSUM6_TX] = { .type = NLA_U8 },
2946 [IFLA_VXLAN_UDP_ZERO_CSUM6_RX] = { .type = NLA_U8 },
2947 [IFLA_VXLAN_REMCSUM_TX] = { .type = NLA_U8 },
2948 [IFLA_VXLAN_REMCSUM_RX] = { .type = NLA_U8 },
2949 [IFLA_VXLAN_GBP] = { .type = NLA_FLAG, },
2950 [IFLA_VXLAN_GPE] = { .type = NLA_FLAG, },
2951 [IFLA_VXLAN_REMCSUM_NOPARTIAL] = { .type = NLA_FLAG },
2952 };
2953
2954 static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[],
2955 struct netlink_ext_ack *extack)
2956 {
2957 if (tb[IFLA_ADDRESS]) {
2958 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
2959 NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_ADDRESS],
2960 "Provided link layer address is not Ethernet");
2961 return -EINVAL;
2962 }
2963
2964 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
2965 NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_ADDRESS],
2966 "Provided Ethernet address is not unicast");
2967 return -EADDRNOTAVAIL;
2968 }
2969 }
2970
2971 if (tb[IFLA_MTU]) {
2972 u32 mtu = nla_get_u32(tb[IFLA_MTU]);
2973
2974 if (mtu < ETH_MIN_MTU || mtu > ETH_MAX_MTU) {
2975 NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_MTU],
2976 "MTU must be between 68 and 65535");
2977 return -EINVAL;
2978 }
2979 }
2980
2981 if (!data) {
2982 NL_SET_ERR_MSG(extack,
2983 "Required attributes not provided to perform the operation");
2984 return -EINVAL;
2985 }
2986
2987 if (data[IFLA_VXLAN_ID]) {
2988 u32 id = nla_get_u32(data[IFLA_VXLAN_ID]);
2989
2990 if (id >= VXLAN_N_VID) {
2991 NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_ID],
2992 "VXLAN ID must be lower than 16777216");
2993 return -ERANGE;
2994 }
2995 }
2996
2997 if (data[IFLA_VXLAN_PORT_RANGE]) {
2998 const struct ifla_vxlan_port_range *p
2999 = nla_data(data[IFLA_VXLAN_PORT_RANGE]);
3000
3001 if (ntohs(p->high) < ntohs(p->low)) {
3002 NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_PORT_RANGE],
3003 "Invalid source port range");
3004 return -EINVAL;
3005 }
3006 }
3007
3008 return 0;
3009 }
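
/* Example: a netlink request carrying IFLA_VXLAN_ID 16777216 is rejected
 * with -ERANGE above, since valid VNIs occupy 24 bits (0..16777215).
 */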
3010
3011 static void vxlan_get_drvinfo(struct net_device *netdev,
3012 struct ethtool_drvinfo *drvinfo)
3013 {
3014 strlcpy(drvinfo->version, VXLAN_VERSION, sizeof(drvinfo->version));
3015 strlcpy(drvinfo->driver, "vxlan", sizeof(drvinfo->driver));
3016 }
3017
3018 static const struct ethtool_ops vxlan_ethtool_ops = {
3019 .get_drvinfo = vxlan_get_drvinfo,
3020 .get_link = ethtool_op_get_link,
3021 };
3022
3023 static struct socket *vxlan_create_sock(struct net *net, bool ipv6,
3024 __be16 port, u32 flags)
3025 {
3026 struct socket *sock;
3027 struct udp_port_cfg udp_conf;
3028 int err;
3029
3030 memset(&udp_conf, 0, sizeof(udp_conf));
3031
3032 if (ipv6) {
3033 udp_conf.family = AF_INET6;
3034 udp_conf.use_udp6_rx_checksums =
3035 !(flags & VXLAN_F_UDP_ZERO_CSUM6_RX);
3036 udp_conf.ipv6_v6only = 1;
3037 } else {
3038 udp_conf.family = AF_INET;
3039 }
3040
3041 udp_conf.local_udp_port = port;
3042
3043 /* Open UDP socket */
3044 err = udp_sock_create(net, &udp_conf, &sock);
3045 if (err < 0)
3046 return ERR_PTR(err);
3047
3048 return sock;
3049 }
3050
3051 /* Create new listen socket if needed */
3052 static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6,
3053 __be16 port, u32 flags)
3054 {
3055 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
3056 struct vxlan_sock *vs;
3057 struct socket *sock;
3058 unsigned int h;
3059 struct udp_tunnel_sock_cfg tunnel_cfg;
3060
3061 vs = kzalloc(sizeof(*vs), GFP_KERNEL);
3062 if (!vs)
3063 return ERR_PTR(-ENOMEM);
3064
3065 for (h = 0; h < VNI_HASH_SIZE; ++h)
3066 INIT_HLIST_HEAD(&vs->vni_list[h]);
3067
3068 sock = vxlan_create_sock(net, ipv6, port, flags);
3069 if (IS_ERR(sock)) {
3070 kfree(vs);
3071 return ERR_CAST(sock);
3072 }
3073
3074 vs->sock = sock;
3075 refcount_set(&vs->refcnt, 1);
3076 vs->flags = (flags & VXLAN_F_RCV_FLAGS);
3077
3078 spin_lock(&vn->sock_lock);
3079 hlist_add_head_rcu(&vs->hlist, vs_head(net, port));
3080 udp_tunnel_notify_add_rx_port(sock,
3081 (vs->flags & VXLAN_F_GPE) ?
3082 UDP_TUNNEL_TYPE_VXLAN_GPE :
3083 UDP_TUNNEL_TYPE_VXLAN);
3084 spin_unlock(&vn->sock_lock);
3085
3086 /* Mark socket as an encapsulation socket. */
3087 memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
3088 tunnel_cfg.sk_user_data = vs;
3089 tunnel_cfg.encap_type = 1;
3090 tunnel_cfg.encap_rcv = vxlan_rcv;
3091 tunnel_cfg.encap_destroy = NULL;
3092 tunnel_cfg.gro_receive = vxlan_gro_receive;
3093 tunnel_cfg.gro_complete = vxlan_gro_complete;
3094
3095 setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
3096
3097 return vs;
3098 }
3099
3100 static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6)
3101 {
3102 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
3103 struct vxlan_sock *vs = NULL;
3104 struct vxlan_dev_node *node;
3105
3106 if (!vxlan->cfg.no_share) {
3107 spin_lock(&vn->sock_lock);
3108 vs = vxlan_find_sock(vxlan->net, ipv6 ? AF_INET6 : AF_INET,
3109 vxlan->cfg.dst_port, vxlan->cfg.flags);
3110 if (vs && !refcount_inc_not_zero(&vs->refcnt)) {
3111 spin_unlock(&vn->sock_lock);
3112 return -EBUSY;
3113 }
3114 spin_unlock(&vn->sock_lock);
3115 }
3116 if (!vs)
3117 vs = vxlan_socket_create(vxlan->net, ipv6,
3118 vxlan->cfg.dst_port, vxlan->cfg.flags);
3119 if (IS_ERR(vs))
3120 return PTR_ERR(vs);
3121 #if IS_ENABLED(CONFIG_IPV6)
3122 if (ipv6) {
3123 rcu_assign_pointer(vxlan->vn6_sock, vs);
3124 node = &vxlan->hlist6;
3125 } else
3126 #endif
3127 {
3128 rcu_assign_pointer(vxlan->vn4_sock, vs);
3129 node = &vxlan->hlist4;
3130 }
3131 vxlan_vs_add_dev(vs, vxlan, node);
3132 return 0;
3133 }
3134
3135 static int vxlan_sock_add(struct vxlan_dev *vxlan)
3136 {
3137 bool metadata = vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA;
3138 bool ipv6 = vxlan->cfg.flags & VXLAN_F_IPV6 || metadata;
3139 bool ipv4 = !ipv6 || metadata;
3140 int ret = 0;
3141
3142 RCU_INIT_POINTER(vxlan->vn4_sock, NULL);
3143 #if IS_ENABLED(CONFIG_IPV6)
3144 RCU_INIT_POINTER(vxlan->vn6_sock, NULL);
3145 if (ipv6) {
3146 ret = __vxlan_sock_add(vxlan, true);
3147 if (ret < 0 && ret != -EAFNOSUPPORT)
3148 ipv4 = false;
3149 }
3150 #endif
3151 if (ipv4)
3152 ret = __vxlan_sock_add(vxlan, false);
3153 if (ret < 0)
3154 vxlan_sock_release(vxlan);
3155 return ret;
3156 }
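
/* In COLLECT_METADATA mode both address families are usable, so both
 * sockets are opened: the IPv6 socket is v6only, and the IPv4 socket is
 * added as well unless IPv6 socket creation failed for a reason other
 * than missing IPv6 support.
 */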
3157
3158 static int vxlan_config_validate(struct net *src_net, struct vxlan_config *conf,
3159 struct net_device **lower,
3160 struct vxlan_dev *old,
3161 struct netlink_ext_ack *extack)
3162 {
3163 struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
3164 struct vxlan_dev *tmp;
3165 bool use_ipv6 = false;
3166
3167 if (conf->flags & VXLAN_F_GPE) {
3168 		/* For now, allow GPE only together with
3169 		 * COLLECT_METADATA. This can be relaxed later; in that
3170 		 * case, the other end of the point-to-point link will
3171 		 * have to be provided.
3172 		 */
3173 if ((conf->flags & ~VXLAN_F_ALLOWED_GPE) ||
3174 !(conf->flags & VXLAN_F_COLLECT_METADATA)) {
3175 NL_SET_ERR_MSG(extack,
3176 "VXLAN GPE does not support this combination of attributes");
3177 return -EINVAL;
3178 }
3179 }
3180
3181 if (!conf->remote_ip.sa.sa_family && !conf->saddr.sa.sa_family) {
3182 /* Unless IPv6 is explicitly requested, assume IPv4 */
3183 conf->remote_ip.sa.sa_family = AF_INET;
3184 conf->saddr.sa.sa_family = AF_INET;
3185 } else if (!conf->remote_ip.sa.sa_family) {
3186 conf->remote_ip.sa.sa_family = conf->saddr.sa.sa_family;
3187 } else if (!conf->saddr.sa.sa_family) {
3188 conf->saddr.sa.sa_family = conf->remote_ip.sa.sa_family;
3189 }
3190
3191 if (conf->saddr.sa.sa_family != conf->remote_ip.sa.sa_family) {
3192 NL_SET_ERR_MSG(extack,
3193 "Local and remote address must be from the same family");
3194 return -EINVAL;
3195 }
3196
3197 if (vxlan_addr_multicast(&conf->saddr)) {
3198 NL_SET_ERR_MSG(extack, "Local address cannot be multicast");
3199 return -EINVAL;
3200 }
3201
3202 if (conf->saddr.sa.sa_family == AF_INET6) {
3203 if (!IS_ENABLED(CONFIG_IPV6)) {
3204 NL_SET_ERR_MSG(extack,
3205 "IPv6 support not enabled in the kernel");
3206 return -EPFNOSUPPORT;
3207 }
3208 use_ipv6 = true;
3209 conf->flags |= VXLAN_F_IPV6;
3210
3211 if (!(conf->flags & VXLAN_F_COLLECT_METADATA)) {
3212 int local_type =
3213 ipv6_addr_type(&conf->saddr.sin6.sin6_addr);
3214 int remote_type =
3215 ipv6_addr_type(&conf->remote_ip.sin6.sin6_addr);
3216
3217 if (local_type & IPV6_ADDR_LINKLOCAL) {
3218 if (!(remote_type & IPV6_ADDR_LINKLOCAL) &&
3219 (remote_type != IPV6_ADDR_ANY)) {
3220 NL_SET_ERR_MSG(extack,
3221 "Invalid combination of local and remote address scopes");
3222 return -EINVAL;
3223 }
3224
3225 conf->flags |= VXLAN_F_IPV6_LINKLOCAL;
3226 } else {
3227 if (remote_type ==
3228 (IPV6_ADDR_UNICAST | IPV6_ADDR_LINKLOCAL)) {
3229 NL_SET_ERR_MSG(extack,
3230 "Invalid combination of local and remote address scopes");
3231 return -EINVAL;
3232 }
3233
3234 conf->flags &= ~VXLAN_F_IPV6_LINKLOCAL;
3235 }
3236 }
3237 }
3238
3239 if (conf->label && !use_ipv6) {
3240 NL_SET_ERR_MSG(extack,
3241 "Label attribute only applies to IPv6 VXLAN devices");
3242 return -EINVAL;
3243 }
3244
3245 if (conf->remote_ifindex) {
3246 struct net_device *lowerdev;
3247
3248 lowerdev = __dev_get_by_index(src_net, conf->remote_ifindex);
3249 if (!lowerdev) {
3250 NL_SET_ERR_MSG(extack,
3251 "Invalid local interface, device not found");
3252 return -ENODEV;
3253 }
3254
3255 #if IS_ENABLED(CONFIG_IPV6)
3256 if (use_ipv6) {
3257 struct inet6_dev *idev = __in6_dev_get(lowerdev);
3258 if (idev && idev->cnf.disable_ipv6) {
3259 NL_SET_ERR_MSG(extack,
3260 "IPv6 support disabled by administrator");
3261 return -EPERM;
3262 }
3263 }
3264 #endif
3265
3266 *lower = lowerdev;
3267 } else {
3268 if (vxlan_addr_multicast(&conf->remote_ip)) {
3269 NL_SET_ERR_MSG(extack,
3270 "Local interface required for multicast remote destination");
3271
3272 return -EINVAL;
3273 }
3274
3275 #if IS_ENABLED(CONFIG_IPV6)
3276 if (conf->flags & VXLAN_F_IPV6_LINKLOCAL) {
3277 NL_SET_ERR_MSG(extack,
3278 "Local interface required for link-local local/remote addresses");
3279 return -EINVAL;
3280 }
3281 #endif
3282
3283 *lower = NULL;
3284 }
3285
3286 if (!conf->dst_port) {
3287 if (conf->flags & VXLAN_F_GPE)
3288 conf->dst_port = htons(4790); /* IANA VXLAN-GPE port */
3289 else
3290 conf->dst_port = htons(vxlan_port);
3291 }
3292
3293 if (!conf->age_interval)
3294 conf->age_interval = FDB_AGE_DEFAULT;
3295
3296 list_for_each_entry(tmp, &vn->vxlan_list, next) {
3297 if (tmp == old)
3298 continue;
3299
3300 if (tmp->cfg.vni != conf->vni)
3301 continue;
3302 if (tmp->cfg.dst_port != conf->dst_port)
3303 continue;
3304 if ((tmp->cfg.flags & (VXLAN_F_RCV_FLAGS | VXLAN_F_IPV6)) !=
3305 (conf->flags & (VXLAN_F_RCV_FLAGS | VXLAN_F_IPV6)))
3306 continue;
3307
3308 if ((conf->flags & VXLAN_F_IPV6_LINKLOCAL) &&
3309 tmp->cfg.remote_ifindex != conf->remote_ifindex)
3310 continue;
3311
3312 NL_SET_ERR_MSG(extack,
3313 "A VXLAN device with the specified VNI already exists");
3314 return -EEXIST;
3315 }
3316
3317 return 0;
3318 }
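
/* The loop above is what makes "ip link add ... type vxlan id 42 ..."
 * fail with EEXIST when another vxlan device already terminates VNI 42
 * on the same UDP port and address family.
 */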
3319
3320 static void vxlan_config_apply(struct net_device *dev,
3321 struct vxlan_config *conf,
3322 struct net_device *lowerdev,
3323 struct net *src_net,
3324 bool changelink)
3325 {
3326 struct vxlan_dev *vxlan = netdev_priv(dev);
3327 struct vxlan_rdst *dst = &vxlan->default_dst;
3328 unsigned short needed_headroom = ETH_HLEN;
3329 bool use_ipv6 = !!(conf->flags & VXLAN_F_IPV6);
3330 int max_mtu = ETH_MAX_MTU;
3331
3332 if (!changelink) {
3333 if (conf->flags & VXLAN_F_GPE)
3334 vxlan_raw_setup(dev);
3335 else
3336 vxlan_ether_setup(dev);
3337
3338 if (conf->mtu)
3339 dev->mtu = conf->mtu;
3340
3341 vxlan->net = src_net;
3342 }
3343
3344 dst->remote_vni = conf->vni;
3345
3346 memcpy(&dst->remote_ip, &conf->remote_ip, sizeof(conf->remote_ip));
3347
3348 if (lowerdev) {
3349 dst->remote_ifindex = conf->remote_ifindex;
3350
3351 dev->gso_max_size = lowerdev->gso_max_size;
3352 dev->gso_max_segs = lowerdev->gso_max_segs;
3353
3354 needed_headroom = lowerdev->hard_header_len;
3355
3356 max_mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM :
3357 VXLAN_HEADROOM);
3358 if (max_mtu < ETH_MIN_MTU)
3359 max_mtu = ETH_MIN_MTU;
3360
3361 if (!changelink && !conf->mtu)
3362 dev->mtu = max_mtu;
3363 }
3364
3365 if (dev->mtu > max_mtu)
3366 dev->mtu = max_mtu;
3367
3368 if (use_ipv6 || conf->flags & VXLAN_F_COLLECT_METADATA)
3369 needed_headroom += VXLAN6_HEADROOM;
3370 else
3371 needed_headroom += VXLAN_HEADROOM;
3372 dev->needed_headroom = needed_headroom;
3373
3374 memcpy(&vxlan->cfg, conf, sizeof(*conf));
3375 }
3376
3377 static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
3378 struct vxlan_config *conf, bool changelink,
3379 struct netlink_ext_ack *extack)
3380 {
3381 struct vxlan_dev *vxlan = netdev_priv(dev);
3382 struct net_device *lowerdev;
3383 int ret;
3384
3385 ret = vxlan_config_validate(src_net, conf, &lowerdev, vxlan, extack);
3386 if (ret)
3387 return ret;
3388
3389 vxlan_config_apply(dev, conf, lowerdev, src_net, changelink);
3390
3391 return 0;
3392 }
3393
3394 static int __vxlan_dev_create(struct net *net, struct net_device *dev,
3395 struct vxlan_config *conf,
3396 struct netlink_ext_ack *extack)
3397 {
3398 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
3399 struct vxlan_dev *vxlan = netdev_priv(dev);
3400 struct vxlan_fdb *f = NULL;
3401 int err;
3402
3403 err = vxlan_dev_configure(net, dev, conf, false, extack);
3404 if (err)
3405 return err;
3406
3407 dev->ethtool_ops = &vxlan_ethtool_ops;
3408
3409 /* create an fdb entry for a valid default destination */
3410 if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
3411 err = vxlan_fdb_create(vxlan, all_zeros_mac,
3412 &vxlan->default_dst.remote_ip,
3413 NUD_REACHABLE | NUD_PERMANENT,
3414 vxlan->cfg.dst_port,
3415 vxlan->default_dst.remote_vni,
3416 vxlan->default_dst.remote_vni,
3417 vxlan->default_dst.remote_ifindex,
3418 NTF_SELF, &f);
3419 if (err)
3420 return err;
3421 }
3422
3423 err = register_netdevice(dev);
3424 if (err)
3425 goto errout;
3426
3427 err = rtnl_configure_link(dev, NULL);
3428 if (err) {
3429 unregister_netdevice(dev);
3430 goto errout;
3431 }
3432
3433 /* notify default fdb entry */
3434 if (f)
3435 vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH);
3436
3437 list_add(&vxlan->next, &vn->vxlan_list);
3438 return 0;
3439 errout:
3440 if (f)
3441 vxlan_fdb_destroy(vxlan, f, false);
3442 return err;
3443 }
3444
3445 static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
3446 struct net_device *dev, struct vxlan_config *conf,
3447 bool changelink)
3448 {
3449 struct vxlan_dev *vxlan = netdev_priv(dev);
3450 int err;
3451
3452 memset(conf, 0, sizeof(*conf));
3453
3454 /* if changelink operation, start with old existing cfg */
3455 if (changelink)
3456 memcpy(conf, &vxlan->cfg, sizeof(*conf));
3457
3458 if (data[IFLA_VXLAN_ID]) {
3459 __be32 vni = cpu_to_be32(nla_get_u32(data[IFLA_VXLAN_ID]));
3460
3461 if (changelink && (vni != conf->vni))
3462 return -EOPNOTSUPP;
3463 		conf->vni = vni;
3464 }
3465
3466 if (data[IFLA_VXLAN_GROUP]) {
3467 if (changelink && (conf->remote_ip.sa.sa_family != AF_INET))
3468 return -EOPNOTSUPP;
3469
3470 conf->remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]);
3471 conf->remote_ip.sa.sa_family = AF_INET;
3472 } else if (data[IFLA_VXLAN_GROUP6]) {
3473 if (!IS_ENABLED(CONFIG_IPV6))
3474 return -EPFNOSUPPORT;
3475
3476 if (changelink && (conf->remote_ip.sa.sa_family != AF_INET6))
3477 return -EOPNOTSUPP;
3478
3479 conf->remote_ip.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_GROUP6]);
3480 conf->remote_ip.sa.sa_family = AF_INET6;
3481 }
3482
3483 if (data[IFLA_VXLAN_FAN_MAP]) {
3484 err = vxlan_parse_fan_map(data, vxlan);
3485 if (err)
3486 return err;
3487 }
3488
3489 if (data[IFLA_VXLAN_LOCAL]) {
3490 if (changelink && (conf->saddr.sa.sa_family != AF_INET))
3491 return -EOPNOTSUPP;
3492
3493 conf->saddr.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_LOCAL]);
3494 conf->saddr.sa.sa_family = AF_INET;
3495 } else if (data[IFLA_VXLAN_LOCAL6]) {
3496 if (!IS_ENABLED(CONFIG_IPV6))
3497 return -EPFNOSUPPORT;
3498
3499 if (changelink && (conf->saddr.sa.sa_family != AF_INET6))
3500 return -EOPNOTSUPP;
3501
3502 /* TODO: respect scope id */
3503 conf->saddr.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_LOCAL6]);
3504 conf->saddr.sa.sa_family = AF_INET6;
3505 }
3506
3507 if (data[IFLA_VXLAN_LINK])
3508 conf->remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]);
3509
3510 if (data[IFLA_VXLAN_TOS])
3511 conf->tos = nla_get_u8(data[IFLA_VXLAN_TOS]);
3512
3513 if (data[IFLA_VXLAN_TTL])
3514 conf->ttl = nla_get_u8(data[IFLA_VXLAN_TTL]);
3515
3516 if (data[IFLA_VXLAN_LABEL])
3517 conf->label = nla_get_be32(data[IFLA_VXLAN_LABEL]) &
3518 IPV6_FLOWLABEL_MASK;
3519
3520 if (data[IFLA_VXLAN_LEARNING]) {
3521 if (nla_get_u8(data[IFLA_VXLAN_LEARNING]))
3522 conf->flags |= VXLAN_F_LEARN;
3523 else
3524 conf->flags &= ~VXLAN_F_LEARN;
3525 } else if (!changelink) {
3526 /* default to learn on a new device */
3527 conf->flags |= VXLAN_F_LEARN;
3528 }
3529
3530 if (data[IFLA_VXLAN_AGEING]) {
3531 if (changelink)
3532 return -EOPNOTSUPP;
3533 conf->age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);
3534 }
3535
3536 if (data[IFLA_VXLAN_PROXY]) {
3537 if (changelink)
3538 return -EOPNOTSUPP;
3539 if (nla_get_u8(data[IFLA_VXLAN_PROXY]))
3540 conf->flags |= VXLAN_F_PROXY;
3541 }
3542
3543 if (data[IFLA_VXLAN_RSC]) {
3544 if (changelink)
3545 return -EOPNOTSUPP;
3546 if (nla_get_u8(data[IFLA_VXLAN_RSC]))
3547 conf->flags |= VXLAN_F_RSC;
3548 }
3549
3550 if (data[IFLA_VXLAN_L2MISS]) {
3551 if (changelink)
3552 return -EOPNOTSUPP;
3553 if (nla_get_u8(data[IFLA_VXLAN_L2MISS]))
3554 conf->flags |= VXLAN_F_L2MISS;
3555 }
3556
3557 if (data[IFLA_VXLAN_L3MISS]) {
3558 if (changelink)
3559 return -EOPNOTSUPP;
3560 if (nla_get_u8(data[IFLA_VXLAN_L3MISS]))
3561 conf->flags |= VXLAN_F_L3MISS;
3562 }
3563
3564 if (data[IFLA_VXLAN_LIMIT]) {
3565 if (changelink)
3566 return -EOPNOTSUPP;
3567 conf->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);
3568 }
3569
3570 if (data[IFLA_VXLAN_COLLECT_METADATA]) {
3571 if (changelink)
3572 return -EOPNOTSUPP;
3573 if (nla_get_u8(data[IFLA_VXLAN_COLLECT_METADATA]))
3574 conf->flags |= VXLAN_F_COLLECT_METADATA;
3575 }
3576
3577 if (data[IFLA_VXLAN_PORT_RANGE]) {
3578 if (!changelink) {
3579 const struct ifla_vxlan_port_range *p
3580 = nla_data(data[IFLA_VXLAN_PORT_RANGE]);
3581 conf->port_min = ntohs(p->low);
3582 conf->port_max = ntohs(p->high);
3583 } else {
3584 return -EOPNOTSUPP;
3585 }
3586 }
3587
3588 if (data[IFLA_VXLAN_PORT]) {
3589 if (changelink)
3590 return -EOPNOTSUPP;
3591 conf->dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]);
3592 }
3593
3594 if (data[IFLA_VXLAN_UDP_CSUM]) {
3595 if (changelink)
3596 return -EOPNOTSUPP;
3597 if (!nla_get_u8(data[IFLA_VXLAN_UDP_CSUM]))
3598 conf->flags |= VXLAN_F_UDP_ZERO_CSUM_TX;
3599 }
3600
3601 if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]) {
3602 if (changelink)
3603 return -EOPNOTSUPP;
3604 if (nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]))
3605 conf->flags |= VXLAN_F_UDP_ZERO_CSUM6_TX;
3606 }
3607
3608 if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]) {
3609 if (changelink)
3610 return -EOPNOTSUPP;
3611 if (nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]))
3612 conf->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX;
3613 }
3614
3615 if (data[IFLA_VXLAN_REMCSUM_TX]) {
3616 if (changelink)
3617 return -EOPNOTSUPP;
3618 if (nla_get_u8(data[IFLA_VXLAN_REMCSUM_TX]))
3619 conf->flags |= VXLAN_F_REMCSUM_TX;
3620 }
3621
3622 if (data[IFLA_VXLAN_REMCSUM_RX]) {
3623 if (changelink)
3624 return -EOPNOTSUPP;
3625 if (nla_get_u8(data[IFLA_VXLAN_REMCSUM_RX]))
3626 conf->flags |= VXLAN_F_REMCSUM_RX;
3627 }
3628
3629 if (data[IFLA_VXLAN_GBP]) {
3630 if (changelink)
3631 return -EOPNOTSUPP;
3632 conf->flags |= VXLAN_F_GBP;
3633 }
3634
3635 if (data[IFLA_VXLAN_GPE]) {
3636 if (changelink)
3637 return -EOPNOTSUPP;
3638 conf->flags |= VXLAN_F_GPE;
3639 }
3640
3641 if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL]) {
3642 if (changelink)
3643 return -EOPNOTSUPP;
3644 conf->flags |= VXLAN_F_REMCSUM_NOPARTIAL;
3645 }
3646
3647 if (tb[IFLA_MTU]) {
3648 if (changelink)
3649 return -EOPNOTSUPP;
3650 conf->mtu = nla_get_u32(tb[IFLA_MTU]);
3651 }
3652
3653 return 0;
3654 }
3655
3656 static int vxlan_newlink(struct net *src_net, struct net_device *dev,
3657 struct nlattr *tb[], struct nlattr *data[],
3658 struct netlink_ext_ack *extack)
3659 {
3660 struct vxlan_config conf;
3661 int err;
3662
3663 err = vxlan_nl2conf(tb, data, dev, &conf, false);
3664 if (err)
3665 return err;
3666
3667 return __vxlan_dev_create(src_net, dev, &conf, extack);
3668 }
3669
3670 static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
3671 struct nlattr *data[],
3672 struct netlink_ext_ack *extack)
3673 {
3674 struct vxlan_dev *vxlan = netdev_priv(dev);
3675 struct vxlan_rdst *dst = &vxlan->default_dst;
3676 struct vxlan_rdst old_dst;
3677 struct vxlan_config conf;
3678 struct vxlan_fdb *f = NULL;
3679 int err;
3680
3681 err = vxlan_nl2conf(tb, data,
3682 dev, &conf, true);
3683 if (err)
3684 return err;
3685
3686 memcpy(&old_dst, dst, sizeof(struct vxlan_rdst));
3687
3688 err = vxlan_dev_configure(vxlan->net, dev, &conf, true, extack);
3689 if (err)
3690 return err;
3691
3692 /* handle default dst entry */
3693 if (!vxlan_addr_equal(&dst->remote_ip, &old_dst.remote_ip)) {
3694 spin_lock_bh(&vxlan->hash_lock);
3695 if (!vxlan_addr_any(&old_dst.remote_ip))
3696 __vxlan_fdb_delete(vxlan, all_zeros_mac,
3697 old_dst.remote_ip,
3698 vxlan->cfg.dst_port,
3699 old_dst.remote_vni,
3700 old_dst.remote_vni,
3701 old_dst.remote_ifindex, 0);
3702
3703 if (!vxlan_addr_any(&dst->remote_ip)) {
3704 err = vxlan_fdb_create(vxlan, all_zeros_mac,
3705 &dst->remote_ip,
3706 NUD_REACHABLE | NUD_PERMANENT,
3707 vxlan->cfg.dst_port,
3708 dst->remote_vni,
3709 dst->remote_vni,
3710 dst->remote_ifindex,
3711 NTF_SELF, &f);
3712 if (err) {
3713 spin_unlock_bh(&vxlan->hash_lock);
3714 return err;
3715 }
3716 vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH);
3717 }
3718 spin_unlock_bh(&vxlan->hash_lock);
3719 }
3720
3721 return 0;
3722 }
3723
3724 static void vxlan_dellink(struct net_device *dev, struct list_head *head)
3725 {
3726 struct vxlan_dev *vxlan = netdev_priv(dev);
3727
3728 vxlan_flush(vxlan, true);
3729
3730 list_del(&vxlan->next);
3731 unregister_netdevice_queue(dev, head);
3732 }
3733
3734 static size_t vxlan_get_size(const struct net_device *dev)
3735 {
3737 return nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_ID */
3738 nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_GROUP{6} */
3739 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LINK */
3740 nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_LOCAL{6} */
3741 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL */
3742 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TOS */
3743 nla_total_size(sizeof(__be32)) + /* IFLA_VXLAN_LABEL */
3744 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LEARNING */
3745 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_PROXY */
3746 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_RSC */
3747 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_L2MISS */
3748 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_L3MISS */
3749 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_COLLECT_METADATA */
3750 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_AGEING */
3751 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LIMIT */
3752 nla_total_size(sizeof(struct ifla_vxlan_port_range)) +
3753 nla_total_size(sizeof(__be16)) + /* IFLA_VXLAN_PORT */
3754 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_CSUM */
3755 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_TX */
3756 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_RX */
3757 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_TX */
3758 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_RX */
3759 nla_total_size(sizeof(struct ip_fan_map) * 256) +
3760 0;
3761 }
3762
3763 static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
3764 {
3765 const struct vxlan_dev *vxlan = netdev_priv(dev);
3766 const struct vxlan_rdst *dst = &vxlan->default_dst;
3767 struct ifla_vxlan_port_range ports = {
3768 .low = htons(vxlan->cfg.port_min),
3769 .high = htons(vxlan->cfg.port_max),
3770 };
3771
3772 if (nla_put_u32(skb, IFLA_VXLAN_ID, be32_to_cpu(dst->remote_vni)))
3773 goto nla_put_failure;
3774
3775 if (!vxlan_addr_any(&dst->remote_ip)) {
3776 if (dst->remote_ip.sa.sa_family == AF_INET) {
3777 if (nla_put_in_addr(skb, IFLA_VXLAN_GROUP,
3778 dst->remote_ip.sin.sin_addr.s_addr))
3779 goto nla_put_failure;
3780 #if IS_ENABLED(CONFIG_IPV6)
3781 } else {
3782 if (nla_put_in6_addr(skb, IFLA_VXLAN_GROUP6,
3783 &dst->remote_ip.sin6.sin6_addr))
3784 goto nla_put_failure;
3785 #endif
3786 }
3787 }
3788
3789 if (dst->remote_ifindex && nla_put_u32(skb, IFLA_VXLAN_LINK, dst->remote_ifindex))
3790 goto nla_put_failure;
3791
3792 if (!vxlan_addr_any(&vxlan->cfg.saddr)) {
3793 if (vxlan->cfg.saddr.sa.sa_family == AF_INET) {
3794 if (nla_put_in_addr(skb, IFLA_VXLAN_LOCAL,
3795 vxlan->cfg.saddr.sin.sin_addr.s_addr))
3796 goto nla_put_failure;
3797 #if IS_ENABLED(CONFIG_IPV6)
3798 } else {
3799 if (nla_put_in6_addr(skb, IFLA_VXLAN_LOCAL6,
3800 &vxlan->cfg.saddr.sin6.sin6_addr))
3801 goto nla_put_failure;
3802 #endif
3803 }
3804 }
3805
3806 if (fan_has_map(&vxlan->fan)) {
3807 struct nlattr *fan_nest;
3808 struct ip_fan_map *fan_map;
3809
3810 fan_nest = nla_nest_start(skb, IFLA_VXLAN_FAN_MAP);
3811 if (!fan_nest)
3812 goto nla_put_failure;
3813 list_for_each_entry_rcu(fan_map, &vxlan->fan.fan_maps, list) {
3814 struct ifla_fan_map map;
3815
3816 map.underlay = fan_map->underlay;
3817 map.underlay_prefix = fan_map->underlay_prefix;
3818 map.overlay = fan_map->overlay;
3819 map.overlay_prefix = fan_map->overlay_prefix;
3820 if (nla_put(skb, IFLA_FAN_MAPPING, sizeof(map), &map))
3821 goto nla_put_failure;
3822 }
3823 nla_nest_end(skb, fan_nest);
3824 }
3825
3826 if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->cfg.ttl) ||
3827 nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) ||
3828 nla_put_be32(skb, IFLA_VXLAN_LABEL, vxlan->cfg.label) ||
3829 nla_put_u8(skb, IFLA_VXLAN_LEARNING,
3830 !!(vxlan->cfg.flags & VXLAN_F_LEARN)) ||
3831 nla_put_u8(skb, IFLA_VXLAN_PROXY,
3832 !!(vxlan->cfg.flags & VXLAN_F_PROXY)) ||
3833 nla_put_u8(skb, IFLA_VXLAN_RSC,
3834 !!(vxlan->cfg.flags & VXLAN_F_RSC)) ||
3835 nla_put_u8(skb, IFLA_VXLAN_L2MISS,
3836 !!(vxlan->cfg.flags & VXLAN_F_L2MISS)) ||
3837 nla_put_u8(skb, IFLA_VXLAN_L3MISS,
3838 !!(vxlan->cfg.flags & VXLAN_F_L3MISS)) ||
3839 nla_put_u8(skb, IFLA_VXLAN_COLLECT_METADATA,
3840 !!(vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA)) ||
3841 nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->cfg.age_interval) ||
3842 nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->cfg.addrmax) ||
3843 nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->cfg.dst_port) ||
3844 nla_put_u8(skb, IFLA_VXLAN_UDP_CSUM,
3845 !(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM_TX)) ||
3846 nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_TX,
3847 !!(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) ||
3848 nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_RX,
3849 !!(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM6_RX)) ||
3850 nla_put_u8(skb, IFLA_VXLAN_REMCSUM_TX,
3851 !!(vxlan->cfg.flags & VXLAN_F_REMCSUM_TX)) ||
3852 nla_put_u8(skb, IFLA_VXLAN_REMCSUM_RX,
3853 !!(vxlan->cfg.flags & VXLAN_F_REMCSUM_RX)))
3854 goto nla_put_failure;
3855
3856 if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports))
3857 goto nla_put_failure;
3858
3859 if (vxlan->cfg.flags & VXLAN_F_GBP &&
3860 nla_put_flag(skb, IFLA_VXLAN_GBP))
3861 goto nla_put_failure;
3862
3863 if (vxlan->cfg.flags & VXLAN_F_GPE &&
3864 nla_put_flag(skb, IFLA_VXLAN_GPE))
3865 goto nla_put_failure;
3866
3867 if (vxlan->cfg.flags & VXLAN_F_REMCSUM_NOPARTIAL &&
3868 nla_put_flag(skb, IFLA_VXLAN_REMCSUM_NOPARTIAL))
3869 goto nla_put_failure;
3870
3871 return 0;
3872
3873 nla_put_failure:
3874 return -EMSGSIZE;
3875 }
3876
3877 static struct net *vxlan_get_link_net(const struct net_device *dev)
3878 {
3879 struct vxlan_dev *vxlan = netdev_priv(dev);
3880
3881 return vxlan->net;
3882 }
3883
3884 static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
3885 .kind = "vxlan",
3886 .maxtype = IFLA_VXLAN_MAX,
3887 .policy = vxlan_policy,
3888 .priv_size = sizeof(struct vxlan_dev),
3889 .setup = vxlan_setup,
3890 .validate = vxlan_validate,
3891 .newlink = vxlan_newlink,
3892 .changelink = vxlan_changelink,
3893 .dellink = vxlan_dellink,
3894 .get_size = vxlan_get_size,
3895 .fill_info = vxlan_fill_info,
3896 .get_link_net = vxlan_get_link_net,
3897 };
3898
3899 struct net_device *vxlan_dev_create(struct net *net, const char *name,
3900 u8 name_assign_type,
3901 struct vxlan_config *conf)
3902 {
3903 struct nlattr *tb[IFLA_MAX + 1];
3904 struct net_device *dev;
3905 int err;
3906
3907 memset(&tb, 0, sizeof(tb));
3908
3909 dev = rtnl_create_link(net, name, name_assign_type,
3910 &vxlan_link_ops, tb);
3911 if (IS_ERR(dev))
3912 return dev;
3913
3914 err = __vxlan_dev_create(net, dev, conf, NULL);
3915 if (err < 0) {
3916 free_netdev(dev);
3917 return ERR_PTR(err);
3918 }
3919
3920 err = rtnl_configure_link(dev, NULL);
3921 if (err < 0) {
3922 LIST_HEAD(list_kill);
3923
3924 vxlan_dellink(dev, &list_kill);
3925 unregister_netdevice_many(&list_kill);
3926 return ERR_PTR(err);
3927 }
3928
3929 return dev;
3930 }
3931 EXPORT_SYMBOL_GPL(vxlan_dev_create);
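
/* Sketch of an in-kernel caller (e.g. openvswitch creates metadata-based
 * tunnels roughly like this; the names below are illustrative, not
 * verbatim OVS code):
 *
 *	struct vxlan_config conf = {
 *		.flags    = VXLAN_F_COLLECT_METADATA,
 *		.dst_port = htons(4789),
 *	};
 *	struct net_device *vdev;
 *
 *	vdev = vxlan_dev_create(net, "vxlan_sys_4789", NET_NAME_USER, &conf);
 *	if (IS_ERR(vdev))
 *		return PTR_ERR(vdev);
 */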
3932
3933 static void vxlan_handle_lowerdev_unregister(struct vxlan_net *vn,
3934 struct net_device *dev)
3935 {
3936 struct vxlan_dev *vxlan, *next;
3937 LIST_HEAD(list_kill);
3938
3939 list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
3940 struct vxlan_rdst *dst = &vxlan->default_dst;
3941
3942 		/* In case we created the vxlan device with carrier
3943 		 * and we lose the carrier due to module unload
3944 		 * we also need to remove the vxlan device. In other
3945 		 * cases, it's not necessary and remote_ifindex
3946 		 * is 0 here, so nothing matches.
3947 		 */
3948 if (dst->remote_ifindex == dev->ifindex)
3949 vxlan_dellink(vxlan->dev, &list_kill);
3950 }
3951
3952 unregister_netdevice_many(&list_kill);
3953 }
3954
3955 static int vxlan_netdevice_event(struct notifier_block *unused,
3956 unsigned long event, void *ptr)
3957 {
3958 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3959 struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
3960
3961 if (event == NETDEV_UNREGISTER) {
3962 vxlan_offload_rx_ports(dev, false);
3963 vxlan_handle_lowerdev_unregister(vn, dev);
3964 } else if (event == NETDEV_REGISTER) {
3965 vxlan_offload_rx_ports(dev, true);
3966 } else if (event == NETDEV_UDP_TUNNEL_PUSH_INFO ||
3967 event == NETDEV_UDP_TUNNEL_DROP_INFO) {
3968 vxlan_offload_rx_ports(dev, event == NETDEV_UDP_TUNNEL_PUSH_INFO);
3969 }
3970
3971 return NOTIFY_DONE;
3972 }
3973
3974 static struct notifier_block vxlan_notifier_block __read_mostly = {
3975 .notifier_call = vxlan_netdevice_event,
3976 };
3977
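/* Per-namespace init: start with no vxlan devices and an empty hash
 * table of open VXLAN UDP sockets, both private to the namespace.
 */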
3978 static __net_init int vxlan_init_net(struct net *net)
3979 {
3980 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
3981 unsigned int h;
3982
3983 INIT_LIST_HEAD(&vn->vxlan_list);
3984 spin_lock_init(&vn->sock_lock);
3985
3986 for (h = 0; h < PORT_HASH_SIZE; ++h)
3987 INIT_HLIST_HEAD(&vn->sock_list[h]);
3988
3989 return 0;
3990 }
3991
3992 #ifdef CONFIG_SYSCTL
3993 static struct ctl_table_header *vxlan_fan_header;
3994 static unsigned int vxlan_fan_version = 4;
3995
3996 static struct ctl_table vxlan_fan_sysctls[] = {
3997 {
3998 .procname = "vxlan",
3999 .data = &vxlan_fan_version,
4000 .maxlen = sizeof(vxlan_fan_version),
4001 .mode = 0444,
4002 .proc_handler = proc_dointvec,
4003 },
4004 {},
4005 };
4006 #endif /* CONFIG_SYSCTL */
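/* The table above is registered under "net/fan" in vxlan_init_module(),
 * making the read-only FAN version visible to userspace, e.g.:
 *
 *   $ cat /proc/sys/net/fan/vxlan
 *   4
 */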
4007
4008 static void __net_exit vxlan_exit_net(struct net *net)
4009 {
4010 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
4011 struct vxlan_dev *vxlan, *next;
4012 struct net_device *dev, *aux;
4013 unsigned int h;
4014 LIST_HEAD(list);
4015
4016 rtnl_lock();
4017 for_each_netdev_safe(net, dev, aux)
4018 if (dev->rtnl_link_ops == &vxlan_link_ops)
4019 unregister_netdevice_queue(dev, &list);
4020
4021 list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
4022 /* If vxlan->dev is in the same netns, it has already been added
4023 * to the list by the previous loop.
4024 */
4025 if (!net_eq(dev_net(vxlan->dev), net))
4026 unregister_netdevice_queue(vxlan->dev, &list);
4027 }
4028
4029 unregister_netdevice_many(&list);
4030 rtnl_unlock();
4031
4032 for (h = 0; h < PORT_HASH_SIZE; ++h)
4033 WARN_ON_ONCE(!hlist_empty(&vn->sock_list[h]));
4034 }
4035
4036 static struct pernet_operations vxlan_net_ops = {
4037 .init = vxlan_init_net,
4038 .exit = vxlan_exit_net,
4039 .id = &vxlan_net_id,
4040 .size = sizeof(struct vxlan_net),
4041 };
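/* Since both .id and .size are set, the pernet core allocates a zeroed
 * struct vxlan_net for every network namespace and makes it available
 * via net_generic(net, vxlan_net_id), as used throughout this file.
 */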
4042
4043 static int __init vxlan_init_module(void)
4044 {
4045 int rc;
4046
4047 get_random_bytes(&vxlan_salt, sizeof(vxlan_salt));
4048
4049 rc = register_pernet_subsys(&vxlan_net_ops);
4050 if (rc)
4051 goto out1;
4052
4053 rc = register_netdevice_notifier(&vxlan_notifier_block);
4054 if (rc)
4055 goto out2;
4056
4057 rc = rtnl_link_register(&vxlan_link_ops);
4058 if (rc)
4059 goto out3;
4060
4061 #ifdef CONFIG_SYSCTL
4062 vxlan_fan_header = register_net_sysctl(&init_net, "net/fan",
4063 vxlan_fan_sysctls);
4064 if (!vxlan_fan_header) {
4065 rc = -ENOMEM;
4066 goto sysctl_failed;
4067 }
4068 #endif /* CONFIG_SYSCTL */
4069
4070 return 0;
4071 #ifdef CONFIG_SYSCTL
4072 sysctl_failed:
4073 rtnl_link_unregister(&vxlan_link_ops);
4074 #endif /* CONFIG_SYSCTL */
4075 out3:
4076 unregister_netdevice_notifier(&vxlan_notifier_block);
4077 out2:
4078 unregister_pernet_subsys(&vxlan_net_ops);
4079 out1:
4080 return rc;
4081 }
4082 late_initcall(vxlan_init_module);
4083
4084 static void __exit vxlan_cleanup_module(void)
4085 {
4086 #ifdef CONFIG_SYSCTL
4087 unregister_net_sysctl_table(vxlan_fan_header);
4088 #endif /* CONFIG_SYSCTL */
4089 rtnl_link_unregister(&vxlan_link_ops);
4090 unregister_netdevice_notifier(&vxlan_notifier_block);
4091 unregister_pernet_subsys(&vxlan_net_ops);
4092 /* rcu_barrier() is called by netns */
4093 }
4094 module_exit(vxlan_cleanup_module);
4095
4096 MODULE_LICENSE("GPL");
4097 MODULE_VERSION(VXLAN_VERSION);
4098 MODULE_AUTHOR("Stephen Hemminger <stephen@networkplumber.org>");
4099 MODULE_DESCRIPTION("Driver for VXLAN encapsulated traffic");
4100 MODULE_ALIAS_RTNL_LINK("vxlan");