/*
 * vrf.c: device driver to encapsulate a VRF space
 *
 * Copyright (c) 2015 Cumulus Networks. All rights reserved.
 * Copyright (c) 2015 Shrijeet Mukherjee <shm@cumulusnetworks.com>
 * Copyright (c) 2015 David Ahern <dsa@cumulusnetworks.com>
 *
 * Based on dummy, team and ipvlan drivers
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/netfilter.h>
#include <linux/rtnetlink.h>
#include <net/rtnetlink.h>
#include <linux/u64_stats_sync.h>
#include <linux/hashtable.h>

#include <linux/inetdevice.h>
#include <net/arp.h>
#include <net/ip.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/route.h>
#include <net/addrconf.h>
#include <net/l3mdev.h>
#include <net/fib_rules.h>
#include <net/netns/generic.h>
#define DRV_NAME	"vrf"
#define DRV_VERSION	"1.0"

#define FIB_RULE_PREF  1000       /* default preference for FIB rules */
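/* Example (hedged): with the default preference above, the l3mdev rules
 * installed by this driver show up in `ip rule show` roughly as
 * (illustrative output; iproute2 formatting may vary by version):
 *
 *	1000:	from all lookup [l3mdev-table]
 */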
static unsigned int vrf_net_id;

struct net_vrf {
	struct rtable __rcu	*rth;
	struct rt6_info	__rcu	*rt6;
	u32			tb_id;
};

struct pcpu_dstats {
	u64			tx_pkts;
	u64			tx_bytes;
	u64			tx_drps;
	u64			rx_pkts;
	u64			rx_bytes;
	u64			rx_drps;
	struct u64_stats_sync	syncp;
};
static void vrf_rx_stats(struct net_device *dev, int len)
{
	struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

	u64_stats_update_begin(&dstats->syncp);
	dstats->rx_pkts++;
	dstats->rx_bytes += len;
	u64_stats_update_end(&dstats->syncp);
}
static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb)
{
	vrf_dev->stats.tx_errors++;
	kfree_skb(skb);
}
static void vrf_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *stats)
{
	int i;

	for_each_possible_cpu(i) {
		const struct pcpu_dstats *dstats;
		u64 tbytes, tpkts, tdrops, rbytes, rpkts;
		unsigned int start;

		dstats = per_cpu_ptr(dev->dstats, i);
		do {
			start = u64_stats_fetch_begin_irq(&dstats->syncp);
			tbytes = dstats->tx_bytes;
			tpkts = dstats->tx_pkts;
			tdrops = dstats->tx_drps;
			rbytes = dstats->rx_bytes;
			rpkts = dstats->rx_pkts;
		} while (u64_stats_fetch_retry_irq(&dstats->syncp, start));
		stats->tx_bytes += tbytes;
		stats->tx_packets += tpkts;
		stats->tx_dropped += tdrops;
		stats->rx_bytes += rbytes;
		stats->rx_packets += rpkts;
	}
}
/* by default VRF devices do not have a qdisc and are expected
 * to be created with only a single queue.
 */
static bool qdisc_tx_is_default(const struct net_device *dev)
{
	struct netdev_queue *txq;
	struct Qdisc *qdisc;

	if (dev->num_tx_queues > 1)
		return false;

	txq = netdev_get_tx_queue(dev, 0);
	qdisc = rcu_access_pointer(txq->qdisc);

	return !qdisc->enqueue;
}
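/* Example (hedged): attaching a real qdisc makes the check above return
 * false, so locally generated traffic takes the slower redirect path
 * through dev_queue_xmit() instead of the direct path, e.g.:
 *
 *	tc qdisc add dev vrf-blue root netem delay 5ms
 *
 * "vrf-blue" is an illustrative device name.
 */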
/* Local traffic destined to local address. Reinsert the packet to rx
 * path, similar to loopback handling.
 */
static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev,
			  struct dst_entry *dst)
{
	int len = skb->len;

	skb_orphan(skb);

	skb_dst_set(skb, dst);
	skb_dst_force(skb);

	/* set pkt_type to avoid skb hitting packet taps twice -
	 * once on Tx and again in Rx processing
	 */
	skb->pkt_type = PACKET_LOOPBACK;

	skb->protocol = eth_type_trans(skb, dev);

	if (likely(netif_rx(skb) == NET_RX_SUCCESS))
		vrf_rx_stats(dev, len);
	else
		this_cpu_inc(dev->dstats->rx_drps);

	return NETDEV_TX_OK;
}
#if IS_ENABLED(CONFIG_IPV6)
static int vrf_ip6_local_out(struct net *net, struct sock *sk,
			     struct sk_buff *skb)
{
	int err;

	err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net,
		      sk, skb, NULL, skb_dst(skb)->dev, dst_output);

	if (likely(err == 1))
		err = dst_output(net, sk, skb);

	return err;
}
static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
					   struct net_device *dev)
{
	const struct ipv6hdr *iph;
	struct net *net = dev_net(skb->dev);
	struct flowi6 fl6;
	int ret = NET_XMIT_DROP;
	struct dst_entry *dst;
	struct dst_entry *dst_null = &net->ipv6.ip6_null_entry->dst;

	if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct ipv6hdr)))
		goto err;

	iph = ipv6_hdr(skb);

	memset(&fl6, 0, sizeof(fl6));
	/* needed to match OIF rule */
	fl6.flowi6_oif = dev->ifindex;
	fl6.flowi6_iif = LOOPBACK_IFINDEX;
	fl6.daddr = iph->daddr;
	fl6.saddr = iph->saddr;
	fl6.flowlabel = ip6_flowinfo(iph);
	fl6.flowi6_mark = skb->mark;
	fl6.flowi6_proto = iph->nexthdr;
	fl6.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF;

	dst = ip6_route_output(net, NULL, &fl6);
	if (dst == dst_null)
		goto err;

	skb_dst_drop(skb);

	/* if dst.dev is loopback or the VRF device again this is locally
	 * originated traffic destined to a local address. Short circuit
	 * to Rx path
	 */
	if (dst->dev == dev)
		return vrf_local_xmit(skb, dev, dst);

	skb_dst_set(skb, dst);

	/* strip the ethernet header added for pass through VRF device */
	__skb_pull(skb, skb_network_offset(skb));

	ret = vrf_ip6_local_out(net, skb->sk, skb);
	if (unlikely(net_xmit_eval(ret)))
		dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;

	return ret;
err:
	vrf_tx_error(dev, skb);
	return NET_XMIT_DROP;
}
#else
static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
					   struct net_device *dev)
{
	vrf_tx_error(dev, skb);
	return NET_XMIT_DROP;
}
#endif
/* based on ip_local_out; can't use it b/c the dst is switched pointing to us */
static int vrf_ip_local_out(struct net *net, struct sock *sk,
			    struct sk_buff *skb)
{
	int err;

	err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk,
		      skb, NULL, skb_dst(skb)->dev, dst_output);
	if (likely(err == 1))
		err = dst_output(net, sk, skb);

	return err;
}
static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
					   struct net_device *vrf_dev)
{
	struct iphdr *ip4h;
	int ret = NET_XMIT_DROP;
	struct flowi4 fl4;
	struct net *net = dev_net(vrf_dev);
	struct rtable *rt;

	if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct iphdr)))
		goto err;

	ip4h = ip_hdr(skb);

	memset(&fl4, 0, sizeof(fl4));
	/* needed to match OIF rule */
	fl4.flowi4_oif = vrf_dev->ifindex;
	fl4.flowi4_iif = LOOPBACK_IFINDEX;
	fl4.flowi4_tos = RT_TOS(ip4h->tos);
	fl4.flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_SKIP_NH_OIF;
	fl4.flowi4_proto = ip4h->protocol;
	fl4.daddr = ip4h->daddr;
	fl4.saddr = ip4h->saddr;

	rt = ip_route_output_flow(net, &fl4, NULL);
	if (IS_ERR(rt))
		goto err;

	skb_dst_drop(skb);

	/* if dst.dev is loopback or the VRF device again this is locally
	 * originated traffic destined to a local address. Short circuit
	 * to Rx path
	 */
	if (rt->dst.dev == vrf_dev)
		return vrf_local_xmit(skb, vrf_dev, &rt->dst);

	skb_dst_set(skb, &rt->dst);

	/* strip the ethernet header added for pass through VRF device */
	__skb_pull(skb, skb_network_offset(skb));

	if (!ip4h->saddr) {
		ip4h->saddr = inet_select_addr(skb_dst(skb)->dev, 0,
					       RT_SCOPE_LINK);
	}

	ret = vrf_ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
	if (unlikely(net_xmit_eval(ret)))
		vrf_dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;

out:
	return ret;
err:
	vrf_tx_error(vrf_dev, skb);
	goto out;
}
static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return vrf_process_v4_outbound(skb, dev);
	case htons(ETH_P_IPV6):
		return vrf_process_v6_outbound(skb, dev);
	default:
		vrf_tx_error(dev, skb);
		return NET_XMIT_DROP;
	}
}
static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int len = skb->len;
	netdev_tx_t ret = is_ip_tx_frame(skb, dev);

	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

		u64_stats_update_begin(&dstats->syncp);
		dstats->tx_pkts++;
		dstats->tx_bytes += len;
		u64_stats_update_end(&dstats->syncp);
	} else {
		this_cpu_inc(dev->dstats->tx_drps);
	}

	return ret;
}
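/* Note (hedged): the per-CPU counters updated in vrf_xmit() and
 * vrf_rx_stats() are what vrf_get_stats64() aggregates, so they should
 * be the numbers reported by e.g. `ip -s link show dev vrf-blue`
 * ("vrf-blue" being an illustrative device name).
 */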
static int vrf_finish_direct(struct net *net, struct sock *sk,
			     struct sk_buff *skb)
{
	struct net_device *vrf_dev = skb->dev;

	if (!list_empty(&vrf_dev->ptype_all) &&
	    likely(skb_headroom(skb) >= ETH_HLEN)) {
		struct ethhdr *eth = skb_push(skb, ETH_HLEN);

		ether_addr_copy(eth->h_source, vrf_dev->dev_addr);
		eth_zero_addr(eth->h_dest);
		eth->h_proto = skb->protocol;

		rcu_read_lock_bh();
		dev_queue_xmit_nit(skb, vrf_dev);
		rcu_read_unlock_bh();

		skb_pull(skb, ETH_HLEN);
	}

	return 1;
}
#if IS_ENABLED(CONFIG_IPV6)
/* modelled after ip6_finish_output2 */
static int vrf_finish_output6(struct net *net, struct sock *sk,
			      struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct net_device *dev = dst->dev;
	struct neighbour *neigh;
	struct in6_addr *nexthop;
	int ret;

	nf_reset(skb);

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	rcu_read_lock_bh();
	nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
	neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
	if (!IS_ERR(neigh)) {
		sock_confirm_neigh(skb, neigh);
		ret = neigh_output(neigh, skb);
		rcu_read_unlock_bh();
		return ret;
	}
	rcu_read_unlock_bh();

	IP6_INC_STATS(dev_net(dst->dev),
		      ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}
/* modelled after ip6_output */
static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, skb_dst(skb)->dev,
			    vrf_finish_output6,
			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
}
/* set dst on skb to send packet to us via dev_xmit path. Allows
 * packet to go through device based features such as qdisc, netfilter
 * hooks and packet sockets with skb->dev set to vrf device.
 */
static struct sk_buff *vrf_ip6_out_redirect(struct net_device *vrf_dev,
					    struct sk_buff *skb)
{
	struct net_vrf *vrf = netdev_priv(vrf_dev);
	struct dst_entry *dst = NULL;
	struct rt6_info *rt6;

	rcu_read_lock();

	rt6 = rcu_dereference(vrf->rt6);
	if (likely(rt6)) {
		dst = &rt6->dst;
		dst_hold(dst);
	}

	rcu_read_unlock();

	if (unlikely(!dst)) {
		vrf_tx_error(vrf_dev, skb);
		return NULL;
	}

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	return skb;
}
static int vrf_output6_direct(struct net *net, struct sock *sk,
			      struct sk_buff *skb)
{
	skb->protocol = htons(ETH_P_IPV6);

	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, skb->dev,
			    vrf_finish_direct,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}
static struct sk_buff *vrf_ip6_out_direct(struct net_device *vrf_dev,
					  struct sock *sk,
					  struct sk_buff *skb)
{
	struct net *net = dev_net(vrf_dev);
	int err;

	skb->dev = vrf_dev;

	err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk,
		      skb, NULL, vrf_dev, vrf_output6_direct);

	if (likely(err == 1))
		err = vrf_output6_direct(net, sk, skb);

	/* reset skb device */
	if (likely(err == 1))
		skb->dev = vrf_dev;
	else
		skb = NULL;

	return skb;
}
static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
				   struct sock *sk,
				   struct sk_buff *skb)
{
	/* don't divert link scope packets */
	if (rt6_need_strict(&ipv6_hdr(skb)->daddr))
		return skb;

	if (qdisc_tx_is_default(vrf_dev))
		return vrf_ip6_out_direct(vrf_dev, sk, skb);

	return vrf_ip6_out_redirect(vrf_dev, skb);
}
static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf)
{
	struct rt6_info *rt6 = rtnl_dereference(vrf->rt6);
	struct net *net = dev_net(dev);
	struct dst_entry *dst;

	RCU_INIT_POINTER(vrf->rt6, NULL);
	synchronize_rcu();

	/* move dev in dst's to loopback so this VRF device can be deleted
	 * - based on dst_ifdown
	 */
	if (rt6) {
		dst = &rt6->dst;
		dev_put(dst->dev);
		dst->dev = net->loopback_dev;
		dev_hold(dst->dev);
		dst_release(dst);
	}
}
static int vrf_rt6_create(struct net_device *dev)
{
	int flags = DST_HOST | DST_NOPOLICY | DST_NOXFRM;
	struct net_vrf *vrf = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct fib6_table *rt6i_table;
	struct rt6_info *rt6;
	int rc = -ENOMEM;

	/* IPv6 can be CONFIG enabled and then disabled runtime */
	if (!ipv6_mod_enabled())
		return 0;

	rt6i_table = fib6_new_table(net, vrf->tb_id);
	if (!rt6i_table)
		goto out;

	/* create a dst for routing packets out a VRF device */
	rt6 = ip6_dst_alloc(net, dev, flags);
	if (!rt6)
		goto out;

	rt6->rt6i_table = rt6i_table;
	rt6->dst.output	= vrf_output6;

	rcu_assign_pointer(vrf->rt6, rt6);

	rc = 0;
out:
	return rc;
}
#else
static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
				   struct sock *sk,
				   struct sk_buff *skb)
{
	return skb;
}

static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf)
{
}

static int vrf_rt6_create(struct net_device *dev)
{
	return 0;
}
#endif
/* modelled after ip_finish_output2 */
static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;
	struct net_device *dev = dst->dev;
	unsigned int hh_len = LL_RESERVED_SPACE(dev);
	struct neighbour *neigh;
	u32 nexthop;
	int ret = -EINVAL;

	nf_reset(skb);

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (!skb2) {
			ret = -ENOMEM;
			goto err;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		consume_skb(skb);
		skb = skb2;
	}

	rcu_read_lock_bh();

	nexthop = (__force u32)rt_nexthop(rt, ip_hdr(skb)->daddr);
	neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
	if (!IS_ERR(neigh)) {
		sock_confirm_neigh(skb, neigh);
		ret = neigh_output(neigh, skb);
		rcu_read_unlock_bh();
		return ret;
	}

	rcu_read_unlock_bh();
err:
	vrf_tx_error(skb->dev, skb);
	return ret;
}
static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;

	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, dev,
			    vrf_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}
/* set dst on skb to send packet to us via dev_xmit path. Allows
 * packet to go through device based features such as qdisc, netfilter
 * hooks and packet sockets with skb->dev set to vrf device.
 */
static struct sk_buff *vrf_ip_out_redirect(struct net_device *vrf_dev,
					   struct sk_buff *skb)
{
	struct net_vrf *vrf = netdev_priv(vrf_dev);
	struct dst_entry *dst = NULL;
	struct rtable *rth;

	rcu_read_lock();

	rth = rcu_dereference(vrf->rth);
	if (likely(rth)) {
		dst = &rth->dst;
		dst_hold(dst);
	}

	rcu_read_unlock();

	if (unlikely(!dst)) {
		vrf_tx_error(vrf_dev, skb);
		return NULL;
	}

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	return skb;
}
static int vrf_output_direct(struct net *net, struct sock *sk,
			     struct sk_buff *skb)
{
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, skb->dev,
			    vrf_finish_direct,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}
static struct sk_buff *vrf_ip_out_direct(struct net_device *vrf_dev,
					 struct sock *sk,
					 struct sk_buff *skb)
{
	struct net *net = dev_net(vrf_dev);
	int err;

	skb->dev = vrf_dev;

	err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk,
		      skb, NULL, vrf_dev, vrf_output_direct);

	if (likely(err == 1))
		err = vrf_output_direct(net, sk, skb);

	/* reset skb device */
	if (likely(err == 1))
		skb->dev = vrf_dev;
	else
		skb = NULL;

	return skb;
}
static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev,
				  struct sock *sk,
				  struct sk_buff *skb)
{
	/* don't divert multicast or local broadcast */
	if (ipv4_is_multicast(ip_hdr(skb)->daddr) ||
	    ipv4_is_lbcast(ip_hdr(skb)->daddr))
		return skb;

	if (qdisc_tx_is_default(vrf_dev))
		return vrf_ip_out_direct(vrf_dev, sk, skb);

	return vrf_ip_out_redirect(vrf_dev, skb);
}
/* called with rcu lock held */
static struct sk_buff *vrf_l3_out(struct net_device *vrf_dev,
				  struct sock *sk,
				  struct sk_buff *skb,
				  u16 proto)
{
	switch (proto) {
	case AF_INET:
		return vrf_ip_out(vrf_dev, sk, skb);
	case AF_INET6:
		return vrf_ip6_out(vrf_dev, sk, skb);
	}

	return skb;
}
static void vrf_rtable_release(struct net_device *dev, struct net_vrf *vrf)
{
	struct rtable *rth = rtnl_dereference(vrf->rth);
	struct net *net = dev_net(dev);
	struct dst_entry *dst;

	RCU_INIT_POINTER(vrf->rth, NULL);
	synchronize_rcu();

	/* move dev in dst's to loopback so this VRF device can be deleted
	 * - based on dst_ifdown
	 */
	if (rth) {
		dst = &rth->dst;
		dev_put(dst->dev);
		dst->dev = net->loopback_dev;
		dev_hold(dst->dev);
		dst_release(dst);
	}
}
static int vrf_rtable_create(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct rtable *rth;

	if (!fib_new_table(dev_net(dev), vrf->tb_id))
		return -ENOMEM;

	/* create a dst for routing packets out through a VRF device */
	rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1, 1, 0);
	if (!rth)
		return -ENOMEM;

	rth->dst.output	= vrf_output;
	rth->rt_table_id = vrf->tb_id;

	rcu_assign_pointer(vrf->rth, rth);

	return 0;
}
/**************************** device handling ********************/

/* cycle interface to flush neighbor cache and move routes across tables */
static void cycle_netdev(struct net_device *dev)
{
	unsigned int flags = dev->flags;
	int ret;

	if (!netif_running(dev))
		return;

	ret = dev_change_flags(dev, flags & ~IFF_UP);
	if (ret >= 0)
		ret = dev_change_flags(dev, flags);

	if (ret < 0) {
		netdev_err(dev,
			   "Failed to cycle device %s; route tables might be wrong!\n",
			   dev->name);
	}
}
static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev,
			    struct netlink_ext_ack *extack)
{
	int ret;

	/* do not allow loopback device to be enslaved to a VRF.
	 * The vrf device acts as the loopback for the vrf.
	 */
	if (port_dev == dev_net(dev)->loopback_dev) {
		NL_SET_ERR_MSG(extack,
			       "Can not enslave loopback device to a VRF");
		return -EOPNOTSUPP;
	}

	port_dev->priv_flags |= IFF_L3MDEV_SLAVE;
	ret = netdev_master_upper_dev_link(port_dev, dev, NULL, NULL, extack);
	if (ret < 0)
		goto err;

	cycle_netdev(port_dev);

	return 0;

err:
	port_dev->priv_flags &= ~IFF_L3MDEV_SLAVE;
	return ret;
}
static int vrf_add_slave(struct net_device *dev, struct net_device *port_dev,
			 struct netlink_ext_ack *extack)
{
	if (netif_is_l3_master(port_dev)) {
		NL_SET_ERR_MSG(extack,
			       "Can not enslave an L3 master device to a VRF");
		return -EINVAL;
	}

	if (netif_is_l3_slave(port_dev))
		return -EINVAL;

	return do_vrf_add_slave(dev, port_dev, extack);
}
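/* Example (hedged): from userspace, enslaving an interface goes through
 * the ndo_add_slave path above, e.g.:
 *
 *	ip link set dev eth0 master vrf-blue
 *
 * "vrf-blue" is an illustrative VRF name; the enslaved device is cycled
 * so its routes move to the VRF table and stale neighbor entries are
 * flushed.
 */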
/* inverse of do_vrf_add_slave */
static int do_vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	netdev_upper_dev_unlink(port_dev, dev);
	port_dev->priv_flags &= ~IFF_L3MDEV_SLAVE;

	cycle_netdev(port_dev);

	return 0;
}

static int vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	return do_vrf_del_slave(dev, port_dev);
}
static void vrf_dev_uninit(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	vrf_rtable_release(dev, vrf);
	vrf_rt6_release(dev, vrf);

	free_percpu(dev->dstats);
	dev->dstats = NULL;
}
static int vrf_dev_init(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
	if (!dev->dstats)
		goto out_nomem;

	/* create the default dst which points back to us */
	if (vrf_rtable_create(dev) != 0)
		goto out_stats;

	if (vrf_rt6_create(dev) != 0)
		goto out_rth;

	dev->flags = IFF_MASTER | IFF_NOARP;

	/* MTU is irrelevant for VRF device; set to 64k similar to lo */
	dev->mtu = 64 * 1024;

	/* similarly, oper state is irrelevant; set to up to avoid confusion */
	dev->operstate = IF_OPER_UP;
	netdev_lockdep_set_classes(dev);
	return 0;

out_rth:
	vrf_rtable_release(dev, vrf);
out_stats:
	free_percpu(dev->dstats);
	dev->dstats = NULL;
out_nomem:
	return -ENOMEM;
}
static const struct net_device_ops vrf_netdev_ops = {
	.ndo_init		= vrf_dev_init,
	.ndo_uninit		= vrf_dev_uninit,
	.ndo_start_xmit		= vrf_xmit,
	.ndo_get_stats64	= vrf_get_stats64,
	.ndo_add_slave		= vrf_add_slave,
	.ndo_del_slave		= vrf_del_slave,
};
static u32 vrf_fib_table(const struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	return vrf->tb_id;
}
static int vrf_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}
static struct sk_buff *vrf_rcv_nfhook(u8 pf, unsigned int hook,
				      struct sk_buff *skb,
				      struct net_device *dev)
{
	struct net *net = dev_net(dev);

	if (nf_hook(pf, hook, net, NULL, skb, dev, NULL, vrf_rcv_finish) != 1)
		skb = NULL;	/* kfree_skb(skb) handled by nf code */

	return skb;
}
#if IS_ENABLED(CONFIG_IPV6)
/* neighbor handling is done with actual device; do not want
 * to flip skb->dev for those ndisc packets. This really fails
 * for multiple next protocols (e.g., NEXTHDR_HOP). But it is
 * a start.
 */
static bool ipv6_ndisc_frame(const struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	bool rc = false;

	if (iph->nexthdr == NEXTHDR_ICMP) {
		const struct icmp6hdr *icmph;
		struct icmp6hdr _icmph;

		icmph = skb_header_pointer(skb, sizeof(*iph),
					   sizeof(_icmph), &_icmph);
		if (!icmph)
			goto out;

		switch (icmph->icmp6_type) {
		case NDISC_ROUTER_SOLICITATION:
		case NDISC_ROUTER_ADVERTISEMENT:
		case NDISC_NEIGHBOUR_SOLICITATION:
		case NDISC_NEIGHBOUR_ADVERTISEMENT:
		case NDISC_REDIRECT:
			rc = true;
			break;
		}
	}

out:
	return rc;
}
static struct rt6_info *vrf_ip6_route_lookup(struct net *net,
					     const struct net_device *dev,
					     struct flowi6 *fl6,
					     int ifindex,
					     int flags)
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct fib6_table *table = NULL;
	struct rt6_info *rt6;

	rcu_read_lock();

	/* fib6_table does not have a refcnt and can not be freed */
	rt6 = rcu_dereference(vrf->rt6);
	if (likely(rt6))
		table = rt6->rt6i_table;

	rcu_read_unlock();

	if (!table)
		return NULL;

	return ip6_pol_route(net, table, ifindex, fl6, flags);
}
static void vrf_ip6_input_dst(struct sk_buff *skb, struct net_device *vrf_dev,
			      int ifindex)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct flowi6 fl6 = {
		.flowi6_iif	= ifindex,
		.flowi6_mark	= skb->mark,
		.flowi6_proto	= iph->nexthdr,
		.daddr		= iph->daddr,
		.saddr		= iph->saddr,
		.flowlabel	= ip6_flowinfo(iph),
	};
	struct net *net = dev_net(vrf_dev);
	struct rt6_info *rt6;

	rt6 = vrf_ip6_route_lookup(net, vrf_dev, &fl6, ifindex,
				   RT6_LOOKUP_F_HAS_SADDR | RT6_LOOKUP_F_IFACE);
	if (unlikely(!rt6))
		return;

	if (unlikely(&rt6->dst == &net->ipv6.ip6_null_entry->dst))
		return;

	skb_dst_set(skb, &rt6->dst);
}
static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
				   struct sk_buff *skb)
{
	int orig_iif = skb->skb_iif;
	bool need_strict;

	/* loopback traffic; do not push through packet taps again.
	 * Reset pkt_type for upper layers to process skb
	 */
	if (skb->pkt_type == PACKET_LOOPBACK) {
		skb->dev = vrf_dev;
		skb->skb_iif = vrf_dev->ifindex;
		IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
		skb->pkt_type = PACKET_HOST;
		goto out;
	}

	/* if packet is NDISC or addressed to multicast or link-local
	 * then keep the ingress interface
	 */
	need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
	if (!ipv6_ndisc_frame(skb) && !need_strict) {
		vrf_rx_stats(vrf_dev, skb->len);
		skb->dev = vrf_dev;
		skb->skb_iif = vrf_dev->ifindex;

		if (!list_empty(&vrf_dev->ptype_all)) {
			skb_push(skb, skb->mac_len);
			dev_queue_xmit_nit(skb, vrf_dev);
			skb_pull(skb, skb->mac_len);
		}

		IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
	}

	if (need_strict)
		vrf_ip6_input_dst(skb, vrf_dev, orig_iif);

	skb = vrf_rcv_nfhook(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, vrf_dev);
out:
	return skb;
}
#else
static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
				   struct sk_buff *skb)
{
	return skb;
}
#endif
static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
				  struct sk_buff *skb)
{
	skb->dev = vrf_dev;
	skb->skb_iif = vrf_dev->ifindex;
	IPCB(skb)->flags |= IPSKB_L3SLAVE;

	if (ipv4_is_multicast(ip_hdr(skb)->daddr))
		goto out;

	/* loopback traffic; do not push through packet taps again.
	 * Reset pkt_type for upper layers to process skb
	 */
	if (skb->pkt_type == PACKET_LOOPBACK) {
		skb->pkt_type = PACKET_HOST;
		goto out;
	}

	vrf_rx_stats(vrf_dev, skb->len);

	if (!list_empty(&vrf_dev->ptype_all)) {
		skb_push(skb, skb->mac_len);
		dev_queue_xmit_nit(skb, vrf_dev);
		skb_pull(skb, skb->mac_len);
	}

	skb = vrf_rcv_nfhook(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, vrf_dev);
out:
	return skb;
}
/* called with rcu lock held */
static struct sk_buff *vrf_l3_rcv(struct net_device *vrf_dev,
				  struct sk_buff *skb,
				  u16 proto)
{
	switch (proto) {
	case AF_INET:
		return vrf_ip_rcv(vrf_dev, skb);
	case AF_INET6:
		return vrf_ip6_rcv(vrf_dev, skb);
	}

	return skb;
}
#if IS_ENABLED(CONFIG_IPV6)
/* send to link-local or multicast address via interface enslaved to
 * VRF device. Force lookup to VRF table without changing flow struct
 */
static struct dst_entry *vrf_link_scope_lookup(const struct net_device *dev,
					       struct flowi6 *fl6)
{
	struct net *net = dev_net(dev);
	int flags = RT6_LOOKUP_F_IFACE;
	struct dst_entry *dst = NULL;
	struct rt6_info *rt;

	/* VRF device does not have a link-local address and
	 * sending packets to link-local or mcast addresses over
	 * a VRF device does not make sense
	 */
	if (fl6->flowi6_oif == dev->ifindex) {
		dst = &net->ipv6.ip6_null_entry->dst;
		dst_hold(dst);
		return dst;
	}

	if (!ipv6_addr_any(&fl6->saddr))
		flags |= RT6_LOOKUP_F_HAS_SADDR;

	rt = vrf_ip6_route_lookup(net, dev, fl6, fl6->flowi6_oif, flags);
	if (rt)
		dst = &rt->dst;

	return dst;
}
#endif
static const struct l3mdev_ops vrf_l3mdev_ops = {
	.l3mdev_fib_table	= vrf_fib_table,
	.l3mdev_l3_rcv		= vrf_l3_rcv,
	.l3mdev_l3_out		= vrf_l3_out,
#if IS_ENABLED(CONFIG_IPV6)
	.l3mdev_link_scope_lookup = vrf_link_scope_lookup,
#endif
};
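/* Example (hedged, userspace): an application scopes a socket to this
 * VRF by binding it to the VRF device; "vrf-blue" is an illustrative
 * name:
 *
 *	setsockopt(sd, SOL_SOCKET, SO_BINDTODEVICE,
 *		   "vrf-blue", strlen("vrf-blue") + 1);
 *
 * The l3mdev hooks above then steer the socket's route lookups to the
 * VRF table.
 */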
static void vrf_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}

static const struct ethtool_ops vrf_ethtool_ops = {
	.get_drvinfo	= vrf_get_drvinfo,
};
static inline size_t vrf_fib_rule_nl_size(void)
{
	size_t sz;

	sz  = NLMSG_ALIGN(sizeof(struct fib_rule_hdr));
	sz += nla_total_size(sizeof(u8));	/* FRA_L3MDEV */
	sz += nla_total_size(sizeof(u32));	/* FRA_PRIORITY */

	return sz;
}
static int vrf_fib_rule(const struct net_device *dev, __u8 family, bool add_it)
{
	struct fib_rule_hdr *frh;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	int err;

	if (family == AF_INET6 && !ipv6_mod_enabled())
		return 0;

	skb = nlmsg_new(vrf_fib_rule_nl_size(), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	nlh = nlmsg_put(skb, 0, 0, 0, sizeof(*frh), 0);
	if (!nlh)
		goto nla_put_failure;

	/* rule only needs to appear once */
	nlh->nlmsg_flags |= NLM_F_EXCL;

	frh = nlmsg_data(nlh);
	memset(frh, 0, sizeof(*frh));
	frh->family = family;
	frh->action = FR_ACT_TO_TBL;

	if (nla_put_u8(skb, FRA_L3MDEV, 1))
		goto nla_put_failure;

	if (nla_put_u32(skb, FRA_PRIORITY, FIB_RULE_PREF))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);

	/* fib_nl_{new,del}rule handling looks for net from skb->sk */
	skb->sk = dev_net(dev)->rtnl;
	if (add_it) {
		err = fib_nl_newrule(skb, nlh, NULL);
		if (err == -EEXIST)
			err = 0;
	} else {
		err = fib_nl_delrule(skb, nlh, NULL);
		if (err == -ENOENT)
			err = 0;
	}
	nlmsg_free(skb);

	return err;

nla_put_failure:
	nlmsg_free(skb);

	return -EMSGSIZE;
}
static int vrf_add_fib_rules(const struct net_device *dev)
{
	int err;

	err = vrf_fib_rule(dev, AF_INET, true);
	if (err < 0)
		goto out_err;

	err = vrf_fib_rule(dev, AF_INET6, true);
	if (err < 0)
		goto ipv6_err;

#if IS_ENABLED(CONFIG_IP_MROUTE_MULTIPLE_TABLES)
	err = vrf_fib_rule(dev, RTNL_FAMILY_IPMR, true);
	if (err < 0)
		goto ipmr_err;
#endif

	return 0;

#if IS_ENABLED(CONFIG_IP_MROUTE_MULTIPLE_TABLES)
ipmr_err:
	vrf_fib_rule(dev, AF_INET6, false);
#endif

ipv6_err:
	vrf_fib_rule(dev, AF_INET, false);

out_err:
	netdev_err(dev, "Failed to add FIB rules.\n");
	return err;
}
static void vrf_setup(struct net_device *dev)
{
	ether_setup(dev);

	/* Initialize the device structure. */
	dev->netdev_ops = &vrf_netdev_ops;
	dev->l3mdev_ops = &vrf_l3mdev_ops;
	dev->ethtool_ops = &vrf_ethtool_ops;
	dev->needs_free_netdev = true;

	/* Fill in device structure with ethernet-generic values. */
	eth_hw_addr_random(dev);

	/* don't acquire vrf device's netif_tx_lock when transmitting */
	dev->features |= NETIF_F_LLTX;

	/* don't allow vrf devices to change network namespaces. */
	dev->features |= NETIF_F_NETNS_LOCAL;

	/* does not make sense for a VLAN to be added to a vrf device */
	dev->features   |= NETIF_F_VLAN_CHALLENGED;

	/* enable offload features */
	dev->features   |= NETIF_F_GSO_SOFTWARE;
	dev->features   |= NETIF_F_RXCSUM | NETIF_F_HW_CSUM;
	dev->features   |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA;

	dev->hw_features = dev->features;
	dev->hw_enc_features = dev->features;

	/* default to no qdisc; user can add if desired */
	dev->priv_flags |= IFF_NO_QUEUE;
}
static int vrf_validate(struct nlattr *tb[], struct nlattr *data[],
			struct netlink_ext_ack *extack)
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
			NL_SET_ERR_MSG(extack, "Invalid hardware address");
			return -EINVAL;
		}
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
			NL_SET_ERR_MSG(extack, "Invalid hardware address");
			return -EADDRNOTAVAIL;
		}
	}
	return 0;
}
static void vrf_dellink(struct net_device *dev, struct list_head *head)
{
	struct net_device *port_dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(dev, port_dev, iter)
		vrf_del_slave(dev, port_dev);

	unregister_netdevice_queue(dev, head);
}
static int vrf_newlink(struct net *src_net, struct net_device *dev,
		       struct nlattr *tb[], struct nlattr *data[],
		       struct netlink_ext_ack *extack)
{
	struct net_vrf *vrf = netdev_priv(dev);
	bool *add_fib_rules;
	struct net *net;
	int err;

	if (!data || !data[IFLA_VRF_TABLE]) {
		NL_SET_ERR_MSG(extack, "VRF table id is missing");
		return -EINVAL;
	}

	vrf->tb_id = nla_get_u32(data[IFLA_VRF_TABLE]);
	if (vrf->tb_id == RT_TABLE_UNSPEC) {
		NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VRF_TABLE],
				    "Invalid VRF table id");
		return -EINVAL;
	}

	dev->priv_flags |= IFF_L3MDEV_MASTER;

	err = register_netdevice(dev);
	if (err)
		goto out;

	net = dev_net(dev);
	add_fib_rules = net_generic(net, vrf_net_id);
	if (*add_fib_rules) {
		err = vrf_add_fib_rules(dev);
		if (err) {
			unregister_netdevice(dev);
			goto out;
		}
		*add_fib_rules = false;
	}

out:
	return err;
}
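/* Example (hedged): creating a VRF from userspace exercises this
 * newlink path; the table id is the mandatory IFLA_VRF_TABLE attribute:
 *
 *	ip link add vrf-blue type vrf table 10
 *	ip link set dev vrf-blue up
 *
 * "vrf-blue" and table 10 are illustrative values.
 */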
static size_t vrf_nl_getsize(const struct net_device *dev)
{
	return nla_total_size(sizeof(u32));  /* IFLA_VRF_TABLE */
}

static int vrf_fillinfo(struct sk_buff *skb,
			const struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	return nla_put_u32(skb, IFLA_VRF_TABLE, vrf->tb_id);
}

static size_t vrf_get_slave_size(const struct net_device *bond_dev,
				 const struct net_device *slave_dev)
{
	return nla_total_size(sizeof(u32));  /* IFLA_VRF_PORT_TABLE */
}

static int vrf_fill_slave_info(struct sk_buff *skb,
			       const struct net_device *vrf_dev,
			       const struct net_device *slave_dev)
{
	struct net_vrf *vrf = netdev_priv(vrf_dev);

	if (nla_put_u32(skb, IFLA_VRF_PORT_TABLE, vrf->tb_id))
		return -EMSGSIZE;

	return 0;
}

static const struct nla_policy vrf_nl_policy[IFLA_VRF_MAX + 1] = {
	[IFLA_VRF_TABLE] = { .type = NLA_U32 },
};
static struct rtnl_link_ops vrf_link_ops __read_mostly = {
	.kind		= DRV_NAME,
	.priv_size	= sizeof(struct net_vrf),

	.get_size	= vrf_nl_getsize,
	.policy		= vrf_nl_policy,
	.validate	= vrf_validate,
	.fill_info	= vrf_fillinfo,

	.get_slave_size  = vrf_get_slave_size,
	.fill_slave_info = vrf_fill_slave_info,

	.newlink	= vrf_newlink,
	.dellink	= vrf_dellink,
	.setup		= vrf_setup,
	.maxtype	= IFLA_VRF_MAX,
};
static int vrf_device_event(struct notifier_block *unused,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	/* only care about unregister events to drop slave references */
	if (event == NETDEV_UNREGISTER) {
		struct net_device *vrf_dev;

		if (!netif_is_l3_slave(dev))
			goto out;

		vrf_dev = netdev_master_upper_dev_get(dev);
		vrf_del_slave(vrf_dev, dev);
	}
out:
	return NOTIFY_DONE;
}

static struct notifier_block vrf_notifier_block __read_mostly = {
	.notifier_call = vrf_device_event,
};
/* Initialize per network namespace state */
static int __net_init vrf_netns_init(struct net *net)
{
	bool *add_fib_rules = net_generic(net, vrf_net_id);

	*add_fib_rules = true;

	return 0;
}

static struct pernet_operations vrf_net_ops __net_initdata = {
	.init = vrf_netns_init,
	.id   = &vrf_net_id,
	.size = sizeof(bool),
};
static int __init vrf_init_module(void)
{
	int rc;

	register_netdevice_notifier(&vrf_notifier_block);

	rc = register_pernet_subsys(&vrf_net_ops);
	if (rc < 0)
		goto error;

	rc = rtnl_link_register(&vrf_link_ops);
	if (rc < 0) {
		unregister_pernet_subsys(&vrf_net_ops);
		goto error;
	}

	return 0;

error:
	unregister_netdevice_notifier(&vrf_notifier_block);
	return rc;
}

module_init(vrf_init_module);
MODULE_AUTHOR("Shrijeet Mukherjee, David Ahern");
MODULE_DESCRIPTION("Device driver to instantiate VRF domains");
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);
MODULE_VERSION(DRV_VERSION);