// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * vrf.c: device driver to encapsulate a VRF space
 *
 * Copyright (c) 2015 Cumulus Networks. All rights reserved.
 * Copyright (c) 2015 Shrijeet Mukherjee <shm@cumulusnetworks.com>
 * Copyright (c) 2015 David Ahern <dsa@cumulusnetworks.com>
 *
 * Based on dummy, team and ipvlan drivers
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/netfilter.h>
#include <linux/rtnetlink.h>
#include <net/rtnetlink.h>
#include <linux/u64_stats_sync.h>
#include <linux/hashtable.h>
#include <linux/spinlock_types.h>

#include <linux/inetdevice.h>
#include <net/arp.h>
#include <net/ip.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/route.h>
#include <net/addrconf.h>
#include <net/l3mdev.h>
#include <net/fib_rules.h>
#include <net/netns/generic.h>

#define DRV_NAME	"vrf"
#define DRV_VERSION	"1.1"

#define FIB_RULE_PREF	1000	/* default preference for FIB rules */

#define HT_MAP_BITS	4
#define HASH_INITVAL	((u32)0xcafef00d)

struct vrf_map {
	DECLARE_HASHTABLE(ht, HT_MAP_BITS);
	spinlock_t vmap_lock;
	/* shared_tables:
	 * count how many distinct tables do not comply with the strict mode
	 * requirement.
	 * shared_tables value must be 0 in order to enable the strict mode.
	 *
	 * example of the evolution of shared_tables:
	 *
	 *                                                     | time
	 * add  vrf0 --> table 100     shared_tables = 0       | t0
	 * add  vrf1 --> table 101     shared_tables = 0       | t1
	 * add  vrf2 --> table 100     shared_tables = 1       | t2
	 * add  vrf3 --> table 100     shared_tables = 1       | t3
	 * add  vrf4 --> table 101     shared_tables = 2       v t4
	 *
	 * shared_tables is a "step function" (or "staircase function")
	 * and it is increased by one when the second vrf is associated
	 * with a table.
	 *
	 * at t2, vrf0 and vrf2 are bound to table 100: shared_tables = 1.
	 *
	 * at t3, another dev (vrf3) is bound to the same table 100, but the
	 * value of shared_tables is still 1. This means that no matter how
	 * many new vrfs register on table 100, shared_tables does not
	 * increase (considering only table 100).
	 *
	 * at t4, vrf4 is bound to table 101, and shared_tables = 2.
	 *
	 * Looking at the value of shared_tables we can immediately know if
	 * strict_mode can or cannot be enforced. Indeed, strict_mode can be
	 * enforced iff shared_tables = 0.
	 *
	 * Conversely, shared_tables is decreased when a vrf is de-associated
	 * from a table with exactly two associated vrfs.
	 */
	u32 shared_tables;

	bool strict_mode;
};

struct vrf_map_elem {
	struct hlist_node hnode;
	struct list_head vrf_list;	/* VRFs registered to this table */

	u32 table_id;
	int users;
	int ifindex;
};
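
/* Illustrative only -- not part of the driver. Assuming the tables from
 * the example above and the strict mode sysctl registered at the bottom
 * of this file (/proc/sys/net/vrf/strict_mode):
 *
 *	ip link add vrf0 type vrf table 100
 *	ip link add vrf2 type vrf table 100	# shared_tables becomes 1
 *	sysctl -w net.vrf.strict_mode=1		# fails (EBUSY): table shared
 *	ip link del vrf2			# shared_tables back to 0
 *	sysctl -w net.vrf.strict_mode=1		# succeeds
 */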
static unsigned int vrf_net_id;

/* per netns vrf data */
struct netns_vrf {
	/* protected by rtnl lock */
	bool add_fib_rules;

	struct vrf_map vmap;
	struct ctl_table_header *ctl_hdr;
};

struct net_vrf {
	struct rtable __rcu *rth;
	struct rt6_info __rcu *rt6;
#if IS_ENABLED(CONFIG_IPV6)
	struct fib6_table *fib6_table;
#endif
	u32 tb_id;

	struct list_head me_list;	/* entry in vrf_map_elem */
	int ifindex;
};

struct pcpu_dstats {
	u64 tx_pkts;
	u64 tx_bytes;
	u64 tx_drps;
	u64 rx_pkts;
	u64 rx_bytes;
	u64 rx_drps;
	struct u64_stats_sync syncp;
};
static void vrf_rx_stats(struct net_device *dev, int len)
{
	struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

	u64_stats_update_begin(&dstats->syncp);
	dstats->rx_pkts++;
	dstats->rx_bytes += len;
	u64_stats_update_end(&dstats->syncp);
}
static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb)
{
	vrf_dev->stats.tx_errors++;
	kfree_skb(skb);
}
static void vrf_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *stats)
{
	int i;

	for_each_possible_cpu(i) {
		const struct pcpu_dstats *dstats;
		u64 tbytes, tpkts, tdrops, rbytes, rpkts;
		unsigned int start;

		dstats = per_cpu_ptr(dev->dstats, i);
		do {
			start = u64_stats_fetch_begin_irq(&dstats->syncp);
			tbytes = dstats->tx_bytes;
			tpkts = dstats->tx_pkts;
			tdrops = dstats->tx_drps;
			rbytes = dstats->rx_bytes;
			rpkts = dstats->rx_pkts;
		} while (u64_stats_fetch_retry_irq(&dstats->syncp, start));
		stats->tx_bytes += tbytes;
		stats->tx_packets += tpkts;
		stats->tx_dropped += tdrops;
		stats->rx_bytes += rbytes;
		stats->rx_packets += rpkts;
	}
}
static struct vrf_map *netns_vrf_map(struct net *net)
{
	struct netns_vrf *nn_vrf = net_generic(net, vrf_net_id);

	return &nn_vrf->vmap;
}

static struct vrf_map *netns_vrf_map_by_dev(struct net_device *dev)
{
	return netns_vrf_map(dev_net(dev));
}
static int vrf_map_elem_get_vrf_ifindex(struct vrf_map_elem *me)
{
	struct list_head *me_head = &me->vrf_list;
	struct net_vrf *vrf;

	if (list_empty(me_head))
		return -ENODEV;

	vrf = list_first_entry(me_head, struct net_vrf, me_list);

	return vrf->ifindex;
}
static struct vrf_map_elem *vrf_map_elem_alloc(gfp_t flags)
{
	struct vrf_map_elem *me;

	me = kmalloc(sizeof(*me), flags);
	if (!me)
		return NULL;

	return me;
}

static void vrf_map_elem_free(struct vrf_map_elem *me)
{
	kfree(me);
}

static void vrf_map_elem_init(struct vrf_map_elem *me, int table_id,
			      int ifindex, int users)
{
	me->table_id = table_id;
	me->ifindex = ifindex;
	me->users = users;
	INIT_LIST_HEAD(&me->vrf_list);
}
static struct vrf_map_elem *vrf_map_lookup_elem(struct vrf_map *vmap,
						u32 table_id)
{
	struct vrf_map_elem *me;
	u32 key;

	key = jhash_1word(table_id, HASH_INITVAL);
	hash_for_each_possible(vmap->ht, me, hnode, key) {
		if (me->table_id == table_id)
			return me;
	}

	return NULL;
}
static void vrf_map_add_elem(struct vrf_map *vmap, struct vrf_map_elem *me)
{
	u32 table_id = me->table_id;
	u32 key;

	key = jhash_1word(table_id, HASH_INITVAL);
	hash_add(vmap->ht, &me->hnode, key);
}

static void vrf_map_del_elem(struct vrf_map_elem *me)
{
	hash_del(&me->hnode);
}

static void vrf_map_lock(struct vrf_map *vmap) __acquires(&vmap->vmap_lock)
{
	spin_lock(&vmap->vmap_lock);
}

static void vrf_map_unlock(struct vrf_map *vmap) __releases(&vmap->vmap_lock)
{
	spin_unlock(&vmap->vmap_lock);
}
/* called with rtnl lock held */
static int
vrf_map_register_dev(struct net_device *dev, struct netlink_ext_ack *extack)
{
	struct vrf_map *vmap = netns_vrf_map_by_dev(dev);
	struct net_vrf *vrf = netdev_priv(dev);
	struct vrf_map_elem *new_me, *me;
	u32 table_id = vrf->tb_id;
	bool free_new_me = false;
	int users;
	int res = 0;

	/* we pre-allocate elements used in the spin-locked section (so that
	 * we keep the spin-locked section as short as possible).
	 */
	new_me = vrf_map_elem_alloc(GFP_KERNEL);
	if (!new_me)
		return -ENOMEM;

	vrf_map_elem_init(new_me, table_id, dev->ifindex, 0);

	vrf_map_lock(vmap);

	me = vrf_map_lookup_elem(vmap, table_id);
	if (!me) {
		me = new_me;
		vrf_map_add_elem(vmap, me);
		goto link_vrf;
	}

	/* we already have an entry in the vrf_map, so it means there is (at
	 * least) a vrf registered on the specific table.
	 */
	free_new_me = true;
	if (vmap->strict_mode) {
		/* vrfs cannot share the same table */
		NL_SET_ERR_MSG(extack, "Table is used by another VRF");
		res = -EBUSY;
		goto unlock;
	}

link_vrf:
	users = ++me->users;
	if (users == 2)
		++vmap->shared_tables;

	list_add(&vrf->me_list, &me->vrf_list);

unlock:
	vrf_map_unlock(vmap);

	/* clean-up, if needed */
	if (free_new_me)
		vrf_map_elem_free(new_me);

	return res;
}
/* called with rtnl lock held */
static void vrf_map_unregister_dev(struct net_device *dev)
{
	struct vrf_map *vmap = netns_vrf_map_by_dev(dev);
	struct net_vrf *vrf = netdev_priv(dev);
	u32 table_id = vrf->tb_id;
	struct vrf_map_elem *me;
	int users;

	vrf_map_lock(vmap);

	me = vrf_map_lookup_elem(vmap, table_id);
	if (!me)
		goto unlock;

	list_del(&vrf->me_list);

	users = --me->users;
	if (users == 1) {
		--vmap->shared_tables;
	} else if (users == 0) {
		vrf_map_del_elem(me);

		/* no one will refer to this element anymore */
		vrf_map_elem_free(me);
	}

unlock:
	vrf_map_unlock(vmap);
}
/* return the vrf device index associated with the table_id */
static int vrf_ifindex_lookup_by_table_id(struct net *net, u32 table_id)
{
	struct vrf_map *vmap = netns_vrf_map(net);
	struct vrf_map_elem *me;
	int ifindex;

	vrf_map_lock(vmap);

	if (!vmap->strict_mode) {
		ifindex = -EPERM;
		goto unlock;
	}

	me = vrf_map_lookup_elem(vmap, table_id);
	if (!me) {
		ifindex = -ENODEV;
		goto unlock;
	}

	ifindex = vrf_map_elem_get_vrf_ifindex(me);

unlock:
	vrf_map_unlock(vmap);

	return ifindex;
}
/* by default VRF devices do not have a qdisc and are expected
 * to be created with only a single queue.
 */
static bool qdisc_tx_is_default(const struct net_device *dev)
{
	struct netdev_queue *txq;
	struct Qdisc *qdisc;

	if (dev->num_tx_queues > 1)
		return false;

	txq = netdev_get_tx_queue(dev, 0);
	qdisc = rcu_access_pointer(txq->qdisc);

	return !qdisc->enqueue;
}
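
/* Illustrative only: with the IFF_NO_QUEUE default set in vrf_setup(),
 * qdisc_tx_is_default() returns true. Attaching any qdisc, e.g.
 *
 *	tc qdisc add dev vrf-blue root netem delay 10ms
 *
 * (device name is just an example) flips it to false, which steers
 * locally generated traffic through the vrf_*_out_redirect() paths below
 * so packets actually pass through the qdisc.
 */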
/* Local traffic destined to local address. Reinsert the packet to rx
 * path, similar to loopback handling.
 */
static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev,
			  struct dst_entry *dst)
{
	int len = skb->len;

	skb_orphan(skb);

	skb_dst_set(skb, dst);

	/* set pkt_type to avoid skb hitting packet taps twice -
	 * once on Tx and again in Rx processing
	 */
	skb->pkt_type = PACKET_LOOPBACK;

	skb->protocol = eth_type_trans(skb, dev);

	if (likely(netif_rx(skb) == NET_RX_SUCCESS))
		vrf_rx_stats(dev, len);
	else
		this_cpu_inc(dev->dstats->rx_drps);

	return NETDEV_TX_OK;
}
#if IS_ENABLED(CONFIG_IPV6)
static int vrf_ip6_local_out(struct net *net, struct sock *sk,
			     struct sk_buff *skb)
{
	int err;

	err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net,
		      sk, skb, NULL, skb_dst(skb)->dev, dst_output);

	if (likely(err == 1))
		err = dst_output(net, sk, skb);

	return err;
}
static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
					   struct net_device *dev)
{
	const struct ipv6hdr *iph;
	struct net *net = dev_net(skb->dev);
	struct flowi6 fl6;
	int ret = NET_XMIT_DROP;
	struct dst_entry *dst;
	struct dst_entry *dst_null = &net->ipv6.ip6_null_entry->dst;

	if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct ipv6hdr)))
		goto err;

	iph = ipv6_hdr(skb);

	memset(&fl6, 0, sizeof(fl6));
	/* needed to match OIF rule */
	fl6.flowi6_oif = dev->ifindex;
	fl6.flowi6_iif = LOOPBACK_IFINDEX;
	fl6.daddr = iph->daddr;
	fl6.saddr = iph->saddr;
	fl6.flowlabel = ip6_flowinfo(iph);
	fl6.flowi6_mark = skb->mark;
	fl6.flowi6_proto = iph->nexthdr;
	fl6.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF;

	dst = ip6_dst_lookup_flow(net, NULL, &fl6, NULL);
	if (IS_ERR(dst) || dst == dst_null)
		goto err;

	skb_dst_drop(skb);

	/* if dst.dev is loopback or the VRF device again this is locally
	 * originated traffic destined to a local address. Short circuit
	 * to Rx path
	 */
	if (dst->dev == dev)
		return vrf_local_xmit(skb, dev, dst);

	skb_dst_set(skb, dst);

	/* strip the ethernet header added for pass through VRF device */
	__skb_pull(skb, skb_network_offset(skb));

	ret = vrf_ip6_local_out(net, skb->sk, skb);
	if (unlikely(net_xmit_eval(ret)))
		dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;

	return ret;
err:
	vrf_tx_error(dev, skb);
	return NET_XMIT_DROP;
}
#else
static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
					   struct net_device *dev)
{
	vrf_tx_error(dev, skb);
	return NET_XMIT_DROP;
}
#endif
/* based on ip_local_out; can't use it b/c the dst is switched pointing to us */
static int vrf_ip_local_out(struct net *net, struct sock *sk,
			    struct sk_buff *skb)
{
	int err;

	err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk,
		      skb, NULL, skb_dst(skb)->dev, dst_output);
	if (likely(err == 1))
		err = dst_output(net, sk, skb);

	return err;
}
static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
					   struct net_device *vrf_dev)
{
	struct iphdr *ip4h;
	int ret = NET_XMIT_DROP;
	struct flowi4 fl4;
	struct net *net = dev_net(vrf_dev);
	struct rtable *rt;

	if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct iphdr)))
		goto err;

	ip4h = ip_hdr(skb);

	memset(&fl4, 0, sizeof(fl4));
	/* needed to match OIF rule */
	fl4.flowi4_oif = vrf_dev->ifindex;
	fl4.flowi4_iif = LOOPBACK_IFINDEX;
	fl4.flowi4_tos = RT_TOS(ip4h->tos);
	fl4.flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_SKIP_NH_OIF;
	fl4.flowi4_proto = ip4h->protocol;
	fl4.daddr = ip4h->daddr;
	fl4.saddr = ip4h->saddr;

	rt = ip_route_output_flow(net, &fl4, NULL);
	if (IS_ERR(rt))
		goto err;

	skb_dst_drop(skb);

	/* if dst.dev is loopback or the VRF device again this is locally
	 * originated traffic destined to a local address. Short circuit
	 * to Rx path
	 */
	if (rt->dst.dev == vrf_dev)
		return vrf_local_xmit(skb, vrf_dev, &rt->dst);

	skb_dst_set(skb, &rt->dst);

	/* strip the ethernet header added for pass through VRF device */
	__skb_pull(skb, skb_network_offset(skb));

	if (!ip4h->saddr) {
		ip4h->saddr = inet_select_addr(skb_dst(skb)->dev, 0,
					       RT_SCOPE_LINK);
	}

	ret = vrf_ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
	if (unlikely(net_xmit_eval(ret)))
		vrf_dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;

out:
	return ret;
err:
	vrf_tx_error(vrf_dev, skb);
	goto out;
}
static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return vrf_process_v4_outbound(skb, dev);
	case htons(ETH_P_IPV6):
		return vrf_process_v6_outbound(skb, dev);
	default:
		vrf_tx_error(dev, skb);
		return NET_XMIT_DROP;
	}
}
static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int len = skb->len;
	netdev_tx_t ret = is_ip_tx_frame(skb, dev);

	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

		u64_stats_update_begin(&dstats->syncp);
		dstats->tx_pkts++;
		dstats->tx_bytes += len;
		u64_stats_update_end(&dstats->syncp);
	} else {
		this_cpu_inc(dev->dstats->tx_drps);
	}

	return ret;
}
static int vrf_finish_direct(struct net *net, struct sock *sk,
			     struct sk_buff *skb)
{
	struct net_device *vrf_dev = skb->dev;

	if (!list_empty(&vrf_dev->ptype_all) &&
	    likely(skb_headroom(skb) >= ETH_HLEN)) {
		struct ethhdr *eth = skb_push(skb, ETH_HLEN);

		ether_addr_copy(eth->h_source, vrf_dev->dev_addr);
		eth_zero_addr(eth->h_dest);
		eth->h_proto = skb->protocol;

		rcu_read_lock_bh();
		dev_queue_xmit_nit(skb, vrf_dev);
		rcu_read_unlock_bh();

		skb_pull(skb, ETH_HLEN);
	}

	return 1;
}
#if IS_ENABLED(CONFIG_IPV6)
/* modelled after ip6_finish_output2 */
static int vrf_finish_output6(struct net *net, struct sock *sk,
			      struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct net_device *dev = dst->dev;
	const struct in6_addr *nexthop;
	struct neighbour *neigh;
	int ret;

	nf_reset_ct(skb);

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	rcu_read_lock_bh();
	nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
	neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
	if (!IS_ERR(neigh)) {
		sock_confirm_neigh(skb, neigh);
		ret = neigh_output(neigh, skb, false);
		rcu_read_unlock_bh();
		return ret;
	}
	rcu_read_unlock_bh();

	IP6_INC_STATS(dev_net(dst->dev),
		      ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}
/* modelled after ip6_output */
static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, skb_dst(skb)->dev,
			    vrf_finish_output6,
			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
}
/* set dst on skb to send packet to us via dev_xmit path. Allows
 * packet to go through device based features such as qdisc, netfilter
 * hooks and packet sockets with skb->dev set to vrf device.
 */
static struct sk_buff *vrf_ip6_out_redirect(struct net_device *vrf_dev,
					    struct sk_buff *skb)
{
	struct net_vrf *vrf = netdev_priv(vrf_dev);
	struct dst_entry *dst = NULL;
	struct rt6_info *rt6;

	rcu_read_lock();

	rt6 = rcu_dereference(vrf->rt6);
	if (likely(rt6)) {
		dst = &rt6->dst;
		dst_hold(dst);
	}

	rcu_read_unlock();

	if (unlikely(!dst)) {
		vrf_tx_error(vrf_dev, skb);
		return NULL;
	}

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	return skb;
}
static int vrf_output6_direct(struct net *net, struct sock *sk,
			      struct sk_buff *skb)
{
	skb->protocol = htons(ETH_P_IPV6);

	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, skb->dev,
			    vrf_finish_direct,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}
static struct sk_buff *vrf_ip6_out_direct(struct net_device *vrf_dev,
					  struct sock *sk,
					  struct sk_buff *skb)
{
	struct net *net = dev_net(vrf_dev);
	int err;

	skb->dev = vrf_dev;

	err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk,
		      skb, NULL, vrf_dev, vrf_output6_direct);

	if (likely(err == 1))
		err = vrf_output6_direct(net, sk, skb);

	/* reset skb device */
	if (likely(err == 1))
		nf_reset_ct(skb);
	else
		skb = NULL;

	return skb;
}
static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
				   struct sock *sk,
				   struct sk_buff *skb)
{
	/* don't divert link scope packets */
	if (rt6_need_strict(&ipv6_hdr(skb)->daddr))
		return skb;

	if (qdisc_tx_is_default(vrf_dev) ||
	    IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED)
		return vrf_ip6_out_direct(vrf_dev, sk, skb);

	return vrf_ip6_out_redirect(vrf_dev, skb);
}
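
/* Descriptive note: the IPv4 hooks further below mirror this IPv6 pair.
 * The *_out_direct() variant is the fast path used while the device keeps
 * its default no-queue setup (or for XFRM-transformed skbs), while
 * *_out_redirect() re-enters the stack through the VRF device's own dst
 * so qdiscs, netfilter hooks and packet taps all see the skb.
 */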
static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf)
{
	struct rt6_info *rt6 = rtnl_dereference(vrf->rt6);
	struct net *net = dev_net(dev);
	struct dst_entry *dst;

	RCU_INIT_POINTER(vrf->rt6, NULL);
	synchronize_rcu();

	/* move dev in dst's to loopback so this VRF device can be deleted
	 * - based on dst_ifdown
	 */
	if (rt6) {
		dst = &rt6->dst;
		dev_put(dst->dev);
		dst->dev = net->loopback_dev;
		dev_hold(dst->dev);
		dst_release(dst);
	}
}
static int vrf_rt6_create(struct net_device *dev)
{
	int flags = DST_NOPOLICY | DST_NOXFRM;
	struct net_vrf *vrf = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct rt6_info *rt6;
	int rc = -ENOMEM;

	/* IPv6 can be CONFIG enabled and then disabled runtime */
	if (!ipv6_mod_enabled())
		return 0;

	vrf->fib6_table = fib6_new_table(net, vrf->tb_id);
	if (!vrf->fib6_table)
		goto out;

	/* create a dst for routing packets out a VRF device */
	rt6 = ip6_dst_alloc(net, dev, flags);
	if (!rt6)
		goto out;

	rt6->dst.output = vrf_output6;

	rcu_assign_pointer(vrf->rt6, rt6);

	rc = 0;
out:
	return rc;
}
#else
static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
				   struct sock *sk,
				   struct sk_buff *skb)
{
	return skb;
}

static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf)
{
}

static int vrf_rt6_create(struct net_device *dev)
{
	return 0;
}
#endif
/* modelled after ip_finish_output2 */
static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;
	struct net_device *dev = dst->dev;
	unsigned int hh_len = LL_RESERVED_SPACE(dev);
	struct neighbour *neigh;
	bool is_v6gw = false;
	int ret = -EINVAL;

	nf_reset_ct(skb);

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (!skb2) {
			ret = -ENOMEM;
			goto err;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		consume_skb(skb);
		skb = skb2;
	}

	rcu_read_lock_bh();

	neigh = ip_neigh_for_gw(rt, skb, &is_v6gw);
	if (!IS_ERR(neigh)) {
		sock_confirm_neigh(skb, neigh);
		/* if crossing protocols, can not use the cached header */
		ret = neigh_output(neigh, skb, is_v6gw);
		rcu_read_unlock_bh();
		return ret;
	}

	rcu_read_unlock_bh();
err:
	vrf_tx_error(skb->dev, skb);
	return ret;
}
static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;

	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, dev,
			    vrf_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}
/* set dst on skb to send packet to us via dev_xmit path. Allows
 * packet to go through device based features such as qdisc, netfilter
 * hooks and packet sockets with skb->dev set to vrf device.
 */
static struct sk_buff *vrf_ip_out_redirect(struct net_device *vrf_dev,
					   struct sk_buff *skb)
{
	struct net_vrf *vrf = netdev_priv(vrf_dev);
	struct dst_entry *dst = NULL;
	struct rtable *rth;

	rcu_read_lock();

	rth = rcu_dereference(vrf->rth);
	if (likely(rth)) {
		dst = &rth->dst;
		dst_hold(dst);
	}

	rcu_read_unlock();

	if (unlikely(!dst)) {
		vrf_tx_error(vrf_dev, skb);
		return NULL;
	}

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	return skb;
}
static int vrf_output_direct(struct net *net, struct sock *sk,
			     struct sk_buff *skb)
{
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, skb->dev,
			    vrf_finish_direct,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}
static struct sk_buff *vrf_ip_out_direct(struct net_device *vrf_dev,
					 struct sock *sk,
					 struct sk_buff *skb)
{
	struct net *net = dev_net(vrf_dev);
	int err;

	skb->dev = vrf_dev;

	err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk,
		      skb, NULL, vrf_dev, vrf_output_direct);

	if (likely(err == 1))
		err = vrf_output_direct(net, sk, skb);

	/* reset skb device */
	if (likely(err == 1))
		nf_reset_ct(skb);
	else
		skb = NULL;

	return skb;
}
static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev,
				  struct sock *sk,
				  struct sk_buff *skb)
{
	/* don't divert multicast or local broadcast */
	if (ipv4_is_multicast(ip_hdr(skb)->daddr) ||
	    ipv4_is_lbcast(ip_hdr(skb)->daddr))
		return skb;

	if (qdisc_tx_is_default(vrf_dev) ||
	    IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED)
		return vrf_ip_out_direct(vrf_dev, sk, skb);

	return vrf_ip_out_redirect(vrf_dev, skb);
}
/* called with rcu lock held */
static struct sk_buff *vrf_l3_out(struct net_device *vrf_dev,
				  struct sock *sk,
				  struct sk_buff *skb,
				  u16 proto)
{
	switch (proto) {
	case AF_INET:
		return vrf_ip_out(vrf_dev, sk, skb);
	case AF_INET6:
		return vrf_ip6_out(vrf_dev, sk, skb);
	}

	return skb;
}
static void vrf_rtable_release(struct net_device *dev, struct net_vrf *vrf)
{
	struct rtable *rth = rtnl_dereference(vrf->rth);
	struct net *net = dev_net(dev);
	struct dst_entry *dst;

	RCU_INIT_POINTER(vrf->rth, NULL);
	synchronize_rcu();

	/* move dev in dst's to loopback so this VRF device can be deleted
	 * - based on dst_ifdown
	 */
	if (rth) {
		dst = &rth->dst;
		dev_put(dst->dev);
		dst->dev = net->loopback_dev;
		dev_hold(dst->dev);
		dst_release(dst);
	}
}
static int vrf_rtable_create(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct rtable *rth;

	if (!fib_new_table(dev_net(dev), vrf->tb_id))
		return -ENOMEM;

	/* create a dst for routing packets out through a VRF device */
	rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1, 1);
	if (!rth)
		return -ENOMEM;

	rth->dst.output = vrf_output;

	rcu_assign_pointer(vrf->rth, rth);

	return 0;
}
/**************************** device handling ********************/

/* cycle interface to flush neighbor cache and move routes across tables */
static void cycle_netdev(struct net_device *dev,
			 struct netlink_ext_ack *extack)
{
	unsigned int flags = dev->flags;
	int ret;

	if (!netif_running(dev))
		return;

	ret = dev_change_flags(dev, flags & ~IFF_UP, extack);
	if (ret >= 0)
		ret = dev_change_flags(dev, flags, extack);

	if (ret < 0) {
		netdev_err(dev,
			   "Failed to cycle device %s; route tables might be wrong!\n",
			   dev->name);
	}
}
static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev,
			    struct netlink_ext_ack *extack)
{
	int ret;

	/* do not allow loopback device to be enslaved to a VRF.
	 * The vrf device acts as the loopback for the vrf.
	 */
	if (port_dev == dev_net(dev)->loopback_dev) {
		NL_SET_ERR_MSG(extack,
			       "Can not enslave loopback device to a VRF");
		return -EOPNOTSUPP;
	}

	port_dev->priv_flags |= IFF_L3MDEV_SLAVE;
	ret = netdev_master_upper_dev_link(port_dev, dev, NULL, NULL, extack);
	if (ret < 0)
		goto err;

	cycle_netdev(port_dev, extack);

	return 0;

err:
	port_dev->priv_flags &= ~IFF_L3MDEV_SLAVE;
	return ret;
}
static int vrf_add_slave(struct net_device *dev, struct net_device *port_dev,
			 struct netlink_ext_ack *extack)
{
	if (netif_is_l3_master(port_dev)) {
		NL_SET_ERR_MSG(extack,
			       "Can not enslave an L3 master device to a VRF");
		return -EINVAL;
	}

	if (netif_is_l3_slave(port_dev))
		return -EINVAL;

	return do_vrf_add_slave(dev, port_dev, extack);
}
/* inverse of do_vrf_add_slave */
static int do_vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	netdev_upper_dev_unlink(port_dev, dev);
	port_dev->priv_flags &= ~IFF_L3MDEV_SLAVE;

	cycle_netdev(port_dev, NULL);

	return 0;
}

static int vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	return do_vrf_del_slave(dev, port_dev);
}
static void vrf_dev_uninit(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	vrf_rtable_release(dev, vrf);
	vrf_rt6_release(dev, vrf);

	free_percpu(dev->dstats);
	dev->dstats = NULL;
}
static int vrf_dev_init(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
	if (!dev->dstats)
		goto out_nomem;

	/* create the default dst which points back to us */
	if (vrf_rtable_create(dev) != 0)
		goto out_stats;

	if (vrf_rt6_create(dev) != 0)
		goto out_rth;

	dev->flags = IFF_MASTER | IFF_NOARP;

	/* MTU is irrelevant for VRF device; set to 64k similar to lo */
	dev->mtu = 64 * 1024;

	/* similarly, oper state is irrelevant; set to up to avoid confusion */
	dev->operstate = IF_OPER_UP;
	netdev_lockdep_set_classes(dev);
	return 0;

out_rth:
	vrf_rtable_release(dev, vrf);
out_stats:
	free_percpu(dev->dstats);
	dev->dstats = NULL;
out_nomem:
	return -ENOMEM;
}
static const struct net_device_ops vrf_netdev_ops = {
	.ndo_init		= vrf_dev_init,
	.ndo_uninit		= vrf_dev_uninit,
	.ndo_start_xmit		= vrf_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_get_stats64	= vrf_get_stats64,
	.ndo_add_slave		= vrf_add_slave,
	.ndo_del_slave		= vrf_del_slave,
};
static u32 vrf_fib_table(const struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	return vrf->tb_id;
}
static int vrf_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}
static struct sk_buff *vrf_rcv_nfhook(u8 pf, unsigned int hook,
				      struct sk_buff *skb,
				      struct net_device *dev)
{
	struct net *net = dev_net(dev);

	if (nf_hook(pf, hook, net, NULL, skb, dev, NULL, vrf_rcv_finish) != 1)
		skb = NULL;	/* kfree_skb(skb) handled by nf code */

	return skb;
}
#if IS_ENABLED(CONFIG_IPV6)
/* neighbor handling is done with actual device; do not want
 * to flip skb->dev for those ndisc packets. This really fails
 * for multiple next protocols (e.g., NEXTHDR_HOP). But it is
 * a start.
 */
static bool ipv6_ndisc_frame(const struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	bool rc = false;

	if (iph->nexthdr == NEXTHDR_ICMP) {
		const struct icmp6hdr *icmph;
		struct icmp6hdr _icmph;

		icmph = skb_header_pointer(skb, sizeof(*iph),
					   sizeof(_icmph), &_icmph);
		if (!icmph)
			goto out;

		switch (icmph->icmp6_type) {
		case NDISC_ROUTER_SOLICITATION:
		case NDISC_ROUTER_ADVERTISEMENT:
		case NDISC_NEIGHBOUR_SOLICITATION:
		case NDISC_NEIGHBOUR_ADVERTISEMENT:
		case NDISC_REDIRECT:
			rc = true;
			break;
		}
	}

out:
	return rc;
}
static struct rt6_info *vrf_ip6_route_lookup(struct net *net,
					     const struct net_device *dev,
					     struct flowi6 *fl6,
					     int ifindex,
					     const struct sk_buff *skb,
					     int flags)
{
	struct net_vrf *vrf = netdev_priv(dev);

	return ip6_pol_route(net, vrf->fib6_table, ifindex, fl6, skb, flags);
}
static void vrf_ip6_input_dst(struct sk_buff *skb, struct net_device *vrf_dev,
			      int ifindex)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct flowi6 fl6 = {
		.flowi6_iif	= ifindex,
		.flowi6_mark	= skb->mark,
		.flowi6_proto	= iph->nexthdr,
		.daddr		= iph->daddr,
		.saddr		= iph->saddr,
		.flowlabel	= ip6_flowinfo(iph),
	};
	struct net *net = dev_net(vrf_dev);
	struct rt6_info *rt6;

	rt6 = vrf_ip6_route_lookup(net, vrf_dev, &fl6, ifindex, skb,
				   RT6_LOOKUP_F_HAS_SADDR | RT6_LOOKUP_F_IFACE);
	if (unlikely(!rt6))
		return;

	if (unlikely(&rt6->dst == &net->ipv6.ip6_null_entry->dst))
		return;

	skb_dst_set(skb, &rt6->dst);
}
static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
				   struct sk_buff *skb)
{
	int orig_iif = skb->skb_iif;
	bool need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
	bool is_ndisc = ipv6_ndisc_frame(skb);

	/* loopback, multicast & non-ND link-local traffic; do not push through
	 * packet taps again. Reset pkt_type for upper layers to process skb
	 */
	if (skb->pkt_type == PACKET_LOOPBACK || (need_strict && !is_ndisc)) {
		skb->dev = vrf_dev;
		skb->skb_iif = vrf_dev->ifindex;
		IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
		if (skb->pkt_type == PACKET_LOOPBACK)
			skb->pkt_type = PACKET_HOST;
		goto out;
	}

	/* if packet is NDISC then keep the ingress interface */
	if (!is_ndisc) {
		vrf_rx_stats(vrf_dev, skb->len);
		skb->dev = vrf_dev;
		skb->skb_iif = vrf_dev->ifindex;

		if (!list_empty(&vrf_dev->ptype_all)) {
			skb_push(skb, skb->mac_len);
			dev_queue_xmit_nit(skb, vrf_dev);
			skb_pull(skb, skb->mac_len);
		}

		IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
	}

	if (need_strict)
		vrf_ip6_input_dst(skb, vrf_dev, orig_iif);

	skb = vrf_rcv_nfhook(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, vrf_dev);
out:
	return skb;
}
#else
static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
				   struct sk_buff *skb)
{
	return skb;
}
#endif
static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
				  struct sk_buff *skb)
{
	skb->dev = vrf_dev;
	skb->skb_iif = vrf_dev->ifindex;
	IPCB(skb)->flags |= IPSKB_L3SLAVE;

	if (ipv4_is_multicast(ip_hdr(skb)->daddr))
		goto out;

	/* loopback traffic; do not push through packet taps again.
	 * Reset pkt_type for upper layers to process skb
	 */
	if (skb->pkt_type == PACKET_LOOPBACK) {
		skb->pkt_type = PACKET_HOST;
		goto out;
	}

	vrf_rx_stats(vrf_dev, skb->len);

	if (!list_empty(&vrf_dev->ptype_all)) {
		skb_push(skb, skb->mac_len);
		dev_queue_xmit_nit(skb, vrf_dev);
		skb_pull(skb, skb->mac_len);
	}

	skb = vrf_rcv_nfhook(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, vrf_dev);
out:
	return skb;
}
/* called with rcu lock held */
static struct sk_buff *vrf_l3_rcv(struct net_device *vrf_dev,
				  struct sk_buff *skb,
				  u16 proto)
{
	switch (proto) {
	case AF_INET:
		return vrf_ip_rcv(vrf_dev, skb);
	case AF_INET6:
		return vrf_ip6_rcv(vrf_dev, skb);
	}

	return skb;
}
#if IS_ENABLED(CONFIG_IPV6)
/* send to link-local or multicast address via interface enslaved to
 * VRF device. Force lookup to VRF table without changing flow struct
 * Note: Caller to this function must hold rcu_read_lock() and no refcnt
 * is taken on the dst by this function.
 */
static struct dst_entry *vrf_link_scope_lookup(const struct net_device *dev,
					       struct flowi6 *fl6)
{
	struct net *net = dev_net(dev);
	int flags = RT6_LOOKUP_F_IFACE | RT6_LOOKUP_F_DST_NOREF;
	struct dst_entry *dst = NULL;
	struct rt6_info *rt;

	/* VRF device does not have a link-local address and
	 * sending packets to link-local or mcast addresses over
	 * a VRF device does not make sense
	 */
	if (fl6->flowi6_oif == dev->ifindex) {
		dst = &net->ipv6.ip6_null_entry->dst;
		return dst;
	}

	if (!ipv6_addr_any(&fl6->saddr))
		flags |= RT6_LOOKUP_F_HAS_SADDR;

	rt = vrf_ip6_route_lookup(net, dev, fl6, fl6->flowi6_oif, NULL, flags);
	if (rt)
		dst = &rt->dst;

	return dst;
}
#endif
static const struct l3mdev_ops vrf_l3mdev_ops = {
	.l3mdev_fib_table	= vrf_fib_table,
	.l3mdev_l3_rcv		= vrf_l3_rcv,
	.l3mdev_l3_out		= vrf_l3_out,
#if IS_ENABLED(CONFIG_IPV6)
	.l3mdev_link_scope_lookup = vrf_link_scope_lookup,
#endif
};
static void vrf_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}

static const struct ethtool_ops vrf_ethtool_ops = {
	.get_drvinfo	= vrf_get_drvinfo,
};
static inline size_t vrf_fib_rule_nl_size(void)
{
	size_t sz;

	sz  = NLMSG_ALIGN(sizeof(struct fib_rule_hdr));
	sz += nla_total_size(sizeof(u8));	/* FRA_L3MDEV */
	sz += nla_total_size(sizeof(u32));	/* FRA_PRIORITY */
	sz += nla_total_size(sizeof(u8));	/* FRA_PROTOCOL */

	return sz;
}
static int vrf_fib_rule(const struct net_device *dev, __u8 family, bool add_it)
{
	struct fib_rule_hdr *frh;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	int err;

	if ((family == AF_INET6 || family == RTNL_FAMILY_IP6MR) &&
	    !ipv6_mod_enabled())
		return 0;

	skb = nlmsg_new(vrf_fib_rule_nl_size(), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	nlh = nlmsg_put(skb, 0, 0, 0, sizeof(*frh), 0);
	if (!nlh)
		goto nla_put_failure;

	/* rule only needs to appear once */
	nlh->nlmsg_flags |= NLM_F_EXCL;

	frh = nlmsg_data(nlh);
	memset(frh, 0, sizeof(*frh));
	frh->family = family;
	frh->action = FR_ACT_TO_TBL;

	if (nla_put_u8(skb, FRA_PROTOCOL, RTPROT_KERNEL))
		goto nla_put_failure;

	if (nla_put_u8(skb, FRA_L3MDEV, 1))
		goto nla_put_failure;

	if (nla_put_u32(skb, FRA_PRIORITY, FIB_RULE_PREF))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);

	/* fib_nl_{new,del}rule handling looks for net from skb->sk */
	skb->sk = dev_net(dev)->rtnl;
	if (add_it) {
		err = fib_nl_newrule(skb, nlh, NULL);
		if (err == -EEXIST)
			err = 0;
	} else {
		err = fib_nl_delrule(skb, nlh, NULL);
		if (err == -ENOENT)
			err = 0;
	}
	nlmsg_free(skb);

	return err;

nla_put_failure:
	nlmsg_free(skb);

	return -EMSGSIZE;
}
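
/* Illustrative only: the FIB rule built above is the l3mdev rule that
 * appears once per namespace when the first VRF is created, e.g. in
 * `ip rule` output:
 *
 *	0:	from all lookup local
 *	1000:	from all lookup [l3mdev-table]
 *	32766:	from all lookup main
 *
 * FRA_L3MDEV makes the rule resolve to the table of the skb's VRF
 * device rather than a fixed table id, and FIB_RULE_PREF (1000) is its
 * preference.
 */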
static int vrf_add_fib_rules(const struct net_device *dev)
{
	int err;

	err = vrf_fib_rule(dev, AF_INET, true);
	if (err < 0)
		goto out_err;

	err = vrf_fib_rule(dev, AF_INET6, true);
	if (err < 0)
		goto ipv6_err;

#if IS_ENABLED(CONFIG_IP_MROUTE_MULTIPLE_TABLES)
	err = vrf_fib_rule(dev, RTNL_FAMILY_IPMR, true);
	if (err < 0)
		goto ipmr_err;
#endif

#if IS_ENABLED(CONFIG_IPV6_MROUTE_MULTIPLE_TABLES)
	err = vrf_fib_rule(dev, RTNL_FAMILY_IP6MR, true);
	if (err < 0)
		goto ip6mr_err;
#endif

	return 0;

#if IS_ENABLED(CONFIG_IPV6_MROUTE_MULTIPLE_TABLES)
ip6mr_err:
	vrf_fib_rule(dev, RTNL_FAMILY_IPMR, false);
#endif

#if IS_ENABLED(CONFIG_IP_MROUTE_MULTIPLE_TABLES)
ipmr_err:
	vrf_fib_rule(dev, AF_INET6, false);
#endif

ipv6_err:
	vrf_fib_rule(dev, AF_INET, false);

out_err:
	netdev_err(dev, "Failed to add FIB rules.\n");
	return err;
}
static void vrf_setup(struct net_device *dev)
{
	ether_setup(dev);

	/* Initialize the device structure. */
	dev->netdev_ops = &vrf_netdev_ops;
	dev->l3mdev_ops = &vrf_l3mdev_ops;
	dev->ethtool_ops = &vrf_ethtool_ops;
	dev->needs_free_netdev = true;

	/* Fill in device structure with ethernet-generic values. */
	eth_hw_addr_random(dev);

	/* don't acquire vrf device's netif_tx_lock when transmitting */
	dev->features |= NETIF_F_LLTX;

	/* don't allow vrf devices to change network namespaces. */
	dev->features |= NETIF_F_NETNS_LOCAL;

	/* does not make sense for a VLAN to be added to a vrf device */
	dev->features |= NETIF_F_VLAN_CHALLENGED;

	/* enable offload features */
	dev->features |= NETIF_F_GSO_SOFTWARE;
	dev->features |= NETIF_F_RXCSUM | NETIF_F_HW_CSUM | NETIF_F_SCTP_CRC;
	dev->features |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA;

	dev->hw_features = dev->features;
	dev->hw_enc_features = dev->features;

	/* default to no qdisc; user can add if desired */
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->priv_flags |= IFF_NO_RX_HANDLER;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	/* VRF devices do not care about MTU, but if the MTU is set
	 * too low then the ipv4 and ipv6 protocols are disabled
	 * which breaks networking.
	 */
	dev->min_mtu = IPV6_MIN_MTU;
	dev->max_mtu = ETH_MAX_MTU;
}
static int vrf_validate(struct nlattr *tb[], struct nlattr *data[],
			struct netlink_ext_ack *extack)
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
			NL_SET_ERR_MSG(extack, "Invalid hardware address");
			return -EINVAL;
		}
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
			NL_SET_ERR_MSG(extack, "Invalid hardware address");
			return -EADDRNOTAVAIL;
		}
	}
	return 0;
}
static void vrf_dellink(struct net_device *dev, struct list_head *head)
{
	struct net_device *port_dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(dev, port_dev, iter)
		vrf_del_slave(dev, port_dev);

	vrf_map_unregister_dev(dev);

	unregister_netdevice_queue(dev, head);
}
static int vrf_newlink(struct net *src_net, struct net_device *dev,
		       struct nlattr *tb[], struct nlattr *data[],
		       struct netlink_ext_ack *extack)
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct netns_vrf *nn_vrf;
	bool *add_fib_rules;
	struct net *net;
	int err;

	if (!data || !data[IFLA_VRF_TABLE]) {
		NL_SET_ERR_MSG(extack, "VRF table id is missing");
		return -EINVAL;
	}

	vrf->tb_id = nla_get_u32(data[IFLA_VRF_TABLE]);
	if (vrf->tb_id == RT_TABLE_UNSPEC) {
		NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VRF_TABLE],
				    "Invalid VRF table id");
		return -EINVAL;
	}

	dev->priv_flags |= IFF_L3MDEV_MASTER;

	err = register_netdevice(dev);
	if (err)
		goto out;

	/* mapping between table_id and vrf;
	 * note: such binding could not be done in the dev init function
	 * because dev->ifindex is not yet available.
	 */
	vrf->ifindex = dev->ifindex;

	err = vrf_map_register_dev(dev, extack);
	if (err) {
		unregister_netdevice(dev);
		goto out;
	}

	net = dev_net(dev);
	nn_vrf = net_generic(net, vrf_net_id);

	add_fib_rules = &nn_vrf->add_fib_rules;
	if (*add_fib_rules) {
		err = vrf_add_fib_rules(dev);
		if (err) {
			vrf_map_unregister_dev(dev);
			unregister_netdevice(dev);
			goto out;
		}
		*add_fib_rules = false;
	}

out:
	return err;
}
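
/* Illustrative usage (userspace, names are examples only):
 *
 *	ip link add vrf-blue type vrf table 10	# vrf_newlink()
 *	ip link set dev vrf-blue up
 *	ip link set dev eth0 master vrf-blue	# vrf_add_slave()
 */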
static size_t vrf_nl_getsize(const struct net_device *dev)
{
	return nla_total_size(sizeof(u32));	/* IFLA_VRF_TABLE */
}

static int vrf_fillinfo(struct sk_buff *skb,
			const struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	return nla_put_u32(skb, IFLA_VRF_TABLE, vrf->tb_id);
}
static size_t vrf_get_slave_size(const struct net_device *bond_dev,
				 const struct net_device *slave_dev)
{
	return nla_total_size(sizeof(u32));	/* IFLA_VRF_PORT_TABLE */
}

static int vrf_fill_slave_info(struct sk_buff *skb,
			       const struct net_device *vrf_dev,
			       const struct net_device *slave_dev)
{
	struct net_vrf *vrf = netdev_priv(vrf_dev);

	if (nla_put_u32(skb, IFLA_VRF_PORT_TABLE, vrf->tb_id))
		return -EMSGSIZE;

	return 0;
}
static const struct nla_policy vrf_nl_policy[IFLA_VRF_MAX + 1] = {
	[IFLA_VRF_TABLE] = { .type = NLA_U32 },
};

static struct rtnl_link_ops vrf_link_ops __read_mostly = {
	.kind		= DRV_NAME,
	.priv_size	= sizeof(struct net_vrf),

	.get_size	= vrf_nl_getsize,
	.policy		= vrf_nl_policy,
	.validate	= vrf_validate,
	.fill_info	= vrf_fillinfo,

	.get_slave_size  = vrf_get_slave_size,
	.fill_slave_info = vrf_fill_slave_info,

	.newlink	= vrf_newlink,
	.dellink	= vrf_dellink,
	.setup		= vrf_setup,
	.maxtype	= IFLA_VRF_MAX,
};
static int vrf_device_event(struct notifier_block *unused,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	/* only care about unregister events to drop slave references */
	if (event == NETDEV_UNREGISTER) {
		struct net_device *vrf_dev;

		if (!netif_is_l3_slave(dev))
			goto out;

		vrf_dev = netdev_master_upper_dev_get(dev);
		vrf_del_slave(vrf_dev, dev);
	}
out:
	return NOTIFY_DONE;
}

static struct notifier_block vrf_notifier_block __read_mostly = {
	.notifier_call = vrf_device_event,
};
static int vrf_map_init(struct vrf_map *vmap)
{
	spin_lock_init(&vmap->vmap_lock);
	hash_init(vmap->ht);

	vmap->strict_mode = false;

	return 0;
}
#ifdef CONFIG_SYSCTL
static bool vrf_strict_mode(struct vrf_map *vmap)
{
	bool strict_mode;

	vrf_map_lock(vmap);
	strict_mode = vmap->strict_mode;
	vrf_map_unlock(vmap);

	return strict_mode;
}
static int vrf_strict_mode_change(struct vrf_map *vmap, bool new_mode)
{
	bool *cur_mode;
	int res = 0;

	vrf_map_lock(vmap);

	cur_mode = &vmap->strict_mode;
	if (*cur_mode == new_mode)
		goto unlock;

	if (*cur_mode) {
		/* disable strict mode */
		*cur_mode = false;
	} else {
		if (vmap->shared_tables) {
			/* we cannot allow strict_mode because there are some
			 * vrfs that share one or more tables.
			 */
			res = -EBUSY;
			goto unlock;
		}

		/* no tables are shared among vrfs, so we can go back
		 * to 1:1 association between a vrf with its table.
		 */
		*cur_mode = true;
	}

unlock:
	vrf_map_unlock(vmap);

	return res;
}
static int vrf_shared_table_handler(struct ctl_table *table, int write,
				    void *buffer, size_t *lenp, loff_t *ppos)
{
	struct net *net = (struct net *)table->extra1;
	struct vrf_map *vmap = netns_vrf_map(net);
	int proc_strict_mode = 0;
	struct ctl_table tmp = {
		.procname	= table->procname,
		.data		= &proc_strict_mode,
		.maxlen		= sizeof(int),
		.mode		= table->mode,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	};
	int ret;

	if (!write)
		proc_strict_mode = vrf_strict_mode(vmap);

	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);

	if (write && ret == 0)
		ret = vrf_strict_mode_change(vmap, (bool)proc_strict_mode);

	return ret;
}
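
/* Illustrative only: the handler above backs
 * /proc/sys/net/vrf/strict_mode, so e.g.
 *
 *	sysctl -w net.vrf.strict_mode=1
 *
 * succeeds only while no table is shared (shared_tables == 0), and
 * values other than 0/1 are rejected by proc_dointvec_minmax().
 */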
static const struct ctl_table vrf_table[] = {
	{
		.procname	= "strict_mode",
		.data		= NULL,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= vrf_shared_table_handler,
		/* set by the vrf_netns_init */
		.extra1		= NULL,
	},
	{ },
};
static int vrf_netns_init_sysctl(struct net *net, struct netns_vrf *nn_vrf)
{
	struct ctl_table *table;

	table = kmemdup(vrf_table, sizeof(vrf_table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	/* init the extra1 parameter with the reference to current netns */
	table[0].extra1 = net;

	nn_vrf->ctl_hdr = register_net_sysctl(net, "net/vrf", table);
	if (!nn_vrf->ctl_hdr) {
		kfree(table);
		return -ENOMEM;
	}

	return 0;
}
static void vrf_netns_exit_sysctl(struct net *net)
{
	struct netns_vrf *nn_vrf = net_generic(net, vrf_net_id);
	struct ctl_table *table;

	table = nn_vrf->ctl_hdr->ctl_table_arg;
	unregister_net_sysctl_table(nn_vrf->ctl_hdr);
	kfree(table);
}
#else
static int vrf_netns_init_sysctl(struct net *net, struct netns_vrf *nn_vrf)
{
	return 0;
}

static void vrf_netns_exit_sysctl(struct net *net)
{
}
#endif
/* Initialize per network namespace state */
static int __net_init vrf_netns_init(struct net *net)
{
	struct netns_vrf *nn_vrf = net_generic(net, vrf_net_id);

	nn_vrf->add_fib_rules = true;
	vrf_map_init(&nn_vrf->vmap);

	return vrf_netns_init_sysctl(net, nn_vrf);
}

static void __net_exit vrf_netns_exit(struct net *net)
{
	vrf_netns_exit_sysctl(net);
}

static struct pernet_operations vrf_net_ops __net_initdata = {
	.init = vrf_netns_init,
	.exit = vrf_netns_exit,
	.id   = &vrf_net_id,
	.size = sizeof(struct netns_vrf),
};
static int __init vrf_init_module(void)
{
	int rc;

	register_netdevice_notifier(&vrf_notifier_block);

	rc = register_pernet_subsys(&vrf_net_ops);
	if (rc < 0)
		goto error;

	rc = l3mdev_table_lookup_register(L3MDEV_TYPE_VRF,
					  vrf_ifindex_lookup_by_table_id);
	if (rc < 0)
		goto unreg_pernet;

	rc = rtnl_link_register(&vrf_link_ops);
	if (rc < 0)
		goto table_lookup_unreg;

	return 0;

table_lookup_unreg:
	l3mdev_table_lookup_unregister(L3MDEV_TYPE_VRF,
				       vrf_ifindex_lookup_by_table_id);

unreg_pernet:
	unregister_pernet_subsys(&vrf_net_ops);

error:
	unregister_netdevice_notifier(&vrf_notifier_block);
	return rc;
}

module_init(vrf_init_module);
MODULE_AUTHOR("Shrijeet Mukherjee, David Ahern");
MODULE_DESCRIPTION("Device driver to instantiate VRF domains");
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);
MODULE_VERSION(DRV_VERSION);