// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007 OpenVZ http://openvz.org, SWsoft Inc
 *
 * Author: Pavel Emelianov <xemul@openvz.org>
 * Ethtool interface from: Eric W. Biederman <ebiederm@xmission.com>
 */
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/u64_stats_sync.h>

#include <net/rtnetlink.h>

#include <linux/veth.h>
#include <linux/module.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/ptr_ring.h>
#include <linux/bpf_trace.h>
#include <linux/net_tstamp.h>
#define DRV_NAME	"veth"
#define DRV_VERSION	"1.0"

#define VETH_XDP_FLAG		BIT(0)
#define VETH_RING_SIZE		256
#define VETH_XDP_HEADROOM	(XDP_PACKET_HEADROOM + NET_IP_ALIGN)

#define VETH_XDP_TX_BULK_SIZE	16
#define VETH_XDP_BATCH		16
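
/* VETH_XDP_HEADROOM leaves XDP_PACKET_HEADROOM plus NET_IP_ALIGN in front
 * of packets copied for XDP, so a program may grow headers with
 * bpf_xdp_adjust_head(). VETH_XDP_TX_BULK_SIZE bounds the on-stack XDP_TX
 * bulk queue; VETH_XDP_BATCH bounds the skb-allocation batch on receive.
 */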
struct veth_stats {
	u64	rx_drops;
	/* xdp */
	u64	xdp_packets;
	u64	xdp_bytes;
	u64	xdp_redirect;
	u64	xdp_drops;
	u64	xdp_tx;
	u64	xdp_tx_err;
	u64	peer_tq_xdp_xmit;
	u64	peer_tq_xdp_xmit_err;
};

struct veth_rq_stats {
	struct veth_stats	vs;
	struct u64_stats_sync	syncp;
};
struct veth_rq {
	struct napi_struct	xdp_napi;
	struct napi_struct __rcu *napi; /* points to xdp_napi when the latter is initialized */
	struct net_device	*dev;
	struct bpf_prog __rcu	*xdp_prog;
	struct xdp_mem_info	xdp_mem;
	struct veth_rq_stats	stats;
	bool			rx_notify_masked;
	struct ptr_ring		xdp_ring;
	struct xdp_rxq_info	xdp_rxq;
};

struct veth_priv {
	struct net_device __rcu	*peer;
	atomic64_t		dropped;
	struct bpf_prog		*_xdp_prog;
	struct veth_rq		*rq;
	unsigned int		requested_headroom;
};

struct veth_xdp_tx_bq {
	struct xdp_frame *q[VETH_XDP_TX_BULK_SIZE];
	unsigned int count;
};
struct veth_q_stat_desc {
	char	desc[ETH_GSTRING_LEN];
	size_t	offset;
};

#define VETH_RQ_STAT(m)	offsetof(struct veth_stats, m)
static const struct veth_q_stat_desc veth_rq_stats_desc[] = {
	{ "xdp_packets",	VETH_RQ_STAT(xdp_packets) },
	{ "xdp_bytes",		VETH_RQ_STAT(xdp_bytes) },
	{ "drops",		VETH_RQ_STAT(rx_drops) },
	{ "xdp_redirect",	VETH_RQ_STAT(xdp_redirect) },
	{ "xdp_drops",		VETH_RQ_STAT(xdp_drops) },
	{ "xdp_tx",		VETH_RQ_STAT(xdp_tx) },
	{ "xdp_tx_errors",	VETH_RQ_STAT(xdp_tx_err) },
};

#define VETH_RQ_STATS_LEN	ARRAY_SIZE(veth_rq_stats_desc)

static const struct veth_q_stat_desc veth_tq_stats_desc[] = {
	{ "xdp_xmit",		VETH_RQ_STAT(peer_tq_xdp_xmit) },
	{ "xdp_xmit_errors",	VETH_RQ_STAT(peer_tq_xdp_xmit_err) },
};

#define VETH_TQ_STATS_LEN	ARRAY_SIZE(veth_tq_stats_desc)
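
/* ethtool stats layout: data[0] is the peer ifindex, followed by
 * VETH_RQ_STATS_LEN counters per local rx queue, then VETH_TQ_STATS_LEN
 * counters per local tx queue. The tx counters are accumulated from the
 * peer's rx queues, folding peer rxq i into local txq i % real_num_tx_queues
 * (see veth_get_ethtool_stats() below).
 */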
static struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "peer_ifindex" },
};
static int veth_get_link_ksettings(struct net_device *dev,
				   struct ethtool_link_ksettings *cmd)
{
	cmd->base.speed		= SPEED_10000;
	cmd->base.duplex	= DUPLEX_FULL;
	cmd->base.port		= PORT_TP;
	cmd->base.autoneg	= AUTONEG_DISABLE;
	return 0;
}
static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}
static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	char *p = (char *)buf;
	int i, j;

	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(p, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		p += sizeof(ethtool_stats_keys);
		for (i = 0; i < dev->real_num_rx_queues; i++) {
			for (j = 0; j < VETH_RQ_STATS_LEN; j++) {
				snprintf(p, ETH_GSTRING_LEN,
					 "rx_queue_%u_%.18s",
					 i, veth_rq_stats_desc[j].desc);
				p += ETH_GSTRING_LEN;
			}
		}
		for (i = 0; i < dev->real_num_tx_queues; i++) {
			for (j = 0; j < VETH_TQ_STATS_LEN; j++) {
				snprintf(p, ETH_GSTRING_LEN,
					 "tx_queue_%u_%.18s",
					 i, veth_tq_stats_desc[j].desc);
				p += ETH_GSTRING_LEN;
			}
		}
		break;
	}
}
static int veth_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ethtool_stats_keys) +
		       VETH_RQ_STATS_LEN * dev->real_num_rx_queues +
		       VETH_TQ_STATS_LEN * dev->real_num_tx_queues;
	default:
		return -EOPNOTSUPP;
	}
}
static void veth_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *stats, u64 *data)
{
	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);
	int i, j, idx;

	data[0] = peer ? peer->ifindex : 0;
	idx = 1;
	for (i = 0; i < dev->real_num_rx_queues; i++) {
		const struct veth_rq_stats *rq_stats = &priv->rq[i].stats;
		const void *stats_base = (void *)&rq_stats->vs;
		unsigned int start;
		size_t offset;

		do {
			start = u64_stats_fetch_begin_irq(&rq_stats->syncp);
			for (j = 0; j < VETH_RQ_STATS_LEN; j++) {
				offset = veth_rq_stats_desc[j].offset;
				data[idx + j] = *(u64 *)(stats_base + offset);
			}
		} while (u64_stats_fetch_retry_irq(&rq_stats->syncp, start));
		idx += VETH_RQ_STATS_LEN;
	}

	if (!peer)
		return;

	rcv_priv = netdev_priv(peer);
	for (i = 0; i < peer->real_num_rx_queues; i++) {
		const struct veth_rq_stats *rq_stats = &rcv_priv->rq[i].stats;
		const void *base = (void *)&rq_stats->vs;
		unsigned int start, tx_idx = idx;
		size_t offset;

		tx_idx += (i % dev->real_num_tx_queues) * VETH_TQ_STATS_LEN;
		do {
			start = u64_stats_fetch_begin_irq(&rq_stats->syncp);
			for (j = 0; j < VETH_TQ_STATS_LEN; j++) {
				offset = veth_tq_stats_desc[j].offset;
				data[tx_idx + j] += *(u64 *)(base + offset);
			}
		} while (u64_stats_fetch_retry_irq(&rq_stats->syncp, start));
	}
}
static void veth_get_channels(struct net_device *dev,
			      struct ethtool_channels *channels)
{
	channels->tx_count = dev->real_num_tx_queues;
	channels->rx_count = dev->real_num_rx_queues;
	channels->max_tx = dev->num_tx_queues;
	channels->max_rx = dev->num_rx_queues;
}

static int veth_set_channels(struct net_device *dev,
			     struct ethtool_channels *ch);
static const struct ethtool_ops veth_ethtool_ops = {
	.get_drvinfo		= veth_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_strings		= veth_get_strings,
	.get_sset_count		= veth_get_sset_count,
	.get_ethtool_stats	= veth_get_ethtool_stats,
	.get_link_ksettings	= veth_get_link_ksettings,
	.get_ts_info		= ethtool_op_get_ts_info,
	.get_channels		= veth_get_channels,
	.set_channels		= veth_set_channels,
};

/* general routines */
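
/* Entries in xdp_ring are either sk_buffs (from veth_xmit) or xdp_frames
 * (from veth_xdp_xmit). Both are at least word-aligned, so bit 0 of the
 * pointer is free to tag xdp_frames: veth_xdp_to_ptr() sets the tag and
 * veth_ptr_to_xdp() strips it again on the consumer side.
 */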
static bool veth_is_xdp_frame(void *ptr)
{
	return (unsigned long)ptr & VETH_XDP_FLAG;
}

static struct xdp_frame *veth_ptr_to_xdp(void *ptr)
{
	return (void *)((unsigned long)ptr & ~VETH_XDP_FLAG);
}

static void *veth_xdp_to_ptr(struct xdp_frame *xdp)
{
	return (void *)((unsigned long)xdp | VETH_XDP_FLAG);
}

static void veth_ptr_free(void *ptr)
{
	if (veth_is_xdp_frame(ptr))
		xdp_return_frame(veth_ptr_to_xdp(ptr));
	else
		kfree_skb(ptr);
}
static void __veth_xdp_flush(struct veth_rq *rq)
{
	/* Write ptr_ring before reading rx_notify_masked */
	smp_mb();
	if (!rq->rx_notify_masked) {
		rq->rx_notify_masked = true;
		napi_schedule(&rq->xdp_napi);
	}
}
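
/* The smp_mb() above pairs with the smp_store_mb() in veth_poll(): either
 * the producer observes rx_notify_masked cleared and schedules NAPI, or the
 * poller observes the freshly produced ring entries on its re-check, so a
 * wakeup cannot be lost in between.
 */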
static int veth_xdp_rx(struct veth_rq *rq, struct sk_buff *skb)
{
	if (unlikely(ptr_ring_produce(&rq->xdp_ring, skb))) {
		dev_kfree_skb_any(skb);
		return NET_RX_DROP;
	}

	return NET_RX_SUCCESS;
}
static int veth_forward_skb(struct net_device *dev, struct sk_buff *skb,
			    struct veth_rq *rq, bool xdp)
{
	return __dev_forward_skb(dev, skb) ?: xdp ?
		veth_xdp_rx(rq, skb) :
		netif_rx(skb);
}
/* return true if the specified skb has chances of GRO aggregation
 * Don't strive for accuracy, but try to avoid GRO overhead in the most
 * common scenarios.
 * When XDP is enabled, all traffic is considered eligible, as the xmit
 * device has TSO off.
 * When TSO is enabled on the xmit device, we are likely interested only
 * in UDP aggregation, explicitly check for that if the skb is suspected
 * to belong to locally generated UDP traffic - the sock_wfree destructor
 * is used by UDP, ICMP and XDP sockets.
 */
static bool veth_skb_is_eligible_for_gro(const struct net_device *dev,
					 const struct net_device *rcv,
					 const struct sk_buff *skb)
{
	return !(dev->features & NETIF_F_ALL_TSO) ||
		(skb->destructor == sock_wfree &&
		 rcv->features & (NETIF_F_GRO_FRAGLIST | NETIF_F_GRO_UDP_FWD));
}
static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
	struct veth_rq *rq = NULL;
	struct net_device *rcv;
	int length = skb->len;
	bool use_napi = false;
	int rxq;

	rcu_read_lock();
	rcv = rcu_dereference(priv->peer);
	if (unlikely(!rcv)) {
		kfree_skb(skb);
		goto drop;
	}

	rcv_priv = netdev_priv(rcv);
	rxq = skb_get_queue_mapping(skb);
	if (rxq < rcv->real_num_rx_queues) {
		rq = &rcv_priv->rq[rxq];

		/* The napi pointer is available when an XDP program is
		 * attached or when GRO is enabled.
		 * Don't bother with napi/GRO if the skb can't be aggregated
		 */
		use_napi = rcu_access_pointer(rq->napi) &&
			   veth_skb_is_eligible_for_gro(dev, rcv, skb);
		skb_record_rx_queue(skb, rxq);
	}

	skb_tx_timestamp(skb);
	if (likely(veth_forward_skb(rcv, skb, rq, use_napi) == NET_RX_SUCCESS)) {
		if (!use_napi)
			dev_lstats_add(dev, length);
	} else {
drop:
		atomic64_inc(&priv->dropped);
	}

	if (use_napi)
		__veth_xdp_flush(rq);

	rcu_read_unlock();

	return NETDEV_TX_OK;
}
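
/* Note on accounting: the lstats tx counters above are only bumped on the
 * non-NAPI path; when use_napi is true the peer's NAPI handler counts the
 * packet in its per-queue rq stats instead (see veth_xdp_rcv()), and
 * veth_get_stats64() folds those back into this device's tx totals.
 */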
static u64 veth_stats_tx(struct net_device *dev, u64 *packets, u64 *bytes)
{
	struct veth_priv *priv = netdev_priv(dev);

	dev_lstats_read(dev, packets, bytes);
	return atomic64_read(&priv->dropped);
}
static void veth_stats_rx(struct veth_stats *result, struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int i;

	result->peer_tq_xdp_xmit_err = 0;
	result->xdp_packets = 0;
	result->xdp_tx_err = 0;
	result->xdp_bytes = 0;
	result->rx_drops = 0;
	for (i = 0; i < dev->num_rx_queues; i++) {
		u64 packets, bytes, drops, xdp_tx_err, peer_tq_xdp_xmit_err;
		struct veth_rq_stats *stats = &priv->rq[i].stats;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			peer_tq_xdp_xmit_err = stats->vs.peer_tq_xdp_xmit_err;
			xdp_tx_err = stats->vs.xdp_tx_err;
			packets = stats->vs.xdp_packets;
			bytes = stats->vs.xdp_bytes;
			drops = stats->vs.rx_drops;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
		result->peer_tq_xdp_xmit_err += peer_tq_xdp_xmit_err;
		result->xdp_tx_err += xdp_tx_err;
		result->xdp_packets += packets;
		result->xdp_bytes += bytes;
		result->rx_drops += drops;
	}
}
static void veth_get_stats64(struct net_device *dev,
			     struct rtnl_link_stats64 *tot)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer;
	struct veth_stats rx;
	u64 packets, bytes;

	tot->tx_dropped = veth_stats_tx(dev, &packets, &bytes);
	tot->tx_bytes = bytes;
	tot->tx_packets = packets;

	veth_stats_rx(&rx, dev);
	tot->tx_dropped += rx.xdp_tx_err;
	tot->rx_dropped = rx.rx_drops + rx.peer_tq_xdp_xmit_err;
	tot->rx_bytes = rx.xdp_bytes;
	tot->rx_packets = rx.xdp_packets;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	if (peer) {
		veth_stats_tx(peer, &packets, &bytes);
		tot->rx_bytes += bytes;
		tot->rx_packets += packets;

		veth_stats_rx(&rx, peer);
		tot->tx_dropped += rx.peer_tq_xdp_xmit_err;
		tot->rx_dropped += rx.xdp_tx_err;
		tot->tx_bytes += rx.xdp_bytes;
		tot->tx_packets += rx.xdp_packets;
	}
	rcu_read_unlock();
}
439 static void veth_set_multicast_list(struct net_device
*dev
)
443 static struct sk_buff
*veth_build_skb(void *head
, int headroom
, int len
,
448 skb
= build_skb(head
, buflen
);
452 skb_reserve(skb
, headroom
);
static int veth_select_rxq(struct net_device *dev)
{
	return smp_processor_id() % dev->real_num_rx_queues;
}
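
/* Queue selection for XDP transmission simply hashes on the current CPU, so
 * frames from one producer CPU always land on the same peer rx queue; there
 * is no per-flow steering on this path.
 */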
static struct net_device *veth_peer_dev(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);

	/* Callers must be under RCU read side. */
	return rcu_dereference(priv->peer);
}
static int veth_xdp_xmit(struct net_device *dev, int n,
			 struct xdp_frame **frames,
			 u32 flags, bool ndo_xmit)
{
	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
	int i, ret = -ENXIO, nxmit = 0;
	struct net_device *rcv;
	unsigned int max_len;
	struct veth_rq *rq;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	rcu_read_lock();
	rcv = rcu_dereference(priv->peer);
	if (unlikely(!rcv))
		goto out;

	rcv_priv = netdev_priv(rcv);
	rq = &rcv_priv->rq[veth_select_rxq(rcv)];
	/* The napi pointer is set if NAPI is enabled, which ensures that
	 * xdp_ring is initialized on receive side and the peer device is up.
	 */
	if (!rcu_access_pointer(rq->napi))
		goto out;

	max_len = rcv->mtu + rcv->hard_header_len + VLAN_HLEN;

	spin_lock(&rq->xdp_ring.producer_lock);
	for (i = 0; i < n; i++) {
		struct xdp_frame *frame = frames[i];
		void *ptr = veth_xdp_to_ptr(frame);

		if (unlikely(frame->len > max_len ||
			     __ptr_ring_produce(&rq->xdp_ring, ptr)))
			break;
		nxmit++;
	}
	spin_unlock(&rq->xdp_ring.producer_lock);

	if (flags & XDP_XMIT_FLUSH)
		__veth_xdp_flush(rq);

	ret = nxmit;
	if (ndo_xmit) {
		u64_stats_update_begin(&rq->stats.syncp);
		rq->stats.vs.peer_tq_xdp_xmit += nxmit;
		rq->stats.vs.peer_tq_xdp_xmit_err += n - nxmit;
		u64_stats_update_end(&rq->stats.syncp);
	}

out:
	rcu_read_unlock();

	return ret;
}
static int veth_ndo_xdp_xmit(struct net_device *dev, int n,
			     struct xdp_frame **frames, u32 flags)
{
	int err;

	err = veth_xdp_xmit(dev, n, frames, flags, true);
	if (err < 0) {
		struct veth_priv *priv = netdev_priv(dev);

		atomic64_add(n, &priv->dropped);
	}

	return err;
}
static void veth_xdp_flush_bq(struct veth_rq *rq, struct veth_xdp_tx_bq *bq)
{
	int sent, i, err = 0, drops;

	sent = veth_xdp_xmit(rq->dev, bq->count, bq->q, 0, false);
	if (sent < 0) {
		err = sent;
		sent = 0;
	}

	for (i = sent; unlikely(i < bq->count); i++)
		xdp_return_frame(bq->q[i]);

	drops = bq->count - sent;
	trace_xdp_bulk_tx(rq->dev, sent, drops, err);

	u64_stats_update_begin(&rq->stats.syncp);
	rq->stats.vs.xdp_tx += sent;
	rq->stats.vs.xdp_tx_err += drops;
	u64_stats_update_end(&rq->stats.syncp);

	bq->count = 0;
}
static void veth_xdp_flush(struct veth_rq *rq, struct veth_xdp_tx_bq *bq)
{
	struct veth_priv *rcv_priv, *priv = netdev_priv(rq->dev);
	struct net_device *rcv;
	struct veth_rq *rcv_rq;

	rcu_read_lock();
	veth_xdp_flush_bq(rq, bq);
	rcv = rcu_dereference(priv->peer);
	if (unlikely(!rcv))
		goto out;

	rcv_priv = netdev_priv(rcv);
	rcv_rq = &rcv_priv->rq[veth_select_rxq(rcv)];
	/* xdp_ring is initialized on receive side? */
	if (unlikely(!rcu_access_pointer(rcv_rq->xdp_prog)))
		goto out;

	__veth_xdp_flush(rcv_rq);
out:
	rcu_read_unlock();
}
static int veth_xdp_tx(struct veth_rq *rq, struct xdp_buff *xdp,
		       struct veth_xdp_tx_bq *bq)
{
	struct xdp_frame *frame = xdp_convert_buff_to_frame(xdp);

	if (unlikely(!frame))
		return -EOVERFLOW;

	if (unlikely(bq->count == VETH_XDP_TX_BULK_SIZE))
		veth_xdp_flush_bq(rq, bq);

	bq->q[bq->count++] = frame;

	return 0;
}
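
/* XDP_TX frames are therefore batched: the bulk queue is drained either when
 * it fills up (VETH_XDP_TX_BULK_SIZE) or at the end of the NAPI poll via
 * veth_xdp_flush(), amortizing the peer ring's producer lock.
 */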
static struct xdp_frame *veth_xdp_rcv_one(struct veth_rq *rq,
					  struct xdp_frame *frame,
					  struct veth_xdp_tx_bq *bq,
					  struct veth_stats *stats)
{
	struct xdp_frame orig_frame;
	struct bpf_prog *xdp_prog;

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (likely(xdp_prog)) {
		struct xdp_buff xdp;
		u32 act;

		xdp_convert_frame_to_buff(frame, &xdp);
		xdp.rxq = &rq->xdp_rxq;

		act = bpf_prog_run_xdp(xdp_prog, &xdp);

		switch (act) {
		case XDP_PASS:
			if (xdp_update_frame_from_buff(&xdp, frame))
				goto err_xdp;
			break;
		case XDP_TX:
			orig_frame = *frame;
			xdp.rxq->mem = frame->mem;
			if (unlikely(veth_xdp_tx(rq, &xdp, bq) < 0)) {
				trace_xdp_exception(rq->dev, xdp_prog, act);
				frame = &orig_frame;
				stats->rx_drops++;
				goto err_xdp;
			}
			stats->xdp_tx++;
			rcu_read_unlock();
			goto xdp_xmit;
		case XDP_REDIRECT:
			orig_frame = *frame;
			xdp.rxq->mem = frame->mem;
			if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) {
				frame = &orig_frame;
				stats->rx_drops++;
				goto err_xdp;
			}
			stats->xdp_redirect++;
			rcu_read_unlock();
			goto xdp_xmit;
		default:
			bpf_warn_invalid_xdp_action(act);
			fallthrough;
		case XDP_ABORTED:
			trace_xdp_exception(rq->dev, xdp_prog, act);
			fallthrough;
		case XDP_DROP:
			stats->xdp_drops++;
			goto err_xdp;
		}
	}
	rcu_read_unlock();

	return frame;
err_xdp:
	rcu_read_unlock();
	xdp_return_frame(frame);
xdp_xmit:
	return NULL;
}
/* frames array contains VETH_XDP_BATCH at most */
static void veth_xdp_rcv_bulk_skb(struct veth_rq *rq, void **frames,
				  int n_xdpf, struct veth_xdp_tx_bq *bq,
				  struct veth_stats *stats)
{
	void *skbs[VETH_XDP_BATCH];
	int i;

	if (xdp_alloc_skb_bulk(skbs, n_xdpf,
			       GFP_ATOMIC | __GFP_ZERO) < 0) {
		for (i = 0; i < n_xdpf; i++)
			xdp_return_frame(frames[i]);
		stats->rx_drops += n_xdpf;

		return;
	}

	for (i = 0; i < n_xdpf; i++) {
		struct sk_buff *skb = skbs[i];

		skb = __xdp_build_skb_from_frame(frames[i], skb,
						 rq->dev);
		if (!skb) {
			xdp_return_frame(frames[i]);
			stats->rx_drops++;
			continue;
		}
		napi_gro_receive(&rq->xdp_napi, skb);
	}
}
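
/* Allocating the skbs in one xdp_alloc_skb_bulk() call amortizes allocator
 * cost over up to VETH_XDP_BATCH frames; on failure the whole batch is
 * returned and accounted as rx drops rather than retried.
 */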
static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
					struct sk_buff *skb,
					struct veth_xdp_tx_bq *bq,
					struct veth_stats *stats)
{
	u32 pktlen, headroom, act, metalen, frame_sz;
	void *orig_data, *orig_data_end;
	struct bpf_prog *xdp_prog;
	int mac_len, delta, off;
	struct xdp_buff xdp;

	skb_prepare_for_gro(skb);

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (unlikely(!xdp_prog)) {
		rcu_read_unlock();
		goto out;
	}

	mac_len = skb->data - skb_mac_header(skb);
	pktlen = skb->len + mac_len;
	headroom = skb_headroom(skb) - mac_len;

	if (skb_shared(skb) || skb_head_is_locked(skb) ||
	    skb_is_nonlinear(skb) || headroom < XDP_PACKET_HEADROOM) {
		struct sk_buff *nskb;
		int size, head_off;
		void *head, *start;
		struct page *page;

		size = SKB_DATA_ALIGN(VETH_XDP_HEADROOM + pktlen) +
		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
		if (size > PAGE_SIZE)
			goto drop;

		page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
		if (!page)
			goto drop;

		head = page_address(page);
		start = head + VETH_XDP_HEADROOM;
		if (skb_copy_bits(skb, -mac_len, start, pktlen)) {
			page_frag_free(head);
			goto drop;
		}

		nskb = veth_build_skb(head, VETH_XDP_HEADROOM + mac_len,
				      skb->len, PAGE_SIZE);
		if (!nskb) {
			page_frag_free(head);
			goto drop;
		}

		skb_copy_header(nskb, skb);
		head_off = skb_headroom(nskb) - skb_headroom(skb);
		skb_headers_offset_update(nskb, head_off);
		consume_skb(skb);
		skb = nskb;
	}

	/* SKB "head" area always have tailroom for skb_shared_info */
	frame_sz = skb_end_pointer(skb) - skb->head;
	frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	xdp_init_buff(&xdp, frame_sz, &rq->xdp_rxq);
	xdp_prepare_buff(&xdp, skb->head, skb->mac_header, pktlen, true);

	orig_data = xdp.data;
	orig_data_end = xdp.data_end;

	act = bpf_prog_run_xdp(xdp_prog, &xdp);

	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		get_page(virt_to_page(xdp.data));
		consume_skb(skb);
		xdp.rxq->mem = rq->xdp_mem;
		if (unlikely(veth_xdp_tx(rq, &xdp, bq) < 0)) {
			trace_xdp_exception(rq->dev, xdp_prog, act);
			stats->rx_drops++;
			goto err_xdp;
		}
		stats->xdp_tx++;
		rcu_read_unlock();
		goto xdp_xmit;
	case XDP_REDIRECT:
		get_page(virt_to_page(xdp.data));
		consume_skb(skb);
		xdp.rxq->mem = rq->xdp_mem;
		if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) {
			stats->rx_drops++;
			goto err_xdp;
		}
		stats->xdp_redirect++;
		rcu_read_unlock();
		goto xdp_xmit;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(rq->dev, xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		stats->xdp_drops++;
		goto xdp_drop;
	}
	rcu_read_unlock();

	/* check if bpf_xdp_adjust_head was used */
	delta = orig_data - xdp.data;
	off = mac_len + delta;
	if (off > 0)
		__skb_push(skb, off);
	else if (off < 0)
		__skb_pull(skb, -off);
	skb->mac_header -= delta;

	/* check if bpf_xdp_adjust_tail was used */
	off = xdp.data_end - orig_data_end;
	if (off != 0)
		__skb_put(skb, off); /* positive on grow, negative on shrink */
	skb->protocol = eth_type_trans(skb, rq->dev);

	metalen = xdp.data - xdp.data_meta;
	if (metalen)
		skb_metadata_set(skb, metalen);
out:
	return skb;
drop:
	stats->rx_drops++;
xdp_drop:
	rcu_read_unlock();
	kfree_skb(skb);
	return NULL;
err_xdp:
	rcu_read_unlock();
	page_frag_free(xdp.data);
xdp_xmit:
	return NULL;
}
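
/* veth_xdp_rcv_skb() runs XDP on an skb in place only when it is private,
 * linear and has XDP_PACKET_HEADROOM available; otherwise the packet is
 * copied into a freshly allocated page first, which also caps the usable
 * packet size at PAGE_SIZE on this path.
 */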
static int veth_xdp_rcv(struct veth_rq *rq, int budget,
			struct veth_xdp_tx_bq *bq,
			struct veth_stats *stats)
{
	int i, done = 0, n_xdpf = 0;
	void *xdpf[VETH_XDP_BATCH];

	for (i = 0; i < budget; i++) {
		void *ptr = __ptr_ring_consume(&rq->xdp_ring);

		if (!ptr)
			break;

		if (veth_is_xdp_frame(ptr)) {
			/* ndo_xdp_xmit */
			struct xdp_frame *frame = veth_ptr_to_xdp(ptr);

			stats->xdp_bytes += frame->len;
			frame = veth_xdp_rcv_one(rq, frame, bq, stats);
			if (frame) {
				/* XDP_PASS */
				xdpf[n_xdpf++] = frame;
				if (n_xdpf == VETH_XDP_BATCH) {
					veth_xdp_rcv_bulk_skb(rq, xdpf, n_xdpf,
							      bq, stats);
					n_xdpf = 0;
				}
			}
		} else {
			/* ndo_start_xmit */
			struct sk_buff *skb = ptr;

			stats->xdp_bytes += skb->len;
			skb = veth_xdp_rcv_skb(rq, skb, bq, stats);
			if (skb)
				napi_gro_receive(&rq->xdp_napi, skb);
		}
		done++;
	}

	if (n_xdpf)
		veth_xdp_rcv_bulk_skb(rq, xdpf, n_xdpf, bq, stats);

	u64_stats_update_begin(&rq->stats.syncp);
	rq->stats.vs.xdp_redirect += stats->xdp_redirect;
	rq->stats.vs.xdp_bytes += stats->xdp_bytes;
	rq->stats.vs.xdp_drops += stats->xdp_drops;
	rq->stats.vs.rx_drops += stats->rx_drops;
	rq->stats.vs.xdp_packets += done;
	u64_stats_update_end(&rq->stats.syncp);

	return done;
}
static int veth_poll(struct napi_struct *napi, int budget)
{
	struct veth_rq *rq =
		container_of(napi, struct veth_rq, xdp_napi);
	struct veth_stats stats = {};
	struct veth_xdp_tx_bq bq;
	int done;

	bq.count = 0;

	xdp_set_return_frame_no_direct();
	done = veth_xdp_rcv(rq, budget, &bq, &stats);

	if (done < budget && napi_complete_done(napi, done)) {
		/* Write rx_notify_masked before reading ptr_ring */
		smp_store_mb(rq->rx_notify_masked, false);
		if (unlikely(!__ptr_ring_empty(&rq->xdp_ring))) {
			rq->rx_notify_masked = true;
			napi_schedule(&rq->xdp_napi);
		}
	}

	if (stats.xdp_tx > 0)
		veth_xdp_flush(rq, &bq);
	if (stats.xdp_redirect > 0)
		xdp_do_flush();
	xdp_clear_return_frame_no_direct();

	return done;
}
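
/* The smp_store_mb() above is the consumer half of the wakeup protocol:
 * rx_notify_masked is cleared before the final ring emptiness check, so a
 * producer racing with napi_complete_done() is caught either by its own
 * napi_schedule() or by the re-check right here.
 */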
static int __veth_napi_enable_range(struct net_device *dev, int start, int end)
{
	struct veth_priv *priv = netdev_priv(dev);
	int err, i;

	for (i = start; i < end; i++) {
		struct veth_rq *rq = &priv->rq[i];

		err = ptr_ring_init(&rq->xdp_ring, VETH_RING_SIZE, GFP_KERNEL);
		if (err)
			goto err_xdp_ring;
	}

	for (i = start; i < end; i++) {
		struct veth_rq *rq = &priv->rq[i];

		napi_enable(&rq->xdp_napi);
		rcu_assign_pointer(priv->rq[i].napi, &priv->rq[i].xdp_napi);
	}

	return 0;

err_xdp_ring:
	for (i--; i >= start; i--)
		ptr_ring_cleanup(&priv->rq[i].xdp_ring, veth_ptr_free);

	return err;
}

static int __veth_napi_enable(struct net_device *dev)
{
	return __veth_napi_enable_range(dev, 0, dev->real_num_rx_queues);
}
static void veth_napi_del_range(struct net_device *dev, int start, int end)
{
	struct veth_priv *priv = netdev_priv(dev);
	int i;

	for (i = start; i < end; i++) {
		struct veth_rq *rq = &priv->rq[i];

		rcu_assign_pointer(priv->rq[i].napi, NULL);
		napi_disable(&rq->xdp_napi);
		__netif_napi_del(&rq->xdp_napi);
	}
	synchronize_net();

	for (i = start; i < end; i++) {
		struct veth_rq *rq = &priv->rq[i];

		rq->rx_notify_masked = false;
		ptr_ring_cleanup(&rq->xdp_ring, veth_ptr_free);
	}
}

static void veth_napi_del(struct net_device *dev)
{
	veth_napi_del_range(dev, 0, dev->real_num_rx_queues);
}

static bool veth_gro_requested(const struct net_device *dev)
{
	return !!(dev->wanted_features & NETIF_F_GRO);
}
static int veth_enable_xdp_range(struct net_device *dev, int start, int end,
				 bool napi_already_on)
{
	struct veth_priv *priv = netdev_priv(dev);
	int err, i;

	for (i = start; i < end; i++) {
		struct veth_rq *rq = &priv->rq[i];

		if (!napi_already_on)
			netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT);
		err = xdp_rxq_info_reg(&rq->xdp_rxq, dev, i, rq->xdp_napi.napi_id);
		if (err < 0)
			goto err_rxq_reg;

		err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
						 MEM_TYPE_PAGE_SHARED,
						 NULL);
		if (err < 0)
			goto err_reg_mem;

		/* Save original mem info as it can be overwritten */
		rq->xdp_mem = rq->xdp_rxq.mem;
	}
	return 0;

err_reg_mem:
	xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq);
err_rxq_reg:
	for (i--; i >= start; i--) {
		struct veth_rq *rq = &priv->rq[i];

		xdp_rxq_info_unreg(&rq->xdp_rxq);
		if (!napi_already_on)
			netif_napi_del(&rq->xdp_napi);
	}

	return err;
}
static void veth_disable_xdp_range(struct net_device *dev, int start, int end,
				   bool delete_napi)
{
	struct veth_priv *priv = netdev_priv(dev);
	int i;

	for (i = start; i < end; i++) {
		struct veth_rq *rq = &priv->rq[i];

		rq->xdp_rxq.mem = rq->xdp_mem;
		xdp_rxq_info_unreg(&rq->xdp_rxq);

		if (delete_napi)
			netif_napi_del(&rq->xdp_napi);
	}
}
static int veth_enable_xdp(struct net_device *dev)
{
	bool napi_already_on = veth_gro_requested(dev) && (dev->flags & IFF_UP);
	struct veth_priv *priv = netdev_priv(dev);
	int err, i;

	if (!xdp_rxq_info_is_reg(&priv->rq[0].xdp_rxq)) {
		err = veth_enable_xdp_range(dev, 0, dev->real_num_rx_queues, napi_already_on);
		if (err)
			return err;

		if (!napi_already_on) {
			err = __veth_napi_enable(dev);
			if (err) {
				veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, true);
				return err;
			}

			if (!veth_gro_requested(dev)) {
				/* user-space did not require GRO, but adding XDP
				 * is supposed to get GRO working
				 */
				dev->features |= NETIF_F_GRO;
				netdev_features_change(dev);
			}
		}
	}

	for (i = 0; i < dev->real_num_rx_queues; i++) {
		rcu_assign_pointer(priv->rq[i].xdp_prog, priv->_xdp_prog);
		rcu_assign_pointer(priv->rq[i].napi, &priv->rq[i].xdp_napi);
	}

	return 0;
}
static void veth_disable_xdp(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < dev->real_num_rx_queues; i++)
		rcu_assign_pointer(priv->rq[i].xdp_prog, NULL);

	if (!netif_running(dev) || !veth_gro_requested(dev)) {
		veth_napi_del(dev);

		/* if user-space did not require GRO, since adding XDP
		 * enabled it, clear it now
		 */
		if (!veth_gro_requested(dev) && netif_running(dev)) {
			dev->features &= ~NETIF_F_GRO;
			netdev_features_change(dev);
		}
	}

	veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, false);
}
static int veth_napi_enable_range(struct net_device *dev, int start, int end)
{
	struct veth_priv *priv = netdev_priv(dev);
	int err, i;

	for (i = start; i < end; i++) {
		struct veth_rq *rq = &priv->rq[i];

		netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT);
	}

	err = __veth_napi_enable_range(dev, start, end);
	if (err) {
		for (i = start; i < end; i++) {
			struct veth_rq *rq = &priv->rq[i];

			netif_napi_del(&rq->xdp_napi);
		}
		return err;
	}
	return err;
}

static int veth_napi_enable(struct net_device *dev)
{
	return veth_napi_enable_range(dev, 0, dev->real_num_rx_queues);
}
static void veth_disable_range_safe(struct net_device *dev, int start, int end)
{
	struct veth_priv *priv = netdev_priv(dev);

	if (start >= end)
		return;

	if (priv->_xdp_prog) {
		veth_napi_del_range(dev, start, end);
		veth_disable_xdp_range(dev, start, end, false);
	} else if (veth_gro_requested(dev)) {
		veth_napi_del_range(dev, start, end);
	}
}

static int veth_enable_range_safe(struct net_device *dev, int start, int end)
{
	struct veth_priv *priv = netdev_priv(dev);
	int err;

	if (start >= end)
		return 0;

	if (priv->_xdp_prog) {
		/* these channels are freshly initialized, napi is not on there even
		 * when GRO is requested
		 */
		err = veth_enable_xdp_range(dev, start, end, false);
		if (err)
			return err;

		err = __veth_napi_enable_range(dev, start, end);
		if (err) {
			/* on error always delete the newly added napis */
			veth_disable_xdp_range(dev, start, end, true);
			return err;
		}
	} else if (veth_gro_requested(dev)) {
		return veth_napi_enable_range(dev, start, end);
	}
	return 0;
}
static int veth_set_channels(struct net_device *dev,
			     struct ethtool_channels *ch)
{
	struct veth_priv *priv = netdev_priv(dev);
	unsigned int old_rx_count, new_rx_count;
	struct veth_priv *peer_priv;
	struct net_device *peer;
	int err;

	/* sanity check. Upper bounds are already enforced by the caller */
	if (!ch->rx_count || !ch->tx_count)
		return -EINVAL;

	/* avoid breaking XDP, if that is enabled */
	peer = rtnl_dereference(priv->peer);
	peer_priv = peer ? netdev_priv(peer) : NULL;
	if (priv->_xdp_prog && peer && ch->rx_count < peer->real_num_tx_queues)
		return -EINVAL;

	if (peer && peer_priv && peer_priv->_xdp_prog && ch->tx_count > peer->real_num_rx_queues)
		return -EINVAL;

	old_rx_count = dev->real_num_rx_queues;
	new_rx_count = ch->rx_count;
	if (netif_running(dev)) {
		/* turn device off */
		netif_carrier_off(dev);
		if (peer)
			netif_carrier_off(peer);

		/* try to allocate new resources, as needed */
		err = veth_enable_range_safe(dev, old_rx_count, new_rx_count);
		if (err)
			goto out;
	}

	err = netif_set_real_num_rx_queues(dev, ch->rx_count);
	if (err)
		goto revert;

	err = netif_set_real_num_tx_queues(dev, ch->tx_count);
	if (err) {
		int err2 = netif_set_real_num_rx_queues(dev, old_rx_count);

		/* this error condition could happen only if rx and tx change
		 * in opposite directions (e.g. tx nr raises, rx nr decreases)
		 * and we can't do anything to fully restore the original
		 * status
		 */
		if (err2)
			pr_warn("Can't restore rx queues config %d -> %d %d",
				new_rx_count, old_rx_count, err2);
		else
			goto revert;
	}

out:
	if (netif_running(dev)) {
		/* note that we need to swap the arguments WRT the enable part
		 * to identify the range we have to disable
		 */
		veth_disable_range_safe(dev, new_rx_count, old_rx_count);
		netif_carrier_on(dev);
		if (peer)
			netif_carrier_on(peer);
	}

	return err;

revert:
	new_rx_count = old_rx_count;
	old_rx_count = ch->rx_count;
	goto out;
}
static int veth_open(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);
	int err;

	if (!peer)
		return -ENOTCONN;

	if (priv->_xdp_prog) {
		err = veth_enable_xdp(dev);
		if (err)
			return err;
	} else if (veth_gro_requested(dev)) {
		err = veth_napi_enable(dev);
		if (err)
			return err;
	}

	if (peer->flags & IFF_UP) {
		netif_carrier_on(dev);
		netif_carrier_on(peer);
	}

	return 0;
}

static int veth_close(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);

	netif_carrier_off(dev);
	if (peer)
		netif_carrier_off(peer);

	if (priv->_xdp_prog)
		veth_disable_xdp(dev);
	else if (veth_gro_requested(dev))
		veth_napi_del(dev);

	return 0;
}
static int is_valid_veth_mtu(int mtu)
{
	return mtu >= ETH_MIN_MTU && mtu <= ETH_MAX_MTU;
}
static int veth_alloc_queues(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int i;

	priv->rq = kcalloc(dev->num_rx_queues, sizeof(*priv->rq), GFP_KERNEL);
	if (!priv->rq)
		return -ENOMEM;

	for (i = 0; i < dev->num_rx_queues; i++) {
		priv->rq[i].dev = dev;
		u64_stats_init(&priv->rq[i].stats.syncp);
	}

	return 0;
}

static void veth_free_queues(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);

	kfree(priv->rq);
}

static int veth_dev_init(struct net_device *dev)
{
	int err;

	dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
	if (!dev->lstats)
		return -ENOMEM;

	err = veth_alloc_queues(dev);
	if (err) {
		free_percpu(dev->lstats);
		return err;
	}

	return 0;
}

static void veth_dev_free(struct net_device *dev)
{
	veth_free_queues(dev);
	free_percpu(dev->lstats);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void veth_poll_controller(struct net_device *dev)
{
	/* veth only receives frames when its peer sends one
	 * Since it has nothing to do with disabling irqs, we are guaranteed
	 * never to have pending data when we poll for it so
	 * there is nothing to do here.
	 *
	 * We need this though so netpoll recognizes us as an interface that
	 * supports polling, which enables bridge devices in virt setups to
	 * still use netconsole
	 */
}
#endif	/* CONFIG_NET_POLL_CONTROLLER */
static int veth_get_iflink(const struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer;
	int iflink;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	iflink = peer ? peer->ifindex : 0;
	rcu_read_unlock();

	return iflink;
}
static netdev_features_t veth_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer;

	peer = rtnl_dereference(priv->peer);
	if (peer) {
		struct veth_priv *peer_priv = netdev_priv(peer);

		if (peer_priv->_xdp_prog)
			features &= ~NETIF_F_GSO_SOFTWARE;
	}
	if (priv->_xdp_prog)
		features |= NETIF_F_GRO;

	return features;
}

static int veth_set_features(struct net_device *dev,
			     netdev_features_t features)
{
	netdev_features_t changed = features ^ dev->features;
	struct veth_priv *priv = netdev_priv(dev);
	int err;

	if (!(changed & NETIF_F_GRO) || !(dev->flags & IFF_UP) || priv->_xdp_prog)
		return 0;

	if (features & NETIF_F_GRO) {
		err = veth_napi_enable(dev);
		if (err)
			return err;
	} else {
		veth_napi_del(dev);
	}
	return 0;
}
static void veth_set_rx_headroom(struct net_device *dev, int new_hr)
{
	struct veth_priv *peer_priv, *priv = netdev_priv(dev);
	struct net_device *peer;

	if (new_hr < 0)
		new_hr = 0;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	if (unlikely(!peer))
		goto out;

	peer_priv = netdev_priv(peer);
	priv->requested_headroom = new_hr;
	new_hr = max(priv->requested_headroom, peer_priv->requested_headroom);
	dev->needed_headroom = new_hr;
	peer->needed_headroom = new_hr;

out:
	rcu_read_unlock();
}
static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
			struct netlink_ext_ack *extack)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct bpf_prog *old_prog;
	struct net_device *peer;
	unsigned int max_mtu;
	int err;

	old_prog = priv->_xdp_prog;
	priv->_xdp_prog = prog;
	peer = rtnl_dereference(priv->peer);

	if (prog) {
		if (!peer) {
			NL_SET_ERR_MSG_MOD(extack, "Cannot set XDP when peer is detached");
			err = -ENOTCONN;
			goto err;
		}

		max_mtu = PAGE_SIZE - VETH_XDP_HEADROOM -
			  peer->hard_header_len -
			  SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
		if (peer->mtu > max_mtu) {
			NL_SET_ERR_MSG_MOD(extack, "Peer MTU is too large to set XDP");
			err = -ERANGE;
			goto err;
		}

		if (dev->real_num_rx_queues < peer->real_num_tx_queues) {
			NL_SET_ERR_MSG_MOD(extack, "XDP expects number of rx queues not less than peer tx queues");
			err = -ENOSPC;
			goto err;
		}

		if (dev->flags & IFF_UP) {
			err = veth_enable_xdp(dev);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Setup for XDP failed");
				goto err;
			}
		}

		if (!old_prog) {
			peer->hw_features &= ~NETIF_F_GSO_SOFTWARE;
			peer->max_mtu = max_mtu;
		}
	}

	if (old_prog) {
		if (!prog) {
			if (dev->flags & IFF_UP)
				veth_disable_xdp(dev);

			if (peer) {
				peer->hw_features |= NETIF_F_GSO_SOFTWARE;
				peer->max_mtu = ETH_MAX_MTU;
			}
		}
		bpf_prog_put(old_prog);
	}

	if ((!!old_prog ^ !!prog) && peer)
		netdev_update_features(peer);

	return 0;
err:
	priv->_xdp_prog = old_prog;

	return err;
}
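
/* Rough feel for the max_mtu bound computed above, plugging in assumed
 * but typical x86-64 values: a 4 KiB page, XDP_PACKET_HEADROOM of 256,
 * NET_IP_ALIGN of 0, a 14-byte Ethernet header and a 320-byte aligned
 * skb_shared_info give 4096 - 256 - 14 - 320 = 3506 bytes; a peer with a
 * larger MTU is rejected at attach time.
 */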
static int veth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return veth_xdp_set(dev, xdp->prog, xdp->extack);
	default:
		return -EINVAL;
	}
}
static const struct net_device_ops veth_netdev_ops = {
	.ndo_init		= veth_dev_init,
	.ndo_open		= veth_open,
	.ndo_stop		= veth_close,
	.ndo_start_xmit		= veth_xmit,
	.ndo_get_stats64	= veth_get_stats64,
	.ndo_set_rx_mode	= veth_set_multicast_list,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= veth_poll_controller,
#endif
	.ndo_get_iflink		= veth_get_iflink,
	.ndo_fix_features	= veth_fix_features,
	.ndo_set_features	= veth_set_features,
	.ndo_features_check	= passthru_features_check,
	.ndo_set_rx_headroom	= veth_set_rx_headroom,
	.ndo_bpf		= veth_xdp,
	.ndo_xdp_xmit		= veth_ndo_xdp_xmit,
	.ndo_get_peer_dev	= veth_peer_dev,
};
#define VETH_FEATURES	(NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \
			 NETIF_F_RXCSUM | NETIF_F_SCTP_CRC | NETIF_F_HIGHDMA | \
			 NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL | \
			 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | \
			 NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_STAG_RX)
static void veth_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->priv_flags |= IFF_PHONY_HEADROOM;

	dev->netdev_ops = &veth_netdev_ops;
	dev->ethtool_ops = &veth_ethtool_ops;
	dev->features |= NETIF_F_LLTX;
	dev->features |= VETH_FEATURES;
	dev->vlan_features = dev->features &
			     ~(NETIF_F_HW_VLAN_CTAG_TX |
			       NETIF_F_HW_VLAN_STAG_TX |
			       NETIF_F_HW_VLAN_CTAG_RX |
			       NETIF_F_HW_VLAN_STAG_RX);
	dev->needs_free_netdev = true;
	dev->priv_destructor = veth_dev_free;
	dev->max_mtu = ETH_MAX_MTU;

	dev->hw_features = VETH_FEATURES;
	dev->hw_enc_features = VETH_FEATURES;
	dev->mpls_features = NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE;
}
static int veth_validate(struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	if (tb[IFLA_MTU]) {
		if (!is_valid_veth_mtu(nla_get_u32(tb[IFLA_MTU])))
			return -EINVAL;
	}
	return 0;
}

static struct rtnl_link_ops veth_link_ops;
static void veth_disable_gro(struct net_device *dev)
{
	dev->features &= ~NETIF_F_GRO;
	dev->wanted_features &= ~NETIF_F_GRO;
	netdev_update_features(dev);
}

static int veth_init_queues(struct net_device *dev, struct nlattr *tb[])
{
	int err;

	if (!tb[IFLA_NUM_TX_QUEUES] && dev->num_tx_queues > 1) {
		err = netif_set_real_num_tx_queues(dev, 1);
		if (err)
			return err;
	}
	if (!tb[IFLA_NUM_RX_QUEUES] && dev->num_rx_queues > 1) {
		err = netif_set_real_num_rx_queues(dev, 1);
		if (err)
			return err;
	}
	return 0;
}
static int veth_newlink(struct net *src_net, struct net_device *dev,
			struct nlattr *tb[], struct nlattr *data[],
			struct netlink_ext_ack *extack)
{
	int err;
	struct net_device *peer;
	struct veth_priv *priv;
	char ifname[IFNAMSIZ];
	struct nlattr *peer_tb[IFLA_MAX + 1], **tbp;
	unsigned char name_assign_type;
	struct ifinfomsg *ifmp;
	struct net *net;

	/*
	 * create and register peer first
	 */
	if (data != NULL && data[VETH_INFO_PEER] != NULL) {
		struct nlattr *nla_peer;

		nla_peer = data[VETH_INFO_PEER];
		ifmp = nla_data(nla_peer);
		err = rtnl_nla_parse_ifla(peer_tb,
					  nla_data(nla_peer) + sizeof(struct ifinfomsg),
					  nla_len(nla_peer) - sizeof(struct ifinfomsg),
					  NULL);
		if (err < 0)
			return err;

		err = veth_validate(peer_tb, NULL, extack);
		if (err < 0)
			return err;

		tbp = peer_tb;
	} else {
		ifmp = NULL;
		tbp = tb;
	}

	if (ifmp && tbp[IFLA_IFNAME]) {
		nla_strscpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
		name_assign_type = NET_NAME_USER;
	} else {
		snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d");
		name_assign_type = NET_NAME_ENUM;
	}

	net = rtnl_link_get_net(src_net, tbp);
	if (IS_ERR(net))
		return PTR_ERR(net);

	peer = rtnl_create_link(net, ifname, name_assign_type,
				&veth_link_ops, tbp, extack);
	if (IS_ERR(peer)) {
		put_net(net);
		return PTR_ERR(peer);
	}

	if (!ifmp || !tbp[IFLA_ADDRESS])
		eth_hw_addr_random(peer);

	if (ifmp && (dev->ifindex != 0))
		peer->ifindex = ifmp->ifi_index;

	peer->gso_max_size = dev->gso_max_size;
	peer->gso_max_segs = dev->gso_max_segs;

	err = register_netdevice(peer);
	put_net(net);
	net = NULL;
	if (err < 0)
		goto err_register_peer;

	/* keep GRO disabled by default to be consistent with the established
	 * veth behavior
	 */
	veth_disable_gro(peer);
	netif_carrier_off(peer);

	err = rtnl_configure_link(peer, ifmp);
	if (err < 0)
		goto err_configure_peer;

	/*
	 * register dev last
	 *
	 * note that, since we've registered a new device, the dev's name
	 * should be re-allocated
	 */

	if (tb[IFLA_ADDRESS] == NULL)
		eth_hw_addr_random(dev);

	if (tb[IFLA_IFNAME])
		nla_strscpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
	else
		snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");

	err = register_netdevice(dev);
	if (err < 0)
		goto err_register_dev;

	netif_carrier_off(dev);

	/*
	 * tie the devices together
	 */

	priv = netdev_priv(dev);
	rcu_assign_pointer(priv->peer, peer);
	err = veth_init_queues(dev, tb);
	if (err)
		goto err_queues;

	priv = netdev_priv(peer);
	rcu_assign_pointer(priv->peer, dev);
	err = veth_init_queues(peer, tb);
	if (err)
		goto err_queues;

	veth_disable_gro(dev);
	return 0;

err_queues:
	unregister_netdevice(dev);
	return err;

err_register_dev:
	/* nothing to do */
err_configure_peer:
	unregister_netdevice(peer);
	return err;

err_register_peer:
	free_netdev(peer);
	return err;
}
static void veth_dellink(struct net_device *dev, struct list_head *head)
{
	struct veth_priv *priv;
	struct net_device *peer;

	priv = netdev_priv(dev);
	peer = rtnl_dereference(priv->peer);

	/* Note : dellink() is called from default_device_exit_batch(),
	 * before a rcu_synchronize() point. The devices are guaranteed
	 * not being freed before one RCU grace period.
	 */
	RCU_INIT_POINTER(priv->peer, NULL);
	unregister_netdevice_queue(dev, head);

	if (peer) {
		priv = netdev_priv(peer);
		RCU_INIT_POINTER(priv->peer, NULL);
		unregister_netdevice_queue(peer, head);
	}
}

static const struct nla_policy veth_policy[VETH_INFO_MAX + 1] = {
	[VETH_INFO_PEER]	= { .len = sizeof(struct ifinfomsg) },
};
static struct net *veth_get_link_net(const struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);

	return peer ? dev_net(peer) : dev_net(dev);
}

static unsigned int veth_get_num_queues(void)
{
	/* enforce the same queue limit as rtnl_create_link */
	int queues = num_possible_cpus();

	if (queues > 4096)
		queues = 4096;
	return queues;
}
static struct rtnl_link_ops veth_link_ops = {
	.kind			= DRV_NAME,
	.priv_size		= sizeof(struct veth_priv),
	.setup			= veth_setup,
	.validate		= veth_validate,
	.newlink		= veth_newlink,
	.dellink		= veth_dellink,
	.policy			= veth_policy,
	.maxtype		= VETH_INFO_MAX,
	.get_link_net		= veth_get_link_net,
	.get_num_tx_queues	= veth_get_num_queues,
	.get_num_rx_queues	= veth_get_num_queues,
};
static __init int veth_init(void)
{
	return rtnl_link_register(&veth_link_ops);
}

static __exit void veth_exit(void)
{
	rtnl_link_unregister(&veth_link_ops);
}

module_init(veth_init);
module_exit(veth_exit);

MODULE_DESCRIPTION("Virtual Ethernet Tunnel");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);
);