/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __MLX5_EN_H__
#define __MLX5_EN_H__
#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/timecounter.h>
#include <linux/net_tstamp.h>
#include <linux/crash_dump.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/port.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/transobj.h>
#include <linux/mlx5/fs.h>
#include <linux/rhashtable.h>
#include <net/udp_tunnel.h>
#include <net/switchdev.h>
#include <linux/dim.h>
#include <linux/bits.h>
#include "mlx5_core.h"
#include "lib/hv_vhca.h"
extern const struct net_device_ops mlx5e_netdev_ops;

#define MLX5E_METADATA_ETHER_TYPE (0x8CE4)
#define MLX5E_METADATA_ETHER_LEN 8

#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)

#define MLX5E_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)

#define MLX5E_HW2SW_MTU(params, hwmtu) ((hwmtu) - ((params)->hard_mtu))
#define MLX5E_SW2HW_MTU(params, swmtu) ((swmtu) + ((params)->hard_mtu))
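
/* Worked example (illustrative arithmetic only): when hard_mtu equals
 * MLX5E_ETH_HARD_MTU (14-byte Ethernet header + 4-byte VLAN tag + 4-byte
 * FCS = 22 bytes), a software MTU of 1500 maps to a hardware MTU of
 * MLX5E_SW2HW_MTU(params, 1500) = 1522, and back via
 * MLX5E_HW2SW_MTU(params, 1522) = 1500.
 */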
#define MLX5E_MAX_NUM_TC 8

#define MLX5_RX_HEADROOM NET_SKB_PAD
#define MLX5_SKB_FRAG_SZ(len) (SKB_DATA_ALIGN(len) + \
			       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

#define MLX5E_RX_MAX_HEAD (256)

#define MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev) \
	(6 + MLX5_CAP_GEN(mdev, cache_line_128byte)) /* HW restriction */
#define MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, req) \
	max_t(u32, MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev), req)
#define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev) \
	MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, order_base_2(MLX5E_RX_MAX_HEAD))

#define MLX5_MPWRQ_LOG_WQE_SZ 18
#define MLX5_MPWRQ_WQE_PAGE_ORDER (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
				   MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0)
#define MLX5_MPWRQ_PAGES_PER_WQE BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)
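
/* Example (illustrative arithmetic only): with 4 KB pages (PAGE_SHIFT = 12),
 * a 2^18-byte (256 KB) multi-packet WQE gives a page order of 18 - 12 = 6,
 * i.e. MLX5_MPWRQ_PAGES_PER_WQE = 64. With 64 KB pages (PAGE_SHIFT = 16)
 * the order is 2 and only 4 pages are needed per WQE.
 */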
#define MLX5_ALIGN_MTTS(mtts) (ALIGN(mtts, 8))
#define MLX5_ALIGNED_MTTS_OCTW(mtts) ((mtts) / 2)
#define MLX5_MTT_OCTW(mtts) (MLX5_ALIGNED_MTTS_OCTW(MLX5_ALIGN_MTTS(mtts)))
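
/* Example (illustrative arithmetic only): an MTT translation entry is
 * 8 bytes and one 16-byte octword holds two of them, hence the division by
 * two after aligning the count up to a multiple of 8; e.g.
 * MLX5_MTT_OCTW(65) = ALIGN(65, 8) / 2 = 36 octwords.
 */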
/* Add another page to MLX5E_REQUIRED_WQE_MTTS as a buffer between
 * WQEs. This page will absorb write overflow by the hardware when
 * receiving packets larger than the MTU. These oversize packets are
 * dropped by the driver at a later stage.
 */
#define MLX5E_REQUIRED_WQE_MTTS (MLX5_ALIGN_MTTS(MLX5_MPWRQ_PAGES_PER_WQE + 1))
#define MLX5E_REQUIRED_MTTS(wqes) ((wqes) * MLX5E_REQUIRED_WQE_MTTS)
#define MLX5E_MAX_RQ_NUM_MTTS \
	((1 << 16) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */
#define MLX5E_ORDER2_MAX_PACKET_MTU (order_base_2(10 * 1024))
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW \
	(ilog2(MLX5E_MAX_RQ_NUM_MTTS / MLX5E_REQUIRED_WQE_MTTS))
#define MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW \
	(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW + \
	 (MLX5_MPWRQ_LOG_WQE_SZ - MLX5E_ORDER2_MAX_PACKET_MTU))

#define MLX5E_MIN_SKB_FRAG_SZ (MLX5_SKB_FRAG_SZ(MLX5_RX_HEADROOM))
#define MLX5E_LOG_MAX_RX_WQE_BULK \
	(ilog2(PAGE_SIZE / roundup_pow_of_two(MLX5E_MIN_SKB_FRAG_SZ)))
#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x6
#define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE 0xa
#define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE 0xd

#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE (1 + MLX5E_LOG_MAX_RX_WQE_BULK)
#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE min_t(u8, 0xd, \
					       MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW)

#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x2

#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024)
#define MLX5E_DEFAULT_LRO_TIMEOUT 32
#define MLX5E_LRO_TIMEOUT_ARR_SIZE 4

#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC 0x10
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE 0x10
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS 0x20
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES 0x80
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW 0x2

#define MLX5E_LOG_INDIR_RQT_SIZE 0x7
#define MLX5E_INDIR_RQT_SIZE BIT(MLX5E_LOG_INDIR_RQT_SIZE)
#define MLX5E_MIN_NUM_CHANNELS 0x1
#define MLX5E_MAX_NUM_CHANNELS MLX5E_INDIR_RQT_SIZE
#define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
#define MLX5E_TX_CQ_POLL_BUDGET 128
#define MLX5E_TX_XSK_POLL_BUDGET 64
#define MLX5E_SQ_RECOVER_MIN_INTERVAL 500 /* msecs */
#define MLX5E_UMR_WQE_INLINE_SZ \
	(sizeof(struct mlx5e_umr_wqe) + \
	 ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(struct mlx5_mtt), \
	       MLX5_UMR_MTT_ALIGNMENT))
#define MLX5E_UMR_WQEBBS \
	(DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_BB))
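
/* Example (a sketch; the segment sizes are assumptions, not taken from the
 * mlx5 headers): with 4 KB pages, 64 MTT entries of 8 bytes each give
 * 512 bytes of inline translation data; adding a 128-byte UMR WQE header
 * (ctrl + uctrl + mkc segments) yields 640 bytes, which DIV_ROUND_UP()
 * turns into 10 of the 64-byte basic blocks (MLX5_SEND_WQE_BB).
 */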
#define MLX5E_MSG_LEVEL NETIF_MSG_LINK

#define mlx5e_dbg(mlevel, priv, format, ...) \
do { \
	if (NETIF_MSG_##mlevel & (priv)->msglevel) \
		netdev_warn(priv->netdev, format, \
			    ##__VA_ARGS__); \
} while (0)
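
/* Illustrative usage (speed is a hypothetical local variable):
 *
 *	mlx5e_dbg(LINK, priv, "link up, %u Mbps\n", speed);
 *
 * expands to a netdev_warn() that only fires when NETIF_MSG_LINK is set in
 * priv->msglevel.
 */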
enum mlx5e_rq_group {
	MLX5E_RQ_GROUP_REGULAR,
	MLX5E_RQ_GROUP_XSK,
#define MLX5E_NUM_RQ_GROUPS(g) (1 + MLX5E_RQ_GROUP_##g)
};
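
/* Example: MLX5E_NUM_RQ_GROUPS(XSK) token-pastes to
 * 1 + MLX5E_RQ_GROUP_XSK == 2, i.e. the number of RQ groups up to and
 * including the named one.
 */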
static inline u8 mlx5e_get_num_lag_ports(struct mlx5_core_dev *mdev)
{
	if (mlx5_lag_is_lacp_owner(mdev))
		return 1;

	return clamp_t(u8, MLX5_CAP_GEN(mdev, num_lag_ports), 1, MLX5_MAX_PORTS);
}
static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
{
	switch (wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW,
			     wq_size / 2);
	default:
		return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES,
			     wq_size / 4);
	}
}
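
/* Worked example (illustrative arithmetic only): for a striding RQ of
 * 0x400 WQEs this returns min(0x2, 0x200) = 2, while a 0x400-entry legacy
 * RQ returns min(0x80, 0x100) = 0x80.
 */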
/* Use this function to get the max num channels (rxqs/txqs) only when creating
 * the netdev.
 */
static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
{
	return is_kdump_kernel() ?
		MLX5E_MIN_NUM_CHANNELS :
		min_t(int, mlx5_comp_vectors_count(mdev), MLX5E_MAX_NUM_CHANNELS);
}
struct mlx5e_tx_wqe {
	struct mlx5_wqe_ctrl_seg ctrl;
	struct mlx5_wqe_eth_seg  eth;
	struct mlx5_wqe_data_seg data[];
};

struct mlx5e_rx_wqe_ll {
	struct mlx5_wqe_srq_next_seg next;
	struct mlx5_wqe_data_seg     data[];
};

struct mlx5e_rx_wqe_cyc {
	struct mlx5_wqe_data_seg data[];
};

struct mlx5e_umr_wqe {
	struct mlx5_wqe_ctrl_seg     ctrl;
	struct mlx5_wqe_umr_ctrl_seg uctrl;
	struct mlx5_mkey_seg         mkc;
	struct mlx5_mtt              inline_mtts[];
};

extern const char mlx5e_self_tests[][ETH_GSTRING_LEN];
enum mlx5e_priv_flag {
	MLX5E_PFLAG_RX_CQE_BASED_MODER,
	MLX5E_PFLAG_TX_CQE_BASED_MODER,
	MLX5E_PFLAG_RX_CQE_COMPRESS,
	MLX5E_PFLAG_RX_STRIDING_RQ,
	MLX5E_PFLAG_RX_NO_CSUM_COMPLETE,
	MLX5E_PFLAG_XDP_TX_MPWQE,
	MLX5E_PFLAG_SKB_TX_MPWQE,
	MLX5E_PFLAG_TX_PORT_TS,
	MLX5E_NUM_PFLAGS, /* Keep last */
};
#define MLX5E_SET_PFLAG(params, pflag, enable) \
	do { \
		if (enable) \
			(params)->pflags |= BIT(pflag); \
		else \
			(params)->pflags &= ~(BIT(pflag)); \
	} while (0)

#define MLX5E_GET_PFLAG(params, pflag) (!!((params)->pflags & (BIT(pflag))))
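
/* Illustrative usage (params is assumed to point at an initialized
 * struct mlx5e_params):
 *
 *	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, true);
 *	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS))
 *		... compression-specific setup ...
 */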
struct mlx5e_params {
	u8 log_rq_mtu_frames;
	bool rx_cqe_compress_def;
	bool tunneled_offload_en;
	struct dim_cq_moder rx_cq_moderation;
	struct dim_cq_moder tx_cq_moderation;
	u8 tx_min_inline_mode;
	bool vlan_strip_disable;
	u32 pflags;
	struct bpf_prog *xdp_prog;
	struct mlx5e_xsk *xsk;
	int hard_mtu;
};
enum {
	MLX5E_RQ_STATE_ENABLED,
	MLX5E_RQ_STATE_RECOVERING,
	MLX5E_RQ_STATE_NO_CSUM_COMPLETE,
	MLX5E_RQ_STATE_CSUM_FULL, /* cqe_csum_full hw bit is set */
	MLX5E_RQ_STATE_FPGA_TLS, /* FPGA TLS enabled */
	MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, /* set when mini_cqe_resp_stride_index cap is used */
};
struct mlx5e_cq {
	/* data path - accessed per cqe */
	struct mlx5_cqwq       wq;

	/* data path - accessed per napi poll */
	u16                    event_ctr;
	struct napi_struct    *napi;
	struct mlx5_core_cq    mcq;
	struct mlx5e_ch_stats *ch_stats;

	/* control */
	struct net_device     *netdev;
	struct mlx5_core_dev  *mdev;
	struct mlx5e_priv     *priv;
	struct mlx5_wq_ctrl    wq_ctrl;
} ____cacheline_aligned_in_smp;
struct mlx5e_cq_decomp {
	/* cqe decompression */
	struct mlx5_cqe64     title;
	struct mlx5_mini_cqe8 mini_arr[MLX5_MINI_CQE_ARRAY_SIZE];
	u8                    mini_arr_idx;
	u16                   left;
	u16                   wqe_counter;
} ____cacheline_aligned_in_smp;
enum mlx5e_dma_map_type {
	MLX5E_DMA_MAP_SINGLE,
	MLX5E_DMA_MAP_PAGE
};
struct mlx5e_sq_dma {
	dma_addr_t              addr;
	u32                     size;
	enum mlx5e_dma_map_type type;
};
enum {
	MLX5E_SQ_STATE_ENABLED,
	MLX5E_SQ_STATE_MPWQE,
	MLX5E_SQ_STATE_RECOVERING,
	MLX5E_SQ_STATE_IPSEC,
	MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE,
	MLX5E_SQ_STATE_PENDING_XSK_TX,
};
struct mlx5e_tx_mpwqe {
	/* Current MPWQE session */
	struct mlx5e_tx_wqe *wqe;
	u32 bytes_count;
	u8 ds_count;
	u8 pkt_count;
	u8 inline_on;
};
struct mlx5e_skb_fifo {
	struct sk_buff **fifo;
	u16 *pc;
	u16 *cc;
	u16 mask;
};
struct mlx5e_ptpsq;

struct mlx5e_txqsq {
	/* data path */

	/* dirtied @completion */
	u16                       cc;
	struct dim                dim; /* Adaptive Moderation */

	/* dirtied @xmit */
	u16                       pc ____cacheline_aligned_in_smp;
	struct mlx5e_tx_mpwqe     mpwqe;

	/* read only */
	struct mlx5_wq_cyc        wq;
	struct mlx5e_sq_stats    *stats;
	struct {
		struct mlx5e_sq_dma      *dma_fifo;
		struct mlx5e_skb_fifo     skb_fifo;
		struct mlx5e_tx_wqe_info *wqe_info;
	} db;
	void __iomem             *uar_map;
	struct netdev_queue      *txq;
	struct hwtstamp_config   *tstamp;
	struct mlx5_clock        *clock;
	struct net_device        *netdev;
	struct mlx5_core_dev     *mdev;
	struct mlx5e_priv        *priv;

	/* control path */
	struct mlx5_wq_ctrl       wq_ctrl;
	struct work_struct        recover_work;
	struct mlx5e_ptpsq       *ptpsq;
} ____cacheline_aligned_in_smp;
struct mlx5e_dma_info {
	dma_addr_t addr;
	union {
		struct page     *page;
		struct xdp_buff *xsk;
	};
};
/* XDP packets can be transmitted in different ways. On completion, we need to
 * distinguish between them to clean up things in a proper way.
 */
enum mlx5e_xdp_xmit_mode {
	/* An xdp_frame was transmitted due to either XDP_REDIRECT from another
	 * device or XDP_TX from an XSK RQ. The frame has to be unmapped and
	 * returned.
	 */
	MLX5E_XDP_XMIT_MODE_FRAME,

	/* The xdp_frame was created in place as a result of XDP_TX from a
	 * regular RQ. No DMA remapping happened, and the page belongs to us.
	 */
	MLX5E_XDP_XMIT_MODE_PAGE,

	/* No xdp_frame was created at all, the transmit happened from a UMEM
	 * page. The UMEM Completion Ring producer pointer has to be increased.
	 */
	MLX5E_XDP_XMIT_MODE_XSK,
};
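
/* A completion handler is expected to dispatch on this mode; a sketch,
 * not the driver's actual cleanup code:
 *
 *	switch (xdpi.mode) {
 *	case MLX5E_XDP_XMIT_MODE_FRAME:
 *		... dma_unmap_single(), then xdp_return_frame() ...
 *		break;
 *	case MLX5E_XDP_XMIT_MODE_PAGE:
 *		... release the page back to the RQ's page pool ...
 *		break;
 *	case MLX5E_XDP_XMIT_MODE_XSK:
 *		... bump the UMEM Completion Ring producer ...
 *		break;
 *	}
 */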
struct mlx5e_xdp_info {
	enum mlx5e_xdp_xmit_mode mode;
	union {
		struct {
			struct xdp_frame *xdpf;
			dma_addr_t dma_addr;
		} frame;
		struct {
			struct mlx5e_rq *rq;
			struct mlx5e_dma_info di;
		} page;
	};
};
struct mlx5e_xmit_data {
	dma_addr_t dma_addr;
	void      *data;
	u32        len;
};

struct mlx5e_xdp_info_fifo {
	struct mlx5e_xdp_info *xi;
	u32 *cc;
	u32 *pc;
	u32 mask;
};
typedef int (*mlx5e_fp_xmit_xdp_frame_check)(struct mlx5e_xdpsq *);
typedef bool (*mlx5e_fp_xmit_xdp_frame)(struct mlx5e_xdpsq *,
					struct mlx5e_xmit_data *,
					struct mlx5e_xdp_info *,
					int);
struct mlx5e_xdpsq {
	/* data path */

	/* dirtied @completion */
	u32                        xdpi_fifo_cc;
	u16                        cc;

	/* dirtied @xmit */
	u32                        xdpi_fifo_pc ____cacheline_aligned_in_smp;
	u16                        pc;
	struct mlx5_wqe_ctrl_seg  *doorbell_cseg;
	struct mlx5e_tx_mpwqe      mpwqe;

	/* read only */
	struct xsk_buff_pool      *xsk_pool;
	struct mlx5_wq_cyc         wq;
	struct mlx5e_xdpsq_stats  *stats;
	mlx5e_fp_xmit_xdp_frame_check xmit_xdp_frame_check;
	mlx5e_fp_xmit_xdp_frame    xmit_xdp_frame;
	struct {
		struct mlx5e_xdp_wqe_info *wqe_info;
		struct mlx5e_xdp_info_fifo xdpi_fifo;
	} db;
	void __iomem              *uar_map;

	/* control path */
	struct mlx5_wq_ctrl        wq_ctrl;
	struct mlx5e_channel      *channel;
} ____cacheline_aligned_in_smp;
struct mlx5e_icosq {
	/* data path */
	u16                        cc;
	u16                        pc;

	struct mlx5_wqe_ctrl_seg  *doorbell_cseg;

	/* write@xmit, read@completion */
	struct {
		struct mlx5e_icosq_wqe_info *wqe_info;
	} db;

	/* read only */
	struct mlx5_wq_cyc         wq;
	void __iomem              *uar_map;

	/* control path */
	struct mlx5_wq_ctrl        wq_ctrl;
	struct mlx5e_channel      *channel;

	struct work_struct         recover_work;
} ____cacheline_aligned_in_smp;
struct mlx5e_wqe_frag_info {
	struct mlx5e_dma_info *di;
	u32 offset;
	bool last_in_page;
};

struct mlx5e_umr_dma_info {
	struct mlx5e_dma_info dma_info[MLX5_MPWRQ_PAGES_PER_WQE];
};

struct mlx5e_mpw_info {
	struct mlx5e_umr_dma_info umr;
	u16 consumed_strides;
	DECLARE_BITMAP(xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE);
};
#define MLX5E_MAX_RX_FRAGS 4

/* A single cache unit is capable of serving one napi call (for non-striding rq)
 * or a MPWQE (for striding rq).
 */
#define MLX5E_CACHE_UNIT (MLX5_MPWRQ_PAGES_PER_WQE > NAPI_POLL_WEIGHT ? \
			  MLX5_MPWRQ_PAGES_PER_WQE : NAPI_POLL_WEIGHT)
#define MLX5E_CACHE_SIZE (4 * roundup_pow_of_two(MLX5E_CACHE_UNIT))
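
/* Example (illustrative arithmetic only): with 4 KB pages,
 * MLX5_MPWRQ_PAGES_PER_WQE and NAPI_POLL_WEIGHT are both 64, so
 * MLX5E_CACHE_UNIT = 64 and MLX5E_CACHE_SIZE = 4 * 64 = 256 entries.
 */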
struct mlx5e_page_cache {
	u32 head;
	u32 tail;
	struct mlx5e_dma_info page_cache[MLX5E_CACHE_SIZE];
};
typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq *, struct mlx5_cqe64 *);
typedef struct sk_buff *
(*mlx5e_fp_skb_from_cqe_mpwrq)(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
			       u16 cqe_bcnt, u32 head_offset, u32 page_idx);
typedef struct sk_buff *
(*mlx5e_fp_skb_from_cqe)(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
			 struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt);
typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq);
typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq *, u16);

int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk);
enum mlx5e_rq_flag {
	MLX5E_RQ_FLAG_XDP_XMIT,
	MLX5E_RQ_FLAG_XDP_REDIRECT,
};
struct mlx5e_rq_frag_info {
	int frag_size;
	int frag_stride;
};

struct mlx5e_rq_frags_info {
	struct mlx5e_rq_frag_info arr[MLX5E_MAX_RX_FRAGS];
	u8 num_frags;
	u8 log_num_frags;
	u8 wqe_bulk;
};
struct mlx5e_rq {
	/* data path */
	union {
		struct {
			struct mlx5_wq_cyc          wq;
			struct mlx5e_wqe_frag_info *frags;
			struct mlx5e_dma_info      *di;
			struct mlx5e_rq_frags_info  info;
			mlx5e_fp_skb_from_cqe       skb_from_cqe;
		} wqe;
		struct {
			struct mlx5_wq_ll           wq;
			struct mlx5e_umr_wqe        umr_wqe;
			struct mlx5e_mpw_info      *info;
			mlx5e_fp_skb_from_cqe_mpwrq skb_from_cqe_mpwrq;
		} mpwqe;
	};
	u8                      map_dir; /* dma map direction */

	struct net_device      *netdev;
	struct mlx5e_rq_stats  *stats;
	struct mlx5e_cq_decomp  cqd;
	struct mlx5e_page_cache page_cache;
	struct hwtstamp_config *tstamp;
	struct mlx5_clock      *clock;
	struct mlx5e_icosq     *icosq;
	struct mlx5e_priv      *priv;

	mlx5e_fp_handle_rx_cqe  handle_rx_cqe;
	mlx5e_fp_post_rx_wqes   post_wqes;
	mlx5e_fp_dealloc_wqe    dealloc_wqe;

	struct dim              dim; /* Dynamic Interrupt Moderation */

	struct bpf_prog __rcu  *xdp_prog;
	struct mlx5e_xdpsq     *xdpsq;
	DECLARE_BITMAP(flags, 8);
	struct page_pool       *page_pool;

	/* AF_XDP zero-copy */
	struct xsk_buff_pool   *xsk_pool;

	struct work_struct      recover_work;

	/* control */
	struct mlx5_wq_ctrl     wq_ctrl;
	struct mlx5_core_dev   *mdev;
	struct mlx5_core_mkey   umr_mkey;
	struct mlx5e_dma_info   wqe_overflow;

	/* XDP read-mostly */
	struct xdp_rxq_info     xdp_rxq;
} ____cacheline_aligned_in_smp;
enum mlx5e_channel_state {
	MLX5E_CHANNEL_STATE_XSK,
	MLX5E_CHANNEL_NUM_STATES
};
struct mlx5e_channel {
	/* data path */
	struct mlx5e_rq         rq;
	struct mlx5e_xdpsq      rq_xdpsq;
	struct mlx5e_txqsq      sq[MLX5E_MAX_NUM_TC];
	struct mlx5e_icosq      icosq; /* internal control operations */
	struct napi_struct      napi;
	struct net_device      *netdev;

	/* XDP_REDIRECT */
	struct mlx5e_xdpsq      xdpsq;

	/* AF_XDP zero-copy */
	struct mlx5e_rq         xskrq;
	struct mlx5e_xdpsq      xsksq;

	/* Async ICOSQ */
	struct mlx5e_icosq      async_icosq;
	/* async_icosq can be accessed from any CPU - the spinlock protects it. */
	spinlock_t              async_icosq_lock;

	/* data path - accessed per napi poll */
	const struct cpumask   *aff_mask;
	struct mlx5e_ch_stats  *stats;

	/* control */
	struct mlx5e_priv      *priv;
	struct mlx5_core_dev   *mdev;
	struct hwtstamp_config *tstamp;
	DECLARE_BITMAP(state, MLX5E_CHANNEL_NUM_STATES);
};
struct mlx5e_port_ptp;

struct mlx5e_channels {
	struct mlx5e_channel  **c;
	struct mlx5e_port_ptp  *port_ptp;
	unsigned int            num;
	struct mlx5e_params     params;
};
struct mlx5e_channel_stats {
	struct mlx5e_ch_stats ch;
	struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC];
	struct mlx5e_rq_stats rq;
	struct mlx5e_rq_stats xskrq;
	struct mlx5e_xdpsq_stats rq_xdpsq;
	struct mlx5e_xdpsq_stats xdpsq;
	struct mlx5e_xdpsq_stats xsksq;
} ____cacheline_aligned_in_smp;

struct mlx5e_port_ptp_stats {
	struct mlx5e_ch_stats ch;
	struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC];
	struct mlx5e_ptp_cq_stats cq[MLX5E_MAX_NUM_TC];
} ____cacheline_aligned_in_smp;
enum {
	MLX5E_STATE_OPENED,
	MLX5E_STATE_DESTROYING,
	MLX5E_STATE_XDP_TX_ENABLED,
	MLX5E_STATE_XDP_ACTIVE,
};
struct mlx5e_rqt {
	u32  rqtn;
	bool enabled;
};

struct mlx5e_tir {
	u32              tirn;
	struct mlx5e_rqt rqt;
	struct list_head list;
};

struct mlx5e_rss_params {
	u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE];
	u32 rx_hash_fields[MLX5E_NUM_INDIR_TIRS];
	u8  toeplitz_hash_key[40];
	u8  hfunc;
};
struct mlx5e_modify_sq_param {
	int curr_state;
	int next_state;
	int rl_update;
	int rl_index;
};

#if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE)
struct mlx5e_hv_vhca_stats_agent {
	struct mlx5_hv_vhca_agent *agent;
	struct delayed_work        work;
	u16                        delay;
	void                      *buf;
};
#endif
struct mlx5e_xsk {
	/* XSK buffer pools are stored separately from channels, because we
	 * don't want to lose them when channels are recreated. The kernel also
	 * stores the buffer pool, but it doesn't distinguish between zero-copy
	 * and non-zero-copy UMEMs, so we rely on our own mechanism.
	 */
	struct xsk_buff_pool **pools;
	u16 refcnt;
	bool ever_used;
};

/* Temporary storage for variables that are allocated when struct mlx5e_priv is
 * initialized, and used in places where allocation is not possible because
 * those functions must not fail. Use with care and make sure the same variable
 * is not used simultaneously by multiple users.
 */
struct mlx5e_scratchpad {
	cpumask_var_t cpumask;
};
struct mlx5e_priv {
	/* priv data path fields - start */
	/* +1 for port ptp ts */
	struct mlx5e_txqsq *txq2sq[(MLX5E_MAX_NUM_CHANNELS + 1) * MLX5E_MAX_NUM_TC];
	int channel_tc2realtxq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
	int port_ptp_tc2realtxq[MLX5E_MAX_NUM_TC];
#ifdef CONFIG_MLX5_CORE_EN_DCB
	struct mlx5e_dcbx_dp       dcbx_dp;
#endif
	/* priv data path fields - end */

	u32                        msglevel;
	unsigned long              state;
	struct mutex               state_lock; /* Protects Interface state */
	struct mlx5e_rq            drop_rq;

	struct mlx5e_channels      channels;
	u32                        tisn[MLX5_MAX_PORTS][MLX5E_MAX_NUM_TC];
	struct mlx5e_rqt           indir_rqt;
	struct mlx5e_tir           indir_tir[MLX5E_NUM_INDIR_TIRS];
	struct mlx5e_tir           inner_indir_tir[MLX5E_NUM_INDIR_TIRS];
	struct mlx5e_tir           direct_tir[MLX5E_MAX_NUM_CHANNELS];
	struct mlx5e_tir           xsk_tir[MLX5E_MAX_NUM_CHANNELS];
	struct mlx5e_rss_params    rss_params;
	u32                        tx_rates[MLX5E_MAX_NUM_SQS];

	struct mlx5e_flow_steering fs;

	struct workqueue_struct   *wq;
	struct work_struct         update_carrier_work;
	struct work_struct         set_rx_mode_work;
	struct work_struct         tx_timeout_work;
	struct work_struct         update_stats_work;
	struct work_struct         monitor_counters_work;
	struct mlx5_nb             monitor_counters_nb;

	struct mlx5_core_dev      *mdev;
	struct net_device         *netdev;
	struct mlx5e_stats         stats;
	struct mlx5e_channel_stats channel_stats[MLX5E_MAX_NUM_CHANNELS];
	struct mlx5e_port_ptp_stats port_ptp_stats;
	bool                       port_ptp_opened;
	struct hwtstamp_config     tstamp;
	u16                        q_counter;
	u16                        drop_rq_q_counter;
	struct notifier_block      events_nb;

	struct udp_tunnel_nic_info nic_info;
#ifdef CONFIG_MLX5_CORE_EN_DCB
	struct mlx5e_dcbx          dcbx;
#endif

	const struct mlx5e_profile *profile;
#ifdef CONFIG_MLX5_EN_IPSEC
	struct mlx5e_ipsec        *ipsec;
#endif
#ifdef CONFIG_MLX5_EN_TLS
	struct mlx5e_tls          *tls;
#endif
	struct devlink_health_reporter *tx_reporter;
	struct devlink_health_reporter *rx_reporter;
	struct devlink_port        dl_port;
	struct mlx5e_xsk           xsk;
#if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE)
	struct mlx5e_hv_vhca_stats_agent stats_agent;
#endif
	struct mlx5e_scratchpad    scratchpad;
};
struct mlx5e_rx_handlers {
	mlx5e_fp_handle_rx_cqe handle_rx_cqe;
	mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe;
};

extern const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic;
struct mlx5e_profile {
	int	(*init)(struct mlx5_core_dev *mdev,
			struct net_device *netdev,
			const struct mlx5e_profile *profile, void *ppriv);
	void	(*cleanup)(struct mlx5e_priv *priv);
	int	(*init_rx)(struct mlx5e_priv *priv);
	void	(*cleanup_rx)(struct mlx5e_priv *priv);
	int	(*init_tx)(struct mlx5e_priv *priv);
	void	(*cleanup_tx)(struct mlx5e_priv *priv);
	void	(*enable)(struct mlx5e_priv *priv);
	void	(*disable)(struct mlx5e_priv *priv);
	int	(*update_rx)(struct mlx5e_priv *priv);
	void	(*update_stats)(struct mlx5e_priv *priv);
	void	(*update_carrier)(struct mlx5e_priv *priv);
	unsigned int (*stats_grps_num)(struct mlx5e_priv *priv);
	mlx5e_stats_grp_t *stats_grps;
	const struct mlx5e_rx_handlers *rx_handlers;
	int	max_tc;
	u8	rq_groups;
};
void mlx5e_build_ptys2ethtool_map(void);

bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev);
bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
				struct mlx5e_params *params);

void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s);

void mlx5e_init_l2_addr(struct mlx5e_priv *priv);
int mlx5e_self_test_num(struct mlx5e_priv *priv);
void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
		     u64 *buf);
void mlx5e_set_rx_mode_work(struct work_struct *work);

int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr);
int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr);
int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val);

int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
			  u16 vid);
int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
			   u16 vid);
void mlx5e_timestamp_init(struct mlx5e_priv *priv);
struct mlx5e_redirect_rqt_param {
	bool is_rss;
	union {
		u32 rqn; /* Direct RQN (Non-RSS) */
		struct {
			u8 hfunc;
			struct mlx5e_channels *channels;
		} rss; /* RSS data */
	};
};
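
/* Illustrative initialization (a sketch; chs is a hypothetical
 * struct mlx5e_channels pointer):
 *
 *	struct mlx5e_redirect_rqt_param rrp = {
 *		.is_rss = true,
 *		.rss = { .hfunc = ETH_RSS_HASH_TOP, .channels = chs },
 *	};
 *	mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rrp);
 */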
int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
		       struct mlx5e_redirect_rqt_param rrp);
void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_rss_params *rss_params,
				    const struct mlx5e_tirc_config *ttconfig,
				    void *tirc, bool inner);
void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in);
struct mlx5e_tirc_config mlx5e_tirc_get_default_config(enum mlx5e_traffic_types tt);
struct mlx5e_xsk_param;

struct mlx5e_rq_param;
int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
		  struct mlx5e_rq_param *param, struct mlx5e_xsk_param *xsk,
		  struct xsk_buff_pool *xsk_pool, struct mlx5e_rq *rq);
int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time);
void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
void mlx5e_close_rq(struct mlx5e_rq *rq);

struct mlx5e_sq_param;
int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
		     struct mlx5e_sq_param *param, struct mlx5e_icosq *sq);
void mlx5e_close_icosq(struct mlx5e_icosq *sq);
int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
		     struct mlx5e_sq_param *param, struct xsk_buff_pool *xsk_pool,
		     struct mlx5e_xdpsq *sq, bool is_redirect);
void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq);
struct mlx5e_create_cq_param {
	struct napi_struct *napi;
	struct mlx5e_ch_stats *ch_stats;
	int node;
	int ix;
};

struct mlx5e_cq_param;
int mlx5e_open_cq(struct mlx5e_priv *priv, struct dim_cq_moder moder,
		  struct mlx5e_cq_param *param, struct mlx5e_create_cq_param *ccp,
		  struct mlx5e_cq *cq);
void mlx5e_close_cq(struct mlx5e_cq *cq);
int mlx5e_open_locked(struct net_device *netdev);
int mlx5e_close_locked(struct net_device *netdev);

int mlx5e_open_channels(struct mlx5e_priv *priv,
			struct mlx5e_channels *chs);
void mlx5e_close_channels(struct mlx5e_channels *chs);
/* Function pointer to be used to modify HW or kernel settings while
 * switching channels.
 */
typedef int (*mlx5e_fp_preactivate)(struct mlx5e_priv *priv, void *context);
#define MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(fn) \
	int fn##_ctx(struct mlx5e_priv *priv, void *context) \
	{ \
		return fn(priv); \
	}
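
/* Illustrative usage (mlx5e_example_preactivate is a hypothetical callback,
 * not part of the driver): given
 *
 *	static int mlx5e_example_preactivate(struct mlx5e_priv *priv);
 *	MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_example_preactivate);
 *
 * the generated mlx5e_example_preactivate_ctx() discards the context argument
 * and can be passed to mlx5e_safe_switch_channels() below.
 */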
int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv);
int mlx5e_safe_switch_channels(struct mlx5e_priv *priv,
			       struct mlx5e_channels *new_chs,
			       mlx5e_fp_preactivate preactivate,
			       void *context);
int mlx5e_num_channels_changed(struct mlx5e_priv *priv);
int mlx5e_num_channels_changed_ctx(struct mlx5e_priv *priv, void *context);
void mlx5e_activate_priv_channels(struct mlx5e_priv *priv);
void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv);

void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
				   int num_channels);
void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode);
void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode);
void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode);
void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode);

void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
			       struct mlx5e_params *params);
int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state);
void mlx5e_activate_rq(struct mlx5e_rq *rq);
void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
void mlx5e_activate_icosq(struct mlx5e_icosq *icosq);
void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq);

int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
		    struct mlx5e_modify_sq_param *p);
void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq);
void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq);
void mlx5e_free_txqsq(struct mlx5e_txqsq *sq);
void mlx5e_tx_disable_queue(struct netdev_queue *txq);
int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa);
void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq);
struct mlx5e_create_sq_param;
int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
			struct mlx5e_sq_param *param,
			struct mlx5e_create_sq_param *csp,
			u32 *sqn);
void mlx5e_tx_err_cqe_work(struct work_struct *recover_work);
static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev)
{
	return MLX5_CAP_ETH(mdev, swp) &&
	       MLX5_CAP_ETH(mdev, swp_csum) && MLX5_CAP_ETH(mdev, swp_lso);
}

extern const struct ethtool_ops mlx5e_ethtool_ops;
int mlx5e_create_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir,
		     u32 *in);
void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
		       struct mlx5e_tir *tir);
int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb,
		       bool enable_mc_lb);
void mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc);
/* common netdev helpers */
void mlx5e_create_q_counters(struct mlx5e_priv *priv);
void mlx5e_destroy_q_counters(struct mlx5e_priv *priv);
int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
		       struct mlx5e_rq *drop_rq);
void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq);

int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv);

int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc);
void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv);

int mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);
int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn);
void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn);

int mlx5e_create_tises(struct mlx5e_priv *priv);
void mlx5e_destroy_tises(struct mlx5e_priv *priv);
int mlx5e_update_nic_rx(struct mlx5e_priv *priv);
void mlx5e_update_carrier(struct mlx5e_priv *priv);
int mlx5e_close(struct net_device *netdev);
int mlx5e_open(struct net_device *netdev);

void mlx5e_queue_update_stats(struct mlx5e_priv *priv);
int mlx5e_bits_invert(unsigned long a, int size);

int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv);
int mlx5e_set_dev_port_mtu_ctx(struct mlx5e_priv *priv, void *context);
int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
		     mlx5e_fp_preactivate preactivate);
void mlx5e_vxlan_set_netdev_info(struct mlx5e_priv *priv);
/* ethtool helpers */
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
			       struct ethtool_drvinfo *drvinfo);
void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv,
			       uint32_t stringset, uint8_t *data);
int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset);
void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
				     struct ethtool_stats *stats, u64 *data);
void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
				 struct ethtool_ringparam *param);
int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
				struct ethtool_ringparam *param);
void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
				struct ethtool_channels *ch);
int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
			       struct ethtool_channels *ch);
int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
			       struct ethtool_coalesce *coal);
int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
			       struct ethtool_coalesce *coal);
int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
				     struct ethtool_link_ksettings *link_ksettings);
int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
				     const struct ethtool_link_ksettings *link_ksettings);
int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc);
int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
		   const u8 hfunc);
int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
		    u32 *rule_locs);
int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd);
u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv);
u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv);
int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
			      struct ethtool_ts_info *info);
int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
			       struct ethtool_flash *flash);
void mlx5e_ethtool_get_pauseparam(struct mlx5e_priv *priv,
				  struct ethtool_pauseparam *pauseparam);
int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv,
				 struct ethtool_pauseparam *pauseparam);
/* mlx5e generic netdev management API */
int mlx5e_netdev_init(struct net_device *netdev,
		      struct mlx5e_priv *priv,
		      struct mlx5_core_dev *mdev,
		      const struct mlx5e_profile *profile,
		      void *ppriv);
void mlx5e_netdev_cleanup(struct net_device *netdev, struct mlx5e_priv *priv);
struct net_device *
mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile,
		    int nch, void *ppriv);
int mlx5e_attach_netdev(struct mlx5e_priv *priv);
void mlx5e_detach_netdev(struct mlx5e_priv *priv);
void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv);
void mlx5e_build_nic_params(struct mlx5e_priv *priv,
			    struct mlx5e_xsk *xsk,
			    struct mlx5e_rss_params *rss_params,
			    struct mlx5e_params *params,
			    u16 mtu);
void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
			   struct mlx5e_params *params);
void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
			    u16 num_channels);

void mlx5e_rx_dim_work(struct work_struct *work);
void mlx5e_tx_dim_work(struct work_struct *work);
netdev_features_t mlx5e_features_check(struct sk_buff *skb,
				       struct net_device *netdev,
				       netdev_features_t features);
int mlx5e_set_features(struct net_device *netdev, netdev_features_t features);
#ifdef CONFIG_MLX5_ESWITCH
int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac);
int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, int max_tx_rate);
int mlx5e_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivi);
int mlx5e_get_vf_stats(struct net_device *dev, int vf, struct ifla_vf_stats *vf_stats);
#endif
#endif /* __MLX5_EN_H__ */